Diffstat (limited to 'contrib/llvm-project/clang/lib')
-rw-r--r--  contrib/llvm-project/clang/lib/ARCMigrate/Transforms.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/AST/APValue.cpp  83
-rw-r--r--  contrib/llvm-project/clang/lib/AST/ASTContext.cpp  223
-rw-r--r--  contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp  18
-rw-r--r--  contrib/llvm-project/clang/lib/AST/ASTImporter.cpp  321
-rw-r--r--  contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp  150
-rw-r--r--  contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp  6
-rw-r--r--  contrib/llvm-project/clang/lib/AST/AttrImpl.cpp  6
-rw-r--r--  contrib/llvm-project/clang/lib/AST/CommentParser.cpp  47
-rw-r--r--  contrib/llvm-project/clang/lib/AST/CommentSema.cpp  48
-rw-r--r--  contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/AST/Decl.cpp  109
-rw-r--r--  contrib/llvm-project/clang/lib/AST/DeclBase.cpp  9
-rw-r--r--  contrib/llvm-project/clang/lib/AST/DeclCXX.cpp  153
-rw-r--r--  contrib/llvm-project/clang/lib/AST/DeclObjC.cpp  11
-rw-r--r--  contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp  13
-rwxr-xr-x  contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp  30
-rw-r--r--  contrib/llvm-project/clang/lib/AST/Expr.cpp  248
-rw-r--r--  contrib/llvm-project/clang/lib/AST/ExprCXX.cpp  4
-rw-r--r--  contrib/llvm-project/clang/lib/AST/ExprClassification.cpp  13
-rw-r--r--  contrib/llvm-project/clang/lib/AST/ExprConstant.cpp  177
-rw-r--r--  contrib/llvm-project/clang/lib/AST/FormatString.cpp  4
-rw-r--r--  contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h  4
-rw-r--r--  contrib/llvm-project/clang/lib/AST/Interp/Function.cpp  3
-rw-r--r--  contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp  306
-rw-r--r--  contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp  15
-rw-r--r--  contrib/llvm-project/clang/lib/AST/Mangle.cpp  6
-rw-r--r--  contrib/llvm-project/clang/lib/AST/MicrosoftCXXABI.cpp  15
-rw-r--r--  contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp  30
-rw-r--r--  contrib/llvm-project/clang/lib/AST/ODRHash.cpp  1
-rw-r--r--  contrib/llvm-project/clang/lib/AST/OSLog.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp  89
-rw-r--r--  contrib/llvm-project/clang/lib/AST/ParentMapContext.cpp  14
-rw-r--r--  contrib/llvm-project/clang/lib/AST/QualTypeNames.cpp  8
-rw-r--r--  contrib/llvm-project/clang/lib/AST/Randstruct.cpp  231
-rw-r--r--  contrib/llvm-project/clang/lib/AST/RawCommentList.cpp  65
-rw-r--r--  contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp  19
-rw-r--r--  contrib/llvm-project/clang/lib/AST/Stmt.cpp  27
-rw-r--r--  contrib/llvm-project/clang/lib/AST/StmtCXX.cpp  1
-rw-r--r--  contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp  405
-rw-r--r--  contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp  185
-rw-r--r--  contrib/llvm-project/clang/lib/AST/StmtProfile.cpp  61
-rw-r--r--  contrib/llvm-project/clang/lib/AST/TemplateBase.cpp  44
-rw-r--r--  contrib/llvm-project/clang/lib/AST/TemplateName.cpp  55
-rw-r--r--  contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp  24
-rw-r--r--  contrib/llvm-project/clang/lib/AST/Type.cpp  55
-rw-r--r--  contrib/llvm-project/clang/lib/AST/TypeLoc.cpp  8
-rw-r--r--  contrib/llvm-project/clang/lib/AST/TypePrinter.cpp  63
-rw-r--r--  contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp  10
-rw-r--r--  contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchFinder.cpp  222
-rw-r--r--  contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchersInternal.cpp  1
-rw-r--r--  contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h  10
-rw-r--r--  contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Parser.cpp  8
-rw-r--r--  contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp  7
-rw-r--r--  contrib/llvm-project/clang/lib/Analysis/AnalysisDeclContext.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/Analysis/BodyFarm.cpp  57
-rw-r--r--  contrib/llvm-project/clang/lib/Analysis/CFG.cpp  122
-rw-r--r--  contrib/llvm-project/clang/lib/Analysis/CalledOnceCheck.cpp  4
-rw-r--r--  contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp  20
-rw-r--r--  contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp  4
-rw-r--r--  contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp  340
-rw-r--r--  contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp  309
-rw-r--r--  contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/ChromiumCheckModel.cpp  67
-rw-r--r--  contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp  753
-rw-r--r--  contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp  299
-rw-r--r--  contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp  203
-rw-r--r--  contrib/llvm-project/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp  600
-rw-r--r--  contrib/llvm-project/clang/lib/Analysis/PathDiagnostic.cpp  8
-rw-r--r--  contrib/llvm-project/clang/lib/Analysis/ReachableCode.cpp  4
-rw-r--r--  contrib/llvm-project/clang/lib/Analysis/RetainSummaryManager.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp  306
-rw-r--r--  contrib/llvm-project/clang/lib/Analysis/ThreadSafetyCommon.cpp  37
-rw-r--r--  contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp  4
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Attributes.cpp  10
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/BuiltinTargetFeatures.h  95
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Builtins.cpp  48
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Cuda.cpp  9
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp  92
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp  42
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/FileManager.cpp  191
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp  8
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/LangOptions.cpp  132
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/LangStandards.cpp  45
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Module.cpp  5
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/OpenCLOptions.cpp  12
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp  64
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/SourceManager.cpp  22
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/TargetID.cpp  6
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp  24
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets.cpp  18
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp  30
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h  2
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.cpp  40
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h  7
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp  1
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/AVR.cpp  557
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/CSKY.cpp  314
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/CSKY.h  107
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/DirectX.cpp  22
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/DirectX.h  93
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp  8
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.cpp  52
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h  62
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp  35
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/PPC.h  16
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp  16
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h  4
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h  10
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.cpp  13
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h  24
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/VE.cpp  9
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.cpp  19
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h  1
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp  25
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/Targets/X86.h  25
-rw-r--r--  contrib/llvm-project/clang/lib/Basic/TokenKinds.cpp  9
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h  1
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/Address.h  5
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp  631
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp  27
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp  80
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h  70
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp  2069
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp  84
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGCUDARuntime.h  13
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp  3
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h  5
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp  222
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp  246
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGCleanup.cpp  30
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp  107
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp  501
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h  44
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp  73
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp  14
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGException.cpp  5
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp  403
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp  22
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp  47
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp  104
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.cpp  52
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.h  38
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp  14
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp  64
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp  84
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp  69
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.cpp  51
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.h  6
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp  1030
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h  29
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp  173
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGRecordLayout.h  6
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp  70
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp  1003
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGVTT.cpp  4
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp  26
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp  38
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp  198
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h  176
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp  674
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h  115
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp  53
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h  5
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/ConstantInitBuilder.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.cpp  52
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.h  20
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp  34
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.h  6
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp  25
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/ModuleBuilder.cpp  12
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp  4
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.cpp  121
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.h  21
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp  461
-rw-r--r--  contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h  2
-rw-r--r--  contrib/llvm-project/clang/lib/CrossTU/CrossTranslationUnit.cpp  114
-rw-r--r--  contrib/llvm-project/clang/lib/DirectoryWatcher/DirectoryScanner.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/Action.cpp  30
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/Compilation.cpp  11
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/Distro.cpp  3
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/Driver.cpp  814
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp  116
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChain.cpp  53
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.cpp  11
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp  69
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.h  3
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp  20
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.cpp  154
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.h  17
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp  174
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.h  1
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp  54
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h  1
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/CSKY.cpp  170
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/CSKY.h  47
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/VE.h  2
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.cpp  26
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/CSKYToolChain.cpp  204
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/CSKYToolChain.h  63
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp  945
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h  13
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp  215
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.h  15
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp  99
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h  4
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp  404
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h  31
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.cpp  55
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp  26
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.h  25
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp  221
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.cpp  60
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.h  4
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.cpp  12
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.cpp  183
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.h  40
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp  7
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.h  3
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Lanai.h  2
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp  93
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.h  2
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp  815
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h  31
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/MSVCSetupApi.h  523
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp  12
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Minix.cpp  9
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.cpp  31
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp  188
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.h  115
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.cpp  7
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.h  1
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/ROCm.h  38
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.h  3
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.cpp  14
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.h  1
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp  110
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.h  9
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.h  2
-rw-r--r--  contrib/llvm-project/clang/lib/Driver/Types.cpp  41
-rw-r--r--  contrib/llvm-project/clang/lib/Edit/RewriteObjCFoundationAPI.cpp  6
-rw-r--r--  contrib/llvm-project/clang/lib/ExtractAPI/API.cpp  233
-rw-r--r--  contrib/llvm-project/clang/lib/ExtractAPI/DeclarationFragments.cpp  799
-rw-r--r--  contrib/llvm-project/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp  974
-rw-r--r--  contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SerializerBase.cpp  19
-rw-r--r--  contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp  708
-rw-r--r--  contrib/llvm-project/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp  79
-rw-r--r--  contrib/llvm-project/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.h  46
-rw-r--r--  contrib/llvm-project/clang/lib/Format/AffectedRangeManager.cpp  6
-rw-r--r--  contrib/llvm-project/clang/lib/Format/BreakableToken.cpp  98
-rw-r--r--  contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp  844
-rw-r--r--  contrib/llvm-project/clang/lib/Format/ContinuationIndenter.h  6
-rw-r--r--  contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.cpp  30
-rw-r--r--  contrib/llvm-project/clang/lib/Format/Format.cpp  456
-rw-r--r--  contrib/llvm-project/clang/lib/Format/FormatToken.cpp  32
-rw-r--r--  contrib/llvm-project/clang/lib/Format/FormatToken.h  626
-rw-r--r--  contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp  277
-rw-r--r--  contrib/llvm-project/clang/lib/Format/FormatTokenLexer.h  7
-rw-r--r--  contrib/llvm-project/clang/lib/Format/MacroExpander.cpp  3
-rw-r--r--  contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp  119
-rw-r--r--  contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp  141
-rw-r--r--  contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp  26
-rw-r--r--  contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp  3
-rw-r--r--  contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp  1631
-rw-r--r--  contrib/llvm-project/clang/lib/Format/TokenAnnotator.h  37
-rw-r--r--  contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp  450
-rw-r--r--  contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp  1543
-rw-r--r--  contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h  42
-rw-r--r--  contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp  276
-rw-r--r--  contrib/llvm-project/clang/lib/Format/WhitespaceManager.h  20
-rw-r--r--  contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp  16
-rw-r--r--  contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp  23
-rw-r--r--  contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp  530
-rw-r--r--  contrib/llvm-project/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp  29
-rw-r--r--  contrib/llvm-project/clang/lib/Frontend/DependencyFile.cpp  35
-rw-r--r--  contrib/llvm-project/clang/lib/Frontend/DependencyGraph.cpp  26
-rw-r--r--  contrib/llvm-project/clang/lib/Frontend/ExtractAPIConsumer.cpp  32
-rw-r--r--  contrib/llvm-project/clang/lib/Frontend/FrontendAction.cpp  81
-rw-r--r--  contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp  137
-rw-r--r--  contrib/llvm-project/clang/lib/Frontend/FrontendOptions.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp  70
-rw-r--r--  contrib/llvm-project/clang/lib/Frontend/LogDiagnosticPrinter.cpp  6
-rw-r--r--  contrib/llvm-project/clang/lib/Frontend/ModuleDependencyCollector.cpp  6
-rw-r--r--  contrib/llvm-project/clang/lib/Frontend/MultiplexConsumer.cpp  6
-rw-r--r--  contrib/llvm-project/clang/lib/Frontend/PrecompiledPreamble.cpp  353
-rw-r--r--  contrib/llvm-project/clang/lib/Frontend/PrintPreprocessedOutput.cpp  44
-rw-r--r--  contrib/llvm-project/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp  8
-rw-r--r--  contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp  20
-rw-r--r--  contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteObjC.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/Frontend/TextDiagnostic.cpp  3
-rw-r--r--  contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp  3
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/__clang_cuda_intrinsics.h  4
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/__wmmintrin_pclmul.h  20
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/altivec.h  735
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/amxintrin.h  2
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/avx2intrin.h  16
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/avx512bwintrin.h  24
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/avx512fintrin.h  149
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/avx512vlbf16intrin.h  2
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/avx512vlbwintrin.h  16
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/avx512vlintrin.h  176
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/avx512vlvnniintrin.h  32
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/avxintrin.h  95
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/avxvnniintrin.h  32
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/bmiintrin.h  8
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/cetintrin.h  18
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/emmintrin.h  1318
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/f16cintrin.h  6
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/hlsl.h  15
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_basic_types.h  64
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_intrinsics.h  15
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/hresetintrin.h  4
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/ia32intrin.h  22
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/immintrin.h  30
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/intrin.h  33
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/keylockerintrin.h  44
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/mm_malloc.h  6
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/opencl-c-base.h  10
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/opencl-c.h  13820
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/pmmintrin.h  2
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/ppc_wrappers/bmi2intrin.h  134
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/ppc_wrappers/bmiintrin.h  165
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/ppc_wrappers/emmintrin.h  2918
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/ppc_wrappers/immintrin.h  27
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/ppc_wrappers/mm_malloc.h  26
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/ppc_wrappers/mmintrin.h  769
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/ppc_wrappers/nmmintrin.h  26
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/ppc_wrappers/pmmintrin.h  147
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/ppc_wrappers/smmintrin.h  580
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/ppc_wrappers/tmmintrin.h  642
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/ppc_wrappers/x86gprintrin.h  17
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/ppc_wrappers/x86intrin.h  28
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/ppc_wrappers/xmmintrin.h  2064
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/rdseedintrin.h  6
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/rtmintrin.h  2
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/smmintrin.h  491
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/stdbool.h  15
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/stddef.h  2
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/stdnoreturn.h  13
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/uintrintrin.h  16
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/unwind.h  13
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/velintrin.h  71
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/velintrin_approx.h  120
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/velintrin_gen.h  1257
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/wasm_simd128.h  4
-rw-r--r--  contrib/llvm-project/clang/lib/Headers/xmmintrin.h  12
-rw-r--r--  contrib/llvm-project/clang/lib/Index/IndexBody.cpp  22
-rw-r--r--  contrib/llvm-project/clang/lib/Index/IndexDecl.cpp  68
-rw-r--r--  contrib/llvm-project/clang/lib/Index/IndexSymbol.cpp  5
-rw-r--r--  contrib/llvm-project/clang/lib/Index/IndexTypeSourceInfo.cpp  10
-rw-r--r--  contrib/llvm-project/clang/lib/Index/IndexingContext.cpp  10
-rw-r--r--  contrib/llvm-project/clang/lib/Index/USRGeneration.cpp  31
-rw-r--r--  contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.cpp  30
-rw-r--r--  contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.h  14
-rw-r--r--  contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.cpp  56
-rw-r--r--  contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.h  4
-rw-r--r--  contrib/llvm-project/clang/lib/Interpreter/Interpreter.cpp  30
-rw-r--r--  contrib/llvm-project/clang/lib/Lex/DependencyDirectivesScanner.cpp  862
-rw-r--r--  contrib/llvm-project/clang/lib/Lex/DependencyDirectivesSourceMinimizer.cpp  982
-rw-r--r--  contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp  237
-rw-r--r--  contrib/llvm-project/clang/lib/Lex/InitHeaderSearch.cpp  13
-rw-r--r--  contrib/llvm-project/clang/lib/Lex/Lexer.cpp  381
-rw-r--r--  contrib/llvm-project/clang/lib/Lex/LiteralSupport.cpp  241
-rw-r--r--  contrib/llvm-project/clang/lib/Lex/MacroInfo.cpp  34
-rw-r--r--  contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp  48
-rw-r--r--  contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp  445
-rw-r--r--  contrib/llvm-project/clang/lib/Lex/PPExpressions.cpp  19
-rw-r--r--  contrib/llvm-project/clang/lib/Lex/PPLexerChange.cpp  109
-rw-r--r--  contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp  127
-rw-r--r--  contrib/llvm-project/clang/lib/Lex/Pragma.cpp  102
-rw-r--r--  contrib/llvm-project/clang/lib/Lex/PreprocessingRecord.cpp  7
-rw-r--r--  contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp  38
-rw-r--r--  contrib/llvm-project/clang/lib/Lex/PreprocessorLexer.cpp  5
-rw-r--r--  contrib/llvm-project/clang/lib/Lex/TokenLexer.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/Parse/ParseAST.cpp  8
-rw-r--r--  contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp  13
-rw-r--r--  contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp  508
-rw-r--r--  contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp  245
-rw-r--r--  contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp  112
-rw-r--r--  contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp  48
-rw-r--r--  contrib/llvm-project/clang/lib/Parse/ParseHLSL.cpp  42
-rw-r--r--  contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp  63
-rw-r--r--  contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp  164
-rw-r--r--  contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp  343
-rw-r--r--  contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp  181
-rw-r--r--  contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp  54
-rw-r--r--  contrib/llvm-project/clang/lib/Parse/Parser.cpp  328
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp  9
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp  12
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/IdentifierResolver.cpp  6
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td  90
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/ParsedAttr.cpp  64
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/Sema.cpp  68
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaAccess.cpp  35
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp  265
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaAvailability.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaCUDA.cpp  51
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp  15
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaCast.cpp  21
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp  739
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp  36
-rwxr-xr-x  contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp  89
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp  323
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp  943
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp  574
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp  297
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaDeclObjC.cpp  53
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp  17
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp  1044
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp  272
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaExprMember.cpp  30
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp  8
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaInit.cpp  97
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp  35
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp  426
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaModule.cpp  327
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp  2115
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp  488
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp  51
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp  27
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaStmtAttr.cpp  78
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp  119
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp  51
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp  99
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp  194
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp  28
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/SemaType.cpp  400
-rw-r--r--  contrib/llvm-project/clang/lib/Sema/TreeTransform.h  297
-rw-r--r--  contrib/llvm-project/clang/lib/Serialization/ASTCommon.cpp  5
-rw-r--r--  contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp  1212
-rw-r--r--  contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp  100
-rw-r--r--  contrib/llvm-project/clang/lib/Serialization/ASTReaderStmt.cpp  156
-rw-r--r--  contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp  461
-rw-r--r--  contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp  74
-rw-r--r--  contrib/llvm-project/clang/lib/Serialization/ASTWriterStmt.cpp  67
-rw-r--r--  contrib/llvm-project/clang/lib/Serialization/GlobalModuleIndex.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp  4
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp  19
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp  12
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp  16
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp  80
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp  6
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp  4
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp  8
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp  30
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CloneChecker.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp  7
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp  51
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp  5
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoChecker.cpp  249
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.cpp  288
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.h  78
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoTesterChecker.cpp  184
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp  23
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp  6
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GTestChecker.cpp  19
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp  185
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp  15
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp  4
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.h  8
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp  8
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp  4
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp  8
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp  150
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp  6
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp  4
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp  1
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp  6
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp  6
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp  6
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp  6
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp  6
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp  7
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp  22
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp  6
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp  5
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp  9
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp  4
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp  4
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp  14
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp  34
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp  679
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp  30
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StringChecker.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.cpp  6
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.h  106
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp  21
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TrustReturnsNonnullChecker.cpp  60
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp  9
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp  8
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp  8
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp  7
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp  4
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/InvalidPtrChecker.cpp  16
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp  17
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp  78
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallDescription.cpp  44
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp  34
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp  29
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ConstraintManager.cpp  80
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp  71
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/DynamicType.cpp  8
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Environment.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp  253
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp  21
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp  24
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp  73
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp  4
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp  51
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp  37
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp  243
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp  71
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp  152
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp  93
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SVals.cpp  67
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp  15
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp  14
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp  173
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp  8
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp  43
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Core/WorkList.cpp  16
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp  63
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalyzerHelpFlags.cpp  5
-rw-r--r--  contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp  4
-rw-r--r--  contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp  955
-rw-r--r--  contrib/llvm-project/clang/lib/Testing/CommandLineArgs.cpp  42
-rw-r--r--  contrib/llvm-project/clang/lib/Testing/TestAST.cpp  162
-rw-r--r--  contrib/llvm-project/clang/lib/Tooling/Core/Replacement.cpp  4
-rw-r--r--  contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp  124
-rw-r--r--  contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningService.cpp  4
-rw-r--r--  contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp  55
-rw-r--r--  contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp  112
-rw-r--r--  contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp  129
-rw-r--r--  contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp  3
-rw-r--r--  contrib/llvm-project/clang/lib/Tooling/Inclusions/StandardLibrary.cpp  165
-rw-r--r--  contrib/llvm-project/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp  6
-rw-r--r--  contrib/llvm-project/clang/lib/Tooling/Syntax/BuildTree.cpp  4
-rw-r--r--  contrib/llvm-project/clang/lib/Tooling/Transformer/RewriteRule.cpp  53
-rw-r--r--  contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp  2
-rw-r--r--  contrib/llvm-project/clang/lib/Tooling/Transformer/Transformer.cpp  58
559 files changed, 56750 insertions, 30049 deletions
diff --git a/contrib/llvm-project/clang/lib/ARCMigrate/Transforms.cpp b/contrib/llvm-project/clang/lib/ARCMigrate/Transforms.cpp
index a08b0d084bb6..90b2b32b6b1b 100644
--- a/contrib/llvm-project/clang/lib/ARCMigrate/Transforms.cpp
+++ b/contrib/llvm-project/clang/lib/ARCMigrate/Transforms.cpp
@@ -25,7 +25,7 @@ using namespace trans;
ASTTraverser::~ASTTraverser() { }
bool MigrationPass::CFBridgingFunctionsDefined() {
- if (!EnableCFBridgeFns.hasValue())
+ if (!EnableCFBridgeFns)
EnableCFBridgeFns = SemaRef.isKnownName("CFBridgingRetain") &&
SemaRef.isKnownName("CFBridgingRelease");
return *EnableCFBridgeFns;
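Most of the mechanical churn in this import is the llvm::Optional API cleanup visible in the hunk above: the deprecated hasValue() test becomes the std::optional-style contextual bool conversion, and getValueOr() becomes value_or() in later hunks. A minimal sketch of the pattern, written against std::optional (whose interface llvm::Optional mirrors in this release); the cached-flag shape imitates CFBridgingFunctionsDefined() above, and isKnownName is a hypothetical stand-in for the Sema query:

    #include <optional>

    static bool isKnownName(const char *) { return true; } // hypothetical stand-in

    // Lazily computed, cached flag; disengaged until the first query.
    static std::optional<bool> EnableCFBridgeFns;

    bool CFBridgingFunctionsDefined() {
      // Before: if (!EnableCFBridgeFns.hasValue())
      // After:  the optional itself converts to bool ("is a value present?").
      if (!EnableCFBridgeFns)
        EnableCFBridgeFns =
            isKnownName("CFBridgingRetain") && isKnownName("CFBridgingRelease");
      return *EnableCFBridgeFns; // dereference reads the cached value
    }

    // The companion rename applied throughout this diff:
    //   Opt.getValueOr(Default)  ->  Opt.value_or(Default)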
diff --git a/contrib/llvm-project/clang/lib/AST/APValue.cpp b/contrib/llvm-project/clang/lib/AST/APValue.cpp
index ef333c771166..a22031142c7c 100644
--- a/contrib/llvm-project/clang/lib/AST/APValue.cpp
+++ b/contrib/llvm-project/clang/lib/AST/APValue.cpp
@@ -625,6 +625,67 @@ static double GetApproxValue(const llvm::APFloat &F) {
return V.convertToDouble();
}
+static bool TryPrintAsStringLiteral(raw_ostream &Out,
+ const PrintingPolicy &Policy,
+ const ArrayType *ATy,
+ ArrayRef<APValue> Inits) {
+ if (Inits.empty())
+ return false;
+
+ QualType Ty = ATy->getElementType();
+ if (!Ty->isAnyCharacterType())
+ return false;
+
+ // Nothing we can do about a sequence that is not null-terminated
+ if (!Inits.back().getInt().isZero())
+ return false;
+ else
+ Inits = Inits.drop_back();
+
+ llvm::SmallString<40> Buf;
+ Buf.push_back('"');
+
+ // Better than printing a two-digit sequence of 10 integers.
+ constexpr size_t MaxN = 36;
+ StringRef Ellipsis;
+ if (Inits.size() > MaxN && !Policy.EntireContentsOfLargeArray) {
+ Ellipsis = "[...]";
+ Inits =
+ Inits.take_front(std::min(MaxN - Ellipsis.size() / 2, Inits.size()));
+ }
+
+ for (auto &Val : Inits) {
+ int64_t Char64 = Val.getInt().getExtValue();
+ if (!isASCII(Char64))
+ return false; // Bye bye, see you in integers.
+ auto Ch = static_cast<unsigned char>(Char64);
+ // The diagnostic message is 'quoted'
+ StringRef Escaped = escapeCStyle<EscapeChar::SingleAndDouble>(Ch);
+ if (Escaped.empty()) {
+ if (!isPrintable(Ch))
+ return false;
+ Buf.emplace_back(Ch);
+ } else {
+ Buf.append(Escaped);
+ }
+ }
+
+ Buf.append(Ellipsis);
+ Buf.push_back('"');
+
+ if (Ty->isWideCharType())
+ Out << 'L';
+ else if (Ty->isChar8Type())
+ Out << "u8";
+ else if (Ty->isChar16Type())
+ Out << 'u';
+ else if (Ty->isChar32Type())
+ Out << 'U';
+
+ Out << Buf;
+ return true;
+}
+
void APValue::printPretty(raw_ostream &Out, const ASTContext &Ctx,
QualType Ty) const {
printPretty(Out, Ctx.getPrintingPolicy(), Ty, &Ctx);
@@ -700,7 +761,7 @@ void APValue::printPretty(raw_ostream &Out, const PrintingPolicy &Policy,
if (!hasLValuePath()) {
// No lvalue path: just print the offset.
CharUnits O = getLValueOffset();
- CharUnits S = Ctx ? Ctx->getTypeSizeInCharsIfKnown(InnerTy).getValueOr(
+ CharUnits S = Ctx ? Ctx->getTypeSizeInCharsIfKnown(InnerTy).value_or(
CharUnits::Zero())
: CharUnits::Zero();
if (!O.isZero()) {
@@ -795,17 +856,23 @@ void APValue::printPretty(raw_ostream &Out, const PrintingPolicy &Policy,
}
case APValue::Array: {
const ArrayType *AT = Ty->castAsArrayTypeUnsafe();
+ unsigned N = getArrayInitializedElts();
+ if (N != 0 && TryPrintAsStringLiteral(Out, Policy, AT,
+ {&getArrayInitializedElt(0), N}))
+ return;
QualType ElemTy = AT->getElementType();
Out << '{';
- if (unsigned N = getArrayInitializedElts()) {
- getArrayInitializedElt(0).printPretty(Out, Policy, ElemTy, Ctx);
- for (unsigned I = 1; I != N; ++I) {
+ unsigned I = 0;
+ switch (N) {
+ case 0:
+ for (; I != N; ++I) {
Out << ", ";
- if (I == 10) {
- // Avoid printing out the entire contents of large arrays.
- Out << "...";
- break;
+ if (I == 10 && !Policy.EntireContentsOfLargeArray) {
+ Out << "...}";
+ return;
}
+ LLVM_FALLTHROUGH;
+ default:
getArrayInitializedElt(I).printPretty(Out, Policy, ElemTy, Ctx);
}
}
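The APValue.cpp hunks above add TryPrintAsStringLiteral so that constant-evaluation output can show a character array as a quoted literal ("abc") instead of a brace list ({97, 98, 99}), falling back whenever the array is not a null-terminated run of printable ASCII; the switch interleaved with the for loop in the Array case is a deliberate fallthrough device that prints the first element without a leading ", ". A self-contained approximation of the core loop (a sketch, not the clang function: plain std::string, escapes reduced to a small table, no PrintingPolicy limit and no L/u8/u/U prefix selection):

    #include <cctype>
    #include <cstdint>
    #include <string>
    #include <vector>

    // Render a null-terminated char array as a C string literal; return false
    // (the caller then prints {1, 2, 3} instead) when that is not possible.
    bool tryPrintAsStringLiteral(const std::vector<int64_t> &Inits,
                                 std::string &Out) {
      if (Inits.empty() || Inits.back() != 0)
        return false; // not null-terminated: nothing we can do
      Out = "\"";
      for (size_t I = 0, N = Inits.size() - 1; I != N; ++I) {
        int64_t C = Inits[I];
        if (C < 0 || C > 127)
          return false; // non-ASCII: fall back to integer printing
        switch (C) {
        case '\n': Out += "\\n";  break;
        case '\t': Out += "\\t";  break;
        case '"':  Out += "\\\""; break;
        case '\\': Out += "\\\\"; break;
        default:
          if (!std::isprint(static_cast<unsigned char>(C)))
            return false; // unprintable and no simple escape
          Out += static_cast<char>(C);
        }
      }
      Out += '"';
      return true;
    }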
diff --git a/contrib/llvm-project/clang/lib/AST/ASTContext.cpp b/contrib/llvm-project/clang/lib/AST/ASTContext.cpp
index e4b3827b8714..682b71a3d686 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTContext.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTContext.cpp
@@ -739,8 +739,8 @@ canonicalizeImmediatelyDeclaredConstraint(const ASTContext &C, Expr *IDC,
// template<typename... T> concept C = true;
// template<C<int> T> struct S; -> constraint is C<{T, int}>
NewConverted.push_back(ConstrainedType);
- for (auto &Arg : OldConverted.front().pack_elements().drop_front(1))
- NewConverted.push_back(Arg);
+ llvm::append_range(NewConverted,
+ OldConverted.front().pack_elements().drop_front(1));
TemplateArgument NewPack(NewConverted);
NewConverted.clear();
@@ -752,8 +752,7 @@ canonicalizeImmediatelyDeclaredConstraint(const ASTContext &C, Expr *IDC,
"Unexpected first argument kind for immediately-declared "
"constraint");
NewConverted.push_back(ConstrainedType);
- for (auto &Arg : OldConverted.drop_front(1))
- NewConverted.push_back(Arg);
+ llvm::append_range(NewConverted, OldConverted.drop_front(1));
}
Expr *NewIDC = ConceptSpecializationExpr::Create(
C, CSE->getNamedConcept(), NewConverted, nullptr,
@@ -888,7 +887,7 @@ ASTContext::getCanonicalTemplateTemplateParmDecl(
TargetCXXABI::Kind ASTContext::getCXXABIKind() const {
auto Kind = getTargetInfo().getCXXABI().getKind();
- return getLangOpts().CXXABI.getValueOr(Kind);
+ return getLangOpts().CXXABI.value_or(Kind);
}
CXXABI *ASTContext::createCXXABI(const TargetInfo &T) {
@@ -973,7 +972,8 @@ static bool isAddrSpaceMapManglingEnabled(const TargetInfo &TI,
ASTContext::ASTContext(LangOptions &LOpts, SourceManager &SM,
IdentifierTable &idents, SelectorTable &sels,
Builtin::Context &builtins, TranslationUnitKind TUKind)
- : ConstantArrayTypes(this_()), FunctionProtoTypes(this_()),
+ : ConstantArrayTypes(this_(), ConstantArrayTypesLog2InitSize),
+ FunctionProtoTypes(this_(), FunctionProtoTypesLog2InitSize),
TemplateSpecializationTypes(this_()),
DependentTemplateSpecializationTypes(this_()), AutoTypes(this_()),
SubstTemplateTemplateParmPacks(this_()),
@@ -1707,8 +1707,17 @@ const llvm::fltSemantics &ASTContext::getFloatTypeSemantics(QualType T) const {
case BuiltinType::BFloat16:
return Target->getBFloat16Format();
case BuiltinType::Float16:
- case BuiltinType::Half:
return Target->getHalfFormat();
+ case BuiltinType::Half:
+ // For HLSL, when the native half type is disabled, half will be treated as
+ // float.
+ if (getLangOpts().HLSL)
+ if (getLangOpts().NativeHalfType)
+ return Target->getHalfFormat();
+ else
+ return Target->getFloatFormat();
+ else
+ return Target->getHalfFormat();
case BuiltinType::Float: return Target->getFloatFormat();
case BuiltinType::Double: return Target->getDoubleFormat();
case BuiltinType::Ibm128:
@@ -1981,8 +1990,11 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
case Type::Vector: {
const auto *VT = cast<VectorType>(T);
TypeInfo EltInfo = getTypeInfo(VT->getElementType());
- Width = EltInfo.Width * VT->getNumElements();
- Align = Width;
+ Width = VT->isExtVectorBoolType() ? VT->getNumElements()
+ : EltInfo.Width * VT->getNumElements();
+ // Enforce at least byte alignment.
+ Align = std::max<unsigned>(8, Width);
+
// If the alignment is not a power of 2, round up to the next power of 2.
// This happens for non-power-of-2 length vectors.
if (Align & (Align-1)) {
@@ -2115,8 +2127,10 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
Align = Target->getLongFractAlign();
break;
case BuiltinType::BFloat16:
- Width = Target->getBFloat16Width();
- Align = Target->getBFloat16Align();
+ if (Target->hasBFloat16Type()) {
+ Width = Target->getBFloat16Width();
+ Align = Target->getBFloat16Align();
+ }
break;
case BuiltinType::Float16:
case BuiltinType::Half:
@@ -2376,6 +2390,10 @@ TypeInfo ASTContext::getTypeInfoImpl(const Type *T) const {
return getTypeInfo(
cast<AttributedType>(T)->getEquivalentType().getTypePtr());
+ case Type::BTFTagAttributed:
+ return getTypeInfo(
+ cast<BTFTagAttributedType>(T)->getWrappedType().getTypePtr());
+
case Type::Atomic: {
// Start with the base type information.
TypeInfo Info = getTypeInfo(cast<AtomicType>(T)->getValueType());
@@ -2593,8 +2611,7 @@ void ASTContext::DeepCollectObjCIvars(const ObjCInterfaceDecl *OI,
if (const ObjCInterfaceDecl *SuperClass = OI->getSuperClass())
DeepCollectObjCIvars(SuperClass, false, Ivars);
if (!leafClass) {
- for (const auto *I : OI->ivars())
- Ivars.push_back(I);
+ llvm::append_range(Ivars, OI->ivars());
} else {
auto *IDecl = const_cast<ObjCInterfaceDecl *>(OI);
for (const ObjCIvarDecl *Iv = IDecl->all_declared_ivar_begin(); Iv;
@@ -2676,7 +2693,11 @@ getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context) {
if (!RD->isUnion())
return structHasUniqueObjectRepresentations(Context, RD);
}
- if (!Field->getType()->isReferenceType() &&
+
+ // A _BitInt type may not be unique if it has padding bits
+ // but if it is a bitfield the padding bits are not used.
+ bool IsBitIntType = Field->getType()->isBitIntType();
+ if (!Field->getType()->isReferenceType() && !IsBitIntType &&
!Context.hasUniqueObjectRepresentations(Field->getType()))
return llvm::None;
@@ -2684,9 +2705,17 @@ getSubobjectSizeInBits(const FieldDecl *Field, const ASTContext &Context) {
Context.toBits(Context.getTypeSizeInChars(Field->getType()));
if (Field->isBitField()) {
int64_t BitfieldSize = Field->getBitWidthValue(Context);
- if (BitfieldSize > FieldSizeInBits)
+ if (IsBitIntType) {
+ if ((unsigned)BitfieldSize >
+ cast<BitIntType>(Field->getType())->getNumBits())
+ return llvm::None;
+ } else if (BitfieldSize > FieldSizeInBits) {
return llvm::None;
+ }
FieldSizeInBits = BitfieldSize;
+ } else if (IsBitIntType &&
+ !Context.hasUniqueObjectRepresentations(Field->getType())) {
+ return llvm::None;
}
return FieldSizeInBits;
}
@@ -2784,8 +2813,13 @@ bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const {
return false;
// All integrals and enums are unique.
- if (Ty->isIntegralOrEnumerationType())
+ if (Ty->isIntegralOrEnumerationType()) {
+ // Except _BitInt types that have padding bits.
+ if (const auto *BIT = dyn_cast<BitIntType>(Ty))
+ return getTypeSize(BIT) == BIT->getNumBits();
+
return true;
+ }
// All other pointers are unique.
if (Ty->isPointerType())
@@ -2808,8 +2842,7 @@ bool ASTContext::hasUniqueObjectRepresentations(QualType Ty) const {
Optional<int64_t> StructSize =
structHasUniqueObjectRepresentations(*this, Record);
- return StructSize &&
- StructSize.getValue() == static_cast<int64_t>(getTypeSize(Ty));
+ return StructSize && *StructSize == static_cast<int64_t>(getTypeSize(Ty));
}
// FIXME: More cases to handle here (list by rsmith):
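The hasUniqueObjectRepresentations changes above feed C++17's std::has_unique_object_representations trait: a type qualifies only when every bit of its object representation participates in the value, which is why a _BitInt with padding bits now disqualifies its enclosing type unless a bitfield covers exactly its declared width. A quick illustration (a sketch, not clang code; the assertions hold on mainstream ABIs where int is 4 bytes with no internal padding):

    #include <type_traits>

    struct Tight { int a; int b; };   // members cover the whole object
    struct Padded { char c; int i; }; // 3 padding bytes after 'c'

    static_assert(std::has_unique_object_representations_v<Tight>);
    static_assert(!std::has_unique_object_representations_v<Padded>);

    int main() { return 0; }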
@@ -4233,6 +4266,13 @@ static bool isCanonicalResultType(QualType T) {
QualType
ASTContext::getFunctionNoProtoType(QualType ResultTy,
const FunctionType::ExtInfo &Info) const {
+ // FIXME: This assertion cannot be enabled (yet) because the ObjC rewriter
+ // functionality creates a function without a prototype regardless of
+ // language mode (so it makes them even in C++). Once the rewriter has been
+ // fixed, this assertion can be enabled again.
+ //assert(!LangOpts.requiresStrictPrototypes() &&
+ // "strict prototypes are disabled");
+
// Unique functions, to guarantee there is only one function of a particular
// structure.
llvm::FoldingSetNodeID ID;
@@ -4436,8 +4476,7 @@ QualType ASTContext::getFunctionTypeInternal(
QualType, SourceLocation, FunctionType::FunctionTypeExtraBitfields,
FunctionType::ExceptionType, Expr *, FunctionDecl *,
FunctionProtoType::ExtParameterInfo, Qualifiers>(
- NumArgs, EPI.Variadic,
- FunctionProtoType::hasExtraBitfields(EPI.ExceptionSpec.Type),
+ NumArgs, EPI.Variadic, EPI.requiresFunctionProtoTypeExtraBitfields(),
ESH.NumExceptionType, ESH.NumExprPtr, ESH.NumFunctionDeclPtr,
EPI.ExtParameterInfos ? NumArgs : 0,
EPI.TypeQuals.hasNonFastQualifiers() ? 1 : 0);
@@ -4682,6 +4721,26 @@ QualType ASTContext::getAttributedType(attr::Kind attrKind,
return QualType(type, 0);
}
+QualType ASTContext::getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr,
+ QualType Wrapped) {
+ llvm::FoldingSetNodeID ID;
+ BTFTagAttributedType::Profile(ID, Wrapped, BTFAttr);
+
+ void *InsertPos = nullptr;
+ BTFTagAttributedType *Ty =
+ BTFTagAttributedTypes.FindNodeOrInsertPos(ID, InsertPos);
+ if (Ty)
+ return QualType(Ty, 0);
+
+ QualType Canon = getCanonicalType(Wrapped);
+ Ty = new (*this, TypeAlignment) BTFTagAttributedType(Canon, Wrapped, BTFAttr);
+
+ Types.push_back(Ty);
+ BTFTagAttributedTypes.InsertNode(Ty, InsertPos);
+
+ return QualType(Ty, 0);
+}
+
/// Retrieve a substitution-result type.
QualType
ASTContext::getSubstTemplateTypeParmType(const TemplateTypeParmType *Parm,
@@ -4826,11 +4885,10 @@ ASTContext::getTemplateSpecializationType(TemplateName Template,
"No dependent template names here!");
// Look through qualified template names.
if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
- Template = TemplateName(QTN->getTemplateDecl());
+ Template = QTN->getUnderlyingTemplate();
bool IsTypeAlias =
- Template.getAsTemplateDecl() &&
- isa<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
+ isa_and_nonnull<TypeAliasTemplateDecl>(Template.getAsTemplateDecl());
QualType CanonType;
if (!Underlying.isNull())
CanonType = getCanonicalType(Underlying);
@@ -4882,7 +4940,7 @@ QualType ASTContext::getCanonicalTemplateSpecializationType(
// Look through qualified template names.
if (QualifiedTemplateName *QTN = Template.getAsQualifiedTemplateName())
- Template = TemplateName(QTN->getTemplateDecl());
+ Template = TemplateName(QTN->getUnderlyingTemplate());
// Build the canonical template specialization type.
TemplateName CanonTemplate = getCanonicalTemplateName(Template);
@@ -6099,6 +6157,9 @@ ASTContext::getNameForTemplate(TemplateName Name,
return DeclarationNameInfo(subst->getParameterPack()->getDeclName(),
NameLoc);
}
+ case TemplateName::UsingTemplate:
+ return DeclarationNameInfo(Name.getAsUsingShadowDecl()->getDeclName(),
+ NameLoc);
}
llvm_unreachable("bad template name kind!");
@@ -6107,6 +6168,7 @@ ASTContext::getNameForTemplate(TemplateName Name,
TemplateName
ASTContext::getCanonicalTemplateName(const TemplateName &Name) const {
switch (Name.getKind()) {
+ case TemplateName::UsingTemplate:
case TemplateName::QualifiedTemplate:
case TemplateName::Template: {
TemplateDecl *Template = Name.getAsTemplateDecl();
@@ -6797,41 +6859,6 @@ static FloatingRank getFloatingRank(QualType T) {
}
}
-/// getFloatingTypeOfSizeWithinDomain - Returns a real floating
-/// point or a complex type (based on typeDomain/typeSize).
-/// 'typeDomain' is a real floating point or complex type.
-/// 'typeSize' is a real floating point or complex type.
-QualType ASTContext::getFloatingTypeOfSizeWithinDomain(QualType Size,
- QualType Domain) const {
- FloatingRank EltRank = getFloatingRank(Size);
- if (Domain->isComplexType()) {
- switch (EltRank) {
- case BFloat16Rank: llvm_unreachable("Complex bfloat16 is not supported");
- case Float16Rank:
- case HalfRank: llvm_unreachable("Complex half is not supported");
- case Ibm128Rank: return getComplexType(Ibm128Ty);
- case FloatRank: return getComplexType(FloatTy);
- case DoubleRank: return getComplexType(DoubleTy);
- case LongDoubleRank: return getComplexType(LongDoubleTy);
- case Float128Rank: return getComplexType(Float128Ty);
- }
- }
-
- assert(Domain->isRealFloatingType() && "Unknown domain!");
- switch (EltRank) {
- case Float16Rank: return HalfTy;
- case BFloat16Rank: return BFloat16Ty;
- case HalfRank: return HalfTy;
- case FloatRank: return FloatTy;
- case DoubleRank: return DoubleTy;
- case LongDoubleRank: return LongDoubleTy;
- case Float128Rank: return Float128Ty;
- case Ibm128Rank:
- return Ibm128Ty;
- }
- llvm_unreachable("getFloatingRank(): illegal value for rank");
-}
-
/// getFloatingTypeOrder - Compare the rank of the two specified floating
/// point types, ignoring the domain of the type (i.e. 'double' ==
/// '_Complex double'). If LHS > RHS, return 1. If LHS == RHS, return 0. If
@@ -8976,10 +9003,9 @@ TemplateName ASTContext::getAssumedTemplateName(DeclarationName Name) const {
/// Retrieve the template name that represents a qualified
/// template name such as \c std::vector.
-TemplateName
-ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
- bool TemplateKeyword,
- TemplateDecl *Template) const {
+TemplateName ASTContext::getQualifiedTemplateName(NestedNameSpecifier *NNS,
+ bool TemplateKeyword,
+ TemplateName Template) const {
assert(NNS && "Missing nested-name-specifier in qualified template name");
// FIXME: Canonicalization?
@@ -10294,7 +10320,16 @@ QualType ASTContext::mergeTypes(QualType LHS, QualType RHS,
if (RHS->isObjCIdType() && LHS->isBlockPointerType())
return RHS;
}
-
+ // Allow __auto_type to match anything; it merges to the type with more
+ // information.
+ if (const auto *AT = LHS->getAs<AutoType>()) {
+ if (!AT->isDeduced() && AT->isGNUAutoType())
+ return RHS;
+ }
+ if (const auto *AT = RHS->getAs<AutoType>()) {
+ if (!AT->isDeduced() && AT->isGNUAutoType())
+ return LHS;
+ }
return {};
}
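A plain-C++ model of the __auto_type merge rule added above (all names hypothetical; the real code operates on clang's QualType): an undeduced GNU __auto_type contributes no information, so the merge simply yields the other side.

    struct Type {
      bool IsGNUAuto = false;
      bool IsDeduced = false;
    };

    // Returns the merged type, or nullptr to fall through to the remaining
    // merge rules (mirroring the 'return {};' above).
    const Type *mergeGNUAuto(const Type *LHS, const Type *RHS) {
      if (LHS->IsGNUAuto && !LHS->IsDeduced)
        return RHS; // __auto_type yields to the more informative type
      if (RHS->IsGNUAuto && !RHS->IsDeduced)
        return LHS;
      return nullptr;
    }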
@@ -11231,7 +11266,7 @@ QualType ASTContext::GetBuiltinType(unsigned Id,
// We really shouldn't be making a no-proto type here.
- if (ArgTypes.empty() && Variadic && !getLangOpts().CPlusPlus)
+ if (ArgTypes.empty() && Variadic && !getLangOpts().requiresStrictPrototypes())
return getFunctionNoProtoType(ResType, EI);
FunctionProtoType::ExtProtoInfo EPI;
@@ -11328,7 +11363,7 @@ static GVALinkage adjustGVALinkageForAttributes(const ASTContext &Context,
// name between the host and device compilation which is the same for the
// same compilation unit whereas different among different compilation
// units.
- if (Context.shouldExternalizeStaticVar(D))
+ if (Context.shouldExternalize(D))
return GVA_StrongExternal;
}
return L;
@@ -11683,9 +11718,11 @@ MangleContext *ASTContext::createDeviceMangleContext(const TargetInfo &T) {
if (const auto *RD = dyn_cast<CXXRecordDecl>(ND))
return RD->getDeviceLambdaManglingNumber();
return llvm::None;
- });
+ },
+ /*IsAux=*/true);
case TargetCXXABI::Microsoft:
- return MicrosoftMangleContext::create(*this, getDiagnostics());
+ return MicrosoftMangleContext::create(*this, getDiagnostics(),
+ /*IsAux=*/true);
}
llvm_unreachable("Unsupported ABI");
}
@@ -11729,6 +11766,8 @@ QualType ASTContext::getRealTypeForBitwidth(unsigned DestWidth,
FloatModeKind Ty =
getTargetInfo().getRealTypeByWidth(DestWidth, ExplicitType);
switch (Ty) {
+ case FloatModeKind::Half:
+ return HalfTy;
case FloatModeKind::Float:
return FloatTy;
case FloatModeKind::Double:
@@ -11751,9 +11790,19 @@ void ASTContext::setManglingNumber(const NamedDecl *ND, unsigned Number) {
MangleNumbers[ND] = Number;
}
-unsigned ASTContext::getManglingNumber(const NamedDecl *ND) const {
+unsigned ASTContext::getManglingNumber(const NamedDecl *ND,
+ bool ForAuxTarget) const {
auto I = MangleNumbers.find(ND);
- return I != MangleNumbers.end() ? I->second : 1;
+ unsigned Res = I != MangleNumbers.end() ? I->second : 1;
+ // CUDA/HIP host compilation encodes the host and device mangling numbers
+ // as the lower and upper halves of a 32-bit integer.
+ if (LangOpts.CUDA && !LangOpts.CUDAIsDevice) {
+ Res = ForAuxTarget ? Res >> 16 : Res & 0xFFFF;
+ } else {
+ assert(!ForAuxTarget && "Only CUDA/HIP host compilation supports mangling "
+ "number for aux target");
+ }
+ return Res > 1 ? Res : 1;
}
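The 16/16 split described in the comment above can be shown with a small standalone sketch (the layout is inferred from the shift/mask in getManglingNumber; names are illustrative):

    #include <cassert>
    #include <cstdint>

    // Host number in the low 16 bits, device (aux-target) number in the
    // high 16 bits, matching the 'Res >> 16' / 'Res & 0xFFFF' accesses above.
    uint32_t packMangleNumbers(uint16_t Host, uint16_t Device) {
      return (static_cast<uint32_t>(Device) << 16) | Host;
    }

    int main() {
      uint32_t N = packMangleNumbers(/*Host=*/3, /*Device=*/7);
      assert((N & 0xFFFF) == 3); // host half
      assert((N >> 16) == 7);    // device (aux-target) half
    }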
void ASTContext::setStaticLocalNumber(const VarDecl *VD, unsigned Number) {
@@ -11852,7 +11901,7 @@ ASTContext::getPredefinedStringLiteralFromCache(StringRef Key) const {
StringLiteral *&Result = StringLiteralCache[Key];
if (!Result)
Result = StringLiteral::Create(
- *this, Key, StringLiteral::Ascii,
+ *this, Key, StringLiteral::Ordinary,
/*Pascal*/ false, getStringLiteralArrayType(CharTy, Key.size()),
SourceLocation());
return Result;
@@ -11875,6 +11924,23 @@ ASTContext::getMSGuidDecl(MSGuidDecl::Parts Parts) const {
return New;
}
+UnnamedGlobalConstantDecl *
+ASTContext::getUnnamedGlobalConstantDecl(QualType Ty,
+ const APValue &APVal) const {
+ llvm::FoldingSetNodeID ID;
+ UnnamedGlobalConstantDecl::Profile(ID, Ty, APVal);
+
+ void *InsertPos;
+ if (UnnamedGlobalConstantDecl *Existing =
+ UnnamedGlobalConstantDecls.FindNodeOrInsertPos(ID, InsertPos))
+ return Existing;
+
+ UnnamedGlobalConstantDecl *New =
+ UnnamedGlobalConstantDecl::Create(*this, Ty, APVal);
+ UnnamedGlobalConstantDecls.InsertNode(New, InsertPos);
+ return New;
+}
+
TemplateParamObjectDecl *
ASTContext::getTemplateParamObjectDecl(QualType T, const APValue &V) const {
assert(T->isRecordType() && "template param object of unexpected type");
@@ -11956,8 +12022,13 @@ uint64_t ASTContext::getTargetNullPointerValue(QualType QT) const {
}
unsigned ASTContext::getTargetAddressSpace(QualType T) const {
- return T->isFunctionType() ? getTargetInfo().getProgramAddressSpace()
- : getTargetAddressSpace(T.getQualifiers());
+ // Return the address space for the type. If the type is a
+ // function type without an address space qualifier, the
+ // program address space is used. Otherwise, the target picks
+ // the best address space based on the type information.
+ return T->isFunctionType() && !T.hasAddressSpace()
+ ? getTargetInfo().getProgramAddressSpace()
+ : getTargetAddressSpace(T.getQualifiers());
}
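Condensed model of the rewritten condition (parameters hypothetical): only function types that lack an explicit address-space qualifier fall back to the target's program address space; an explicit qualifier now wins even on a function type.

    unsigned pickTargetAddressSpace(bool IsFunctionType, bool HasExplicitAS,
                                    unsigned ProgramAS, unsigned QualifierAS) {
      // Previously every function type used ProgramAS unconditionally.
      return (IsFunctionType && !HasExplicitAS) ? ProgramAS : QualifierAS;
    }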
unsigned ASTContext::getTargetAddressSpace(Qualifiers Q) const {
@@ -12255,7 +12326,7 @@ operator<<(const StreamingDiagnostic &DB,
return DB << "a prior #pragma section";
}
-bool ASTContext::mayExternalizeStaticVar(const Decl *D) const {
+bool ASTContext::mayExternalize(const Decl *D) const {
bool IsStaticVar =
isa<VarDecl>(D) && cast<VarDecl>(D)->getStorageClass() == SC_Static;
bool IsExplicitDeviceVar = (D->hasAttr<CUDADeviceAttr>() &&
@@ -12272,8 +12343,8 @@ bool ASTContext::mayExternalizeStaticVar(const Decl *D) const {
GVA_Internal);
}
-bool ASTContext::shouldExternalizeStaticVar(const Decl *D) const {
- return mayExternalizeStaticVar(D) &&
+bool ASTContext::shouldExternalize(const Decl *D) const {
+ return mayExternalize(D) &&
(D->hasAttr<HIPManagedAttr>() || D->hasAttr<CUDAGlobalAttr>() ||
CUDADeviceVarODRUsedByHost.count(cast<VarDecl>(D)));
}
diff --git a/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp b/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp
index 724ede272fbf..28269ec219e4 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTDiagnostic.cpp
@@ -270,9 +270,9 @@ ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty,
std::string S = Ty.getAsString(Context.getPrintingPolicy());
std::string CanS = CanTy.getAsString(Context.getPrintingPolicy());
- for (unsigned I = 0, E = QualTypeVals.size(); I != E; ++I) {
+ for (const intptr_t &QualTypeVal : QualTypeVals) {
QualType CompareTy =
- QualType::getFromOpaquePtr(reinterpret_cast<void*>(QualTypeVals[I]));
+ QualType::getFromOpaquePtr(reinterpret_cast<void *>(QualTypeVal));
if (CompareTy.isNull())
continue;
if (CompareTy == Ty)
@@ -302,11 +302,11 @@ ConvertTypeToDiagnosticString(ASTContext &Context, QualType Ty,
// Check to see if we already desugared this type in this
// diagnostic. If so, don't do it again.
bool Repeated = false;
- for (unsigned i = 0, e = PrevArgs.size(); i != e; ++i) {
+ for (const auto &PrevArg : PrevArgs) {
// TODO: Handle ak_declcontext case.
- if (PrevArgs[i].first == DiagnosticsEngine::ak_qualtype) {
- void *Ptr = (void*)PrevArgs[i].second;
- QualType PrevTy(QualType::getFromOpaquePtr(Ptr));
+ if (PrevArg.first == DiagnosticsEngine::ak_qualtype) {
+ QualType PrevTy(
+ QualType::getFromOpaquePtr(reinterpret_cast<void *>(PrevArg.second)));
if (PrevTy == Ty) {
Repeated = true;
break;
@@ -372,7 +372,7 @@ void clang::FormatASTNodeDiagnosticArgument(
default: llvm_unreachable("unknown ArgumentKind");
case DiagnosticsEngine::ak_addrspace: {
assert(Modifier.empty() && Argument.empty() &&
- "Invalid modifier for Qualfiers argument");
+ "Invalid modifier for Qualifiers argument");
auto S = Qualifiers::getAddrSpaceAsString(static_cast<LangAS>(Val));
if (S.empty()) {
@@ -387,7 +387,7 @@ void clang::FormatASTNodeDiagnosticArgument(
}
case DiagnosticsEngine::ak_qual: {
assert(Modifier.empty() && Argument.empty() &&
- "Invalid modifier for Qualfiers argument");
+ "Invalid modifier for Qualifiers argument");
Qualifiers Q(Qualifiers::fromOpaqueValue(Val));
auto S = Q.getAsString();
@@ -1874,7 +1874,7 @@ class TemplateDiff {
// FIXME: Diffing the APValue would be neat.
// FIXME: Suppress this and use the full name of the declaration if the
// parameter is a pointer or reference.
- TPO->printAsInit(OS);
+ TPO->printAsInit(OS, Policy);
return;
}
VD->printName(OS);
diff --git a/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp b/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp
index 457465e87d93..e9730112eaa3 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTImporter.cpp
@@ -84,7 +84,7 @@ namespace clang {
using ExpectedSLoc = llvm::Expected<SourceLocation>;
using ExpectedName = llvm::Expected<DeclarationName>;
- std::string ImportError::toString() const {
+ std::string ASTImportError::toString() const {
// FIXME: Improve error texts.
switch (Error) {
case NameConflict:
@@ -98,15 +98,13 @@ namespace clang {
return "Invalid error code.";
}
- void ImportError::log(raw_ostream &OS) const {
- OS << toString();
- }
+ void ASTImportError::log(raw_ostream &OS) const { OS << toString(); }
- std::error_code ImportError::convertToErrorCode() const {
+ std::error_code ASTImportError::convertToErrorCode() const {
llvm_unreachable("Function not implemented.");
}
- char ImportError::ID;
+ char ASTImportError::ID;
template <class T>
SmallVector<Decl *, 2>
@@ -138,6 +136,46 @@ namespace clang {
To->setIsUsed();
}
+ /// How to handle import errors that occur when import of a child declaration
+ /// of a DeclContext fails.
+ class ChildErrorHandlingStrategy {
+ /// This context is imported (in the 'from' domain).
+ /// It is nullptr if a non-DeclContext is imported.
+ const DeclContext *const FromDC;
+ /// Ignore import errors of the children.
+ /// If true, the context can be imported successfully if a child
+ /// of it failed to import. Otherwise the import errors of the child nodes
+ /// are accumulated (joined) into the import error object of the parent.
+ /// (Import of a parent can fail in other ways.)
+ bool const IgnoreChildErrors;
+
+ public:
+ ChildErrorHandlingStrategy(const DeclContext *FromDC)
+ : FromDC(FromDC), IgnoreChildErrors(!isa<TagDecl>(FromDC)) {}
+ ChildErrorHandlingStrategy(const Decl *FromD)
+ : FromDC(dyn_cast<DeclContext>(FromD)),
+ IgnoreChildErrors(!isa<TagDecl>(FromD)) {}
+
+ /// Process the import result of a child (of the current declaration).
+ /// \param ResultErr The import error that can be used as result of
+ /// importing the parent. This may be changed by the function.
+ /// \param ChildErr Result of importing a child. Can be success or error.
+ void handleChildImportResult(Error &ResultErr, Error &&ChildErr) {
+ if (ChildErr && !IgnoreChildErrors)
+ ResultErr = joinErrors(std::move(ResultErr), std::move(ChildErr));
+ else
+ consumeError(std::move(ChildErr));
+ }
+
+ /// Determine if import failure of a child does not cause import failure of
+ /// its parent.
+ bool ignoreChildErrorOnParent(Decl *FromChildD) const {
+ if (!IgnoreChildErrors || !FromDC)
+ return false;
+ return FromDC->containsDecl(FromChildD);
+ }
+ };
+
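A standalone model of this strategy, with std::optional<std::string> standing in for llvm::Error (names illustrative): child errors either join the parent's error or are consumed, depending on whether the context is a TagDecl.

    #include <optional>
    #include <string>

    struct ChildErrorPolicy {
      bool IgnoreChildErrors; // true for non-TagDecl contexts

      void handleChild(std::optional<std::string> &ParentErr,
                       std::optional<std::string> ChildErr) const {
        if (ChildErr && !IgnoreChildErrors)
          // Accumulate (join) into the parent's error.
          ParentErr = ParentErr ? *ParentErr + "; " + *ChildErr : *ChildErr;
        // Otherwise the child error is consumed; the parent import proceeds.
      }
    };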
class ASTNodeImporter : public TypeVisitor<ASTNodeImporter, ExpectedType>,
public DeclVisitor<ASTNodeImporter, ExpectedDecl>,
public StmtVisitor<ASTNodeImporter, ExpectedStmt> {
@@ -245,6 +283,7 @@ namespace clang {
ToD = CreateFun(std::forward<Args>(args)...);
// Keep track of imported Decls.
Importer.RegisterImportedDecl(FromD, ToD);
+ Importer.SharedState->markAsNewDecl(ToD);
InitializeImportedDecl(FromD, ToD);
return false; // A new Decl is created.
}
@@ -507,6 +546,7 @@ namespace clang {
ExpectedDecl VisitUsingDecl(UsingDecl *D);
ExpectedDecl VisitUsingShadowDecl(UsingShadowDecl *D);
ExpectedDecl VisitUsingDirectiveDecl(UsingDirectiveDecl *D);
+ ExpectedDecl VisitUsingPackDecl(UsingPackDecl *D);
ExpectedDecl ImportUsingShadowDecls(BaseUsingDecl *D, BaseUsingDecl *ToSI);
ExpectedDecl VisitUsingEnumDecl(UsingEnumDecl *D);
ExpectedDecl VisitUnresolvedUsingValueDecl(UnresolvedUsingValueDecl *D);
@@ -1025,7 +1065,7 @@ using namespace clang;
ExpectedType ASTNodeImporter::VisitType(const Type *T) {
Importer.FromDiag(SourceLocation(), diag::err_unsupported_ast_node)
<< T->getTypeClassName();
- return make_error<ImportError>(ImportError::UnsupportedConstruct);
+ return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct);
}
ExpectedType ASTNodeImporter::VisitAtomicType(const AtomicType *T){
@@ -1676,7 +1716,7 @@ Error ASTNodeImporter::ImportDeclParts(
if (RT && RT->getDecl() == D) {
Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node)
<< D->getDeclKindName();
- return make_error<ImportError>(ImportError::UnsupportedConstruct);
+ return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct);
}
}
}
@@ -1809,7 +1849,7 @@ ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
// because there is an ODR error with two typedefs. As another example,
// the client may allow EnumConstantDecls with the same names but with
// different values in two distinct translation units.
- bool AccumulateChildErrors = isa<TagDecl>(FromDC);
+ ChildErrorHandlingStrategy HandleChildErrors(FromDC);
Error ChildErrors = Error::success();
for (auto *From : FromDC->decls()) {
@@ -1849,20 +1889,14 @@ ASTNodeImporter::ImportDeclContext(DeclContext *FromDC, bool ForceImport) {
if (FromRecordDecl->isCompleteDefinition() &&
!ToRecordDecl->isCompleteDefinition()) {
Error Err = ImportDefinition(FromRecordDecl, ToRecordDecl);
-
- if (Err && AccumulateChildErrors)
- ChildErrors = joinErrors(std::move(ChildErrors), std::move(Err));
- else
- consumeError(std::move(Err));
+ HandleChildErrors.handleChildImportResult(ChildErrors,
+ std::move(Err));
}
}
}
} else {
- if (AccumulateChildErrors)
- ChildErrors =
- joinErrors(std::move(ChildErrors), ImportedOrErr.takeError());
- else
- consumeError(ImportedOrErr.takeError());
+ HandleChildErrors.handleChildImportResult(ChildErrors,
+ ImportedOrErr.takeError());
}
}
@@ -2012,6 +2046,14 @@ Error ASTNodeImporter::ImportDefinition(
}
To->startDefinition();
+ // Set the definition to complete even if it is really not complete during
+ // import. Some AST constructs (expressions) require the record layout
+ // to be calculated (see 'clang::computeDependence') at the time they are
+ // constructed. Import of such an AST node can happen during import of the
+ // same record; there is no way to have a completely defined record (all
+ // fields imported) at that time without multiple AST import passes.
+ if (!Importer.isMinimalImport())
+ To->setCompleteDefinition(true);
// Complete the definition even if error is returned.
// The RecordDecl may be already part of the AST so it is better to
// have it in complete state even if something is wrong with it.
@@ -2076,9 +2118,10 @@ Error ASTNodeImporter::ImportDefinition(
ToCXX->setBases(Bases.data(), Bases.size());
}
- if (shouldForceImportDeclContext(Kind))
+ if (shouldForceImportDeclContext(Kind)) {
if (Error Err = ImportDeclContext(From, /*ForceImport=*/true))
return Err;
+ }
return Error::success();
}
@@ -2196,13 +2239,13 @@ bool ASTNodeImporter::IsStructuralMatch(Decl *From, Decl *To, bool Complain) {
ExpectedDecl ASTNodeImporter::VisitDecl(Decl *D) {
Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node)
<< D->getDeclKindName();
- return make_error<ImportError>(ImportError::UnsupportedConstruct);
+ return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct);
}
ExpectedDecl ASTNodeImporter::VisitImportDecl(ImportDecl *D) {
Importer.FromDiag(D->getLocation(), diag::err_unsupported_ast_node)
<< D->getDeclKindName();
- return make_error<ImportError>(ImportError::UnsupportedConstruct);
+ return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct);
}
ExpectedDecl ASTNodeImporter::VisitEmptyDecl(EmptyDecl *D) {
@@ -2865,7 +2908,7 @@ ExpectedDecl ASTNodeImporter::VisitRecordDecl(RecordDecl *D) {
return TInfoOrErr.takeError();
if (GetImportedOrCreateSpecialDecl(
D2CXX, CXXRecordDecl::CreateLambda, D, Importer.getToContext(),
- DC, *TInfoOrErr, Loc, DCXX->isDependentLambda(),
+ DC, *TInfoOrErr, Loc, DCXX->getLambdaDependencyKind(),
DCXX->isGenericLambda(), DCXX->getLambdaCaptureDefault()))
return D2CXX;
ExpectedDecl CDeclOrErr = import(DCXX->getLambdaContextDecl());
@@ -3182,23 +3225,32 @@ static bool isAncestorDeclContextOf(const DeclContext *DC, const Decl *D) {
return false;
}
+static bool hasTypeDeclaredInsideFunction(QualType T, const FunctionDecl *FD) {
+ if (T.isNull())
+ return false;
+ if (const auto *RecordT = T->getAs<RecordType>()) {
+ const RecordDecl *RD = RecordT->getDecl();
+ assert(RD);
+ if (isAncestorDeclContextOf(FD, RD)) {
+ assert(RD->getLexicalDeclContext() == RD->getDeclContext());
+ return true;
+ }
+ if (const auto *RDTempl = dyn_cast<ClassTemplateSpecializationDecl>(RD))
+ return llvm::count_if(RDTempl->getTemplateArgs().asArray(),
+ [FD](const TemplateArgument &Arg) {
+ return hasTypeDeclaredInsideFunction(
+ Arg.getAsType(), FD);
+ });
+ }
+ return false;
+}
+
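The recursion above generalizes the old single-level RecordType check to template arguments; a plain-C++ model (types hypothetical):

    #include <algorithm>
    #include <vector>

    struct TypeNode {
      bool DeclaredInsideFunction = false;
      std::vector<TypeNode> TemplateArgs; // empty for non-template types
    };

    bool declaredInside(const TypeNode &T) {
      if (T.DeclaredInsideFunction)
        return true;
      // A template specialization counts if any argument (recursively) does.
      return std::any_of(T.TemplateArgs.begin(), T.TemplateArgs.end(),
                         [](const TypeNode &A) { return declaredInside(A); });
    }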
bool ASTNodeImporter::hasAutoReturnTypeDeclaredInside(FunctionDecl *D) {
QualType FromTy = D->getType();
const auto *FromFPT = FromTy->getAs<FunctionProtoType>();
assert(FromFPT && "Must be called on FunctionProtoType");
- if (const AutoType *AutoT =
- FromFPT->getReturnType()->getContainedAutoType()) {
- QualType DeducedT = AutoT->getDeducedType();
- if (const auto *RecordT =
- !DeducedT.isNull() ? DeducedT->getAs<RecordType>() : nullptr) {
- const RecordDecl *RD = RecordT->getDecl();
- assert(RD);
- if (isAncestorDeclContextOf(D, RD)) {
- assert(RD->getLexicalDeclContext() == RD->getDeclContext());
- return true;
- }
- }
- }
+ if (const AutoType *AutoT = FromFPT->getReturnType()->getContainedAutoType())
+ return hasTypeDeclaredInsideFunction(AutoT->getDeducedType(), D);
if (const auto *TypedefT = FromFPT->getReturnType()->getAs<TypedefType>()) {
const TypedefNameDecl *TD = TypedefT->getDecl();
assert(TD);
@@ -3637,19 +3689,19 @@ ExpectedDecl ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
// initializer of a FieldDecl might not have been instantiated in the
// "To" context. However, the "From" context might have instantiated it,
// so we have to merge it.
+ // Note: `hasInClassInitializer()` is not the same as having a non-null
+ // `getInClassInitializer()` value.
if (Expr *FromInitializer = D->getInClassInitializer()) {
- // We don't have yet the initializer set.
- if (FoundField->hasInClassInitializer() &&
- !FoundField->getInClassInitializer()) {
- if (ExpectedExpr ToInitializerOrErr = import(FromInitializer))
+ if (ExpectedExpr ToInitializerOrErr = import(FromInitializer)) {
+ // Import of the FromInitializer may result in the setting of
+ // InClassInitializer. If not, set it here.
+ assert(FoundField->hasInClassInitializer() &&
+ "Field should have an in-class initializer if it has an "
+ "expression for it.");
+ if (!FoundField->getInClassInitializer())
FoundField->setInClassInitializer(*ToInitializerOrErr);
- else {
- // We can't return error here,
- // since we already mapped D as imported.
- // FIXME: warning message?
- consumeError(ToInitializerOrErr.takeError());
- return FoundField;
- }
+ } else {
+ return ToInitializerOrErr.takeError();
}
}
return FoundField;
@@ -3661,7 +3713,7 @@ ExpectedDecl ASTNodeImporter::VisitFieldDecl(FieldDecl *D) {
Importer.ToDiag(FoundField->getLocation(), diag::note_odr_value_here)
<< FoundField->getType();
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
}
}
@@ -3734,7 +3786,7 @@ ExpectedDecl ASTNodeImporter::VisitIndirectFieldDecl(IndirectFieldDecl *D) {
Importer.ToDiag(FoundField->getLocation(), diag::note_odr_value_here)
<< FoundField->getType();
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
}
}
@@ -3925,7 +3977,7 @@ ExpectedDecl ASTNodeImporter::VisitObjCIvarDecl(ObjCIvarDecl *D) {
Importer.ToDiag(FoundIvar->getLocation(), diag::note_odr_value_here)
<< FoundIvar->getType();
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
}
}
@@ -4233,7 +4285,7 @@ ExpectedDecl ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
diag::note_odr_objc_method_here)
<< D->isInstanceMethod() << Name;
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
}
// Check the number of parameters.
@@ -4245,7 +4297,7 @@ ExpectedDecl ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
diag::note_odr_objc_method_here)
<< D->isInstanceMethod() << Name;
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
}
// Check parameter types.
@@ -4261,7 +4313,7 @@ ExpectedDecl ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
Importer.ToDiag((*FoundP)->getLocation(), diag::note_odr_value_here)
<< (*FoundP)->getType();
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
}
}
@@ -4274,7 +4326,7 @@ ExpectedDecl ASTNodeImporter::VisitObjCMethodDecl(ObjCMethodDecl *D) {
diag::note_odr_objc_method_here)
<< D->isInstanceMethod() << Name;
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
}
// FIXME: Any other bits we need to merge?
@@ -4788,6 +4840,35 @@ ExpectedDecl ASTNodeImporter::VisitUsingDirectiveDecl(UsingDirectiveDecl *D) {
return ToUsingDir;
}
+ExpectedDecl ASTNodeImporter::VisitUsingPackDecl(UsingPackDecl *D) {
+ DeclContext *DC, *LexicalDC;
+ DeclarationName Name;
+ SourceLocation Loc;
+ NamedDecl *ToD = nullptr;
+ if (Error Err = ImportDeclParts(D, DC, LexicalDC, Name, ToD, Loc))
+ return std::move(Err);
+ if (ToD)
+ return ToD;
+
+ auto ToInstantiatedFromUsingOrErr =
+ Importer.Import(D->getInstantiatedFromUsingDecl());
+ if (!ToInstantiatedFromUsingOrErr)
+ return ToInstantiatedFromUsingOrErr.takeError();
+ SmallVector<NamedDecl *, 4> Expansions(D->expansions().size());
+ if (Error Err = ImportArrayChecked(D->expansions(), Expansions.begin()))
+ return std::move(Err);
+
+ UsingPackDecl *ToUsingPack;
+ if (GetImportedOrCreateDecl(ToUsingPack, D, Importer.getToContext(), DC,
+ cast<NamedDecl>(*ToInstantiatedFromUsingOrErr),
+ Expansions))
+ return ToUsingPack;
+
+ addDeclToContexts(D, ToUsingPack);
+
+ return ToUsingPack;
+}
+
ExpectedDecl ASTNodeImporter::VisitUnresolvedUsingValueDecl(
UnresolvedUsingValueDecl *D) {
DeclContext *DC, *LexicalDC;
@@ -5172,7 +5253,7 @@ ASTNodeImporter::VisitObjCImplementationDecl(ObjCImplementationDecl *D) {
Importer.FromDiag(D->getLocation(),
diag::note_odr_objc_missing_superclass);
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
}
}
@@ -5211,7 +5292,7 @@ ExpectedDecl ASTNodeImporter::VisitObjCPropertyDecl(ObjCPropertyDecl *D) {
Importer.ToDiag(FoundProp->getLocation(), diag::note_odr_value_here)
<< FoundProp->getType();
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
}
// FIXME: Check property attributes, getters, setters, etc.?
@@ -5316,7 +5397,7 @@ ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
<< D->getPropertyDecl()->getDeclName()
<< (D->getPropertyImplementation() == ObjCPropertyImplDecl::Dynamic);
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
}
// For @synthesize, check that we have the same
@@ -5331,7 +5412,7 @@ ASTNodeImporter::VisitObjCPropertyImplDecl(ObjCPropertyImplDecl *D) {
diag::note_odr_objc_synthesize_ivar_here)
<< D->getPropertyIvarDecl()->getDeclName();
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
}
// Merge the existing implementation with the new implementation.
@@ -5661,7 +5742,7 @@ ExpectedDecl ASTNodeImporter::VisitClassTemplateSpecializationDecl(
}
} else { // ODR violation.
// FIXME HandleNameConflict
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
}
}
@@ -5944,9 +6025,10 @@ ExpectedDecl ASTNodeImporter::VisitVarTemplateSpecializationDecl(
return TInfoOrErr.takeError();
TemplateArgumentListInfo ToTAInfo;
- if (Error Err = ImportTemplateArgumentListInfo(
- D->getTemplateArgsInfo(), ToTAInfo))
- return std::move(Err);
+ if (const ASTTemplateArgumentListInfo *Args = D->getTemplateArgsInfo()) {
+ if (Error Err = ImportTemplateArgumentListInfo(*Args, ToTAInfo))
+ return std::move(Err);
+ }
using PartVarSpecDecl = VarTemplatePartialSpecializationDecl;
// Create a new specialization.
@@ -6138,13 +6220,13 @@ ASTNodeImporter::VisitFunctionTemplateDecl(FunctionTemplateDecl *D) {
ExpectedStmt ASTNodeImporter::VisitStmt(Stmt *S) {
Importer.FromDiag(S->getBeginLoc(), diag::err_unsupported_ast_node)
<< S->getStmtClassName();
- return make_error<ImportError>(ImportError::UnsupportedConstruct);
+ return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct);
}
ExpectedStmt ASTNodeImporter::VisitGCCAsmStmt(GCCAsmStmt *S) {
if (Importer.returnWithErrorInTest())
- return make_error<ImportError>(ImportError::UnsupportedConstruct);
+ return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct);
SmallVector<IdentifierInfo *, 4> Names;
for (unsigned I = 0, E = S->getNumOutputs(); I != E; I++) {
IdentifierInfo *ToII = Importer.Import(S->getOutputIdentifier(I));
@@ -6257,9 +6339,10 @@ ExpectedStmt ASTNodeImporter::VisitCompoundStmt(CompoundStmt *S) {
if (!ToRBracLocOrErr)
return ToRBracLocOrErr.takeError();
- return CompoundStmt::Create(
- Importer.getToContext(), ToStmts,
- *ToLBracLocOrErr, *ToRBracLocOrErr);
+ FPOptionsOverride FPO =
+ S->hasStoredFPFeatures() ? S->getStoredFPFeatures() : FPOptionsOverride();
+ return CompoundStmt::Create(Importer.getToContext(), ToStmts, FPO,
+ *ToLBracLocOrErr, *ToRBracLocOrErr);
}
ExpectedStmt ASTNodeImporter::VisitCaseStmt(CaseStmt *S) {
@@ -6651,11 +6734,12 @@ ExpectedStmt ASTNodeImporter::VisitObjCAutoreleasePoolStmt(
ExpectedStmt ASTNodeImporter::VisitExpr(Expr *E) {
Importer.FromDiag(E->getBeginLoc(), diag::err_unsupported_ast_node)
<< E->getStmtClassName();
- return make_error<ImportError>(ImportError::UnsupportedConstruct);
+ return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct);
}
ExpectedStmt ASTNodeImporter::VisitSourceLocExpr(SourceLocExpr *E) {
Error Err = Error::success();
+ auto ToType = importChecked(Err, E->getType());
auto BLoc = importChecked(Err, E->getBeginLoc());
auto RParenLoc = importChecked(Err, E->getEndLoc());
if (Err)
@@ -6665,8 +6749,8 @@ ExpectedStmt ASTNodeImporter::VisitSourceLocExpr(SourceLocExpr *E) {
return ParentContextOrErr.takeError();
return new (Importer.getToContext())
- SourceLocExpr(Importer.getToContext(), E->getIdentKind(), BLoc, RParenLoc,
- *ParentContextOrErr);
+ SourceLocExpr(Importer.getToContext(), E->getIdentKind(), ToType, BLoc,
+ RParenLoc, *ParentContextOrErr);
}
ExpectedStmt ASTNodeImporter::VisitVAArgExpr(VAArgExpr *E) {
@@ -7312,7 +7396,7 @@ ExpectedStmt ASTNodeImporter::VisitExplicitCastExpr(ExplicitCastExpr *E) {
}
default:
llvm_unreachable("Cast expression of unsupported type!");
- return make_error<ImportError>(ImportError::UnsupportedConstruct);
+ return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct);
}
}
@@ -8118,8 +8202,23 @@ ExpectedStmt ASTNodeImporter::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *E) {
if (!UsedContextOrErr)
return UsedContextOrErr.takeError();
- return CXXDefaultInitExpr::Create(
- Importer.getToContext(), *ToBeginLocOrErr, *ToFieldOrErr, *UsedContextOrErr);
+ FieldDecl *ToField = *ToFieldOrErr;
+ assert(ToField->hasInClassInitializer() &&
+ "Field should have in-class initializer if there is a default init "
+ "expression that uses it.");
+ if (!ToField->getInClassInitializer()) {
+ // The in-class initializer may be not yet set in "To" AST even if the
+ // field is already there. This must be set here to make construction of
+ // CXXDefaultInitExpr work.
+ auto ToInClassInitializerOrErr =
+ import(E->getField()->getInClassInitializer());
+ if (!ToInClassInitializerOrErr)
+ return ToInClassInitializerOrErr.takeError();
+ ToField->setInClassInitializer(*ToInClassInitializerOrErr);
+ }
+
+ return CXXDefaultInitExpr::Create(Importer.getToContext(), *ToBeginLocOrErr,
+ ToField, *UsedContextOrErr);
}
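The asserts in the last two hunks hinge on the flag/expression distinction noted earlier; a standalone model (fields hypothetical): a declaration can carry the "has an in-class initializer" bit while the initializer expression itself is still unattached.

    struct FieldState {
      bool HasInClassInit = false;    // the declaration-level flag
      const void *InitExpr = nullptr; // attached lazily, e.g. during import
    };

    // Import order can leave the flag set before the expression arrives,
    // so consumers must check the pointer, not just the flag.
    bool initializerReady(const FieldState &F) {
      return F.HasInClassInit && F.InitExpr != nullptr;
    }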
ExpectedStmt ASTNodeImporter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
@@ -8158,7 +8257,7 @@ ExpectedStmt ASTNodeImporter::VisitCXXNamedCastExpr(CXXNamedCastExpr *E) {
ToOperatorLoc, ToRParenLoc, ToAngleBrackets);
} else {
llvm_unreachable("Unknown cast type");
- return make_error<ImportError>();
+ return make_error<ASTImportError>();
}
}
@@ -8353,7 +8452,7 @@ ASTImporter::Import(ExprWithCleanups::CleanupObject From) {
// FIXME: Handle BlockDecl when we implement importing BlockExpr in
// ASTNodeImporter.
- return make_error<ImportError>(ImportError::UnsupportedConstruct);
+ return make_error<ASTImportError>(ASTImportError::UnsupportedConstruct);
}
ExpectedTypePtr ASTImporter::Import(const Type *FromT) {
@@ -8558,6 +8657,13 @@ Expected<Attr *> ASTImporter::Import(const Attr *FromAttr) {
break;
}
+ case attr::EnableIf: {
+ const auto *From = cast<EnableIfAttr>(FromAttr);
+ AI.importAttr(From, AI.importArg(From->getCond()).value(),
+ From->getMessage());
+ break;
+ }
+
case attr::AssertCapability: {
const auto *From = cast<AssertCapabilityAttr>(FromAttr);
AI.importAttr(From,
@@ -8697,7 +8803,7 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) {
// Check whether there was a previous failed import.
// If yes return the existing error.
if (auto Error = getImportDeclErrorIfAny(FromD))
- return make_error<ImportError>(*Error);
+ return make_error<ASTImportError>(*Error);
// Check whether we've already imported this declaration.
Decl *ToD = GetAlreadyImportedOrNull(FromD);
@@ -8705,7 +8811,7 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) {
// Already imported (possibly from another TU) and with an error.
if (auto Error = SharedState->getImportDeclErrorIfAny(ToD)) {
setImportDeclError(FromD, *Error);
- return make_error<ImportError>(*Error);
+ return make_error<ASTImportError>(*Error);
}
// If FromD has some updated flags after last import, apply it.
@@ -8757,9 +8863,9 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) {
// Error encountered for the first time.
// After takeError the error is not usable any more in ToDOrErr.
// Get a copy of the error object (any more simple solution for this?).
- ImportError ErrOut;
+ ASTImportError ErrOut;
handleAllErrors(ToDOrErr.takeError(),
- [&ErrOut](const ImportError &E) { ErrOut = E; });
+ [&ErrOut](const ASTImportError &E) { ErrOut = E; });
setImportDeclError(FromD, ErrOut);
// Set the error for the mapped to Decl, which is in the "to" context.
if (Pos != ImportedDecls.end())
@@ -8767,8 +8873,20 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) {
// Set the error for all nodes which have been created before we
// recognized the error.
- for (const auto &Path : SavedImportPaths[FromD])
+ for (const auto &Path : SavedImportPaths[FromD]) {
+ // The import path contains import-dependency nodes first.
+ // Save the node that was imported as a dependency of the current node.
+ Decl *PrevFromDi = FromD;
for (Decl *FromDi : Path) {
+ // The path begins and ends with 'FromD'; skip it.
+ if (FromDi == FromD)
+ continue;
+ // We should not set import error on a node and all following nodes in
+ // the path if child import errors are ignored.
+ if (ChildErrorHandlingStrategy(FromDi).ignoreChildErrorOnParent(
+ PrevFromDi))
+ break;
+ PrevFromDi = FromDi;
setImportDeclError(FromDi, ErrOut);
//FIXME Should we remove these Decls from ImportedDecls?
// Set the error for the mapped to Decl, which is in the "to" context.
@@ -8778,10 +8896,11 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) {
// FIXME Should we remove these Decls from the LookupTable,
// and from ImportedFromDecls?
}
+ }
SavedImportPaths.erase(FromD);
// Do not return ToDOrErr, error was taken out of it.
- return make_error<ImportError>(ErrOut);
+ return make_error<ASTImportError>(ErrOut);
}
ToD = *ToDOrErr;
@@ -8793,7 +8912,7 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) {
if (!ToD) {
auto Err = getImportDeclErrorIfAny(FromD);
assert(Err);
- return make_error<ImportError>(*Err);
+ return make_error<ASTImportError>(*Err);
}
// We could import from the current TU without error. But previously we
@@ -8801,7 +8920,7 @@ Expected<Decl *> ASTImporter::Import(Decl *FromD) {
// ASTImporter object) and with an error.
if (auto Error = SharedState->getImportDeclErrorIfAny(ToD)) {
setImportDeclError(FromD, *Error);
- return make_error<ImportError>(*Error);
+ return make_error<ASTImportError>(*Error);
}
// Make sure that ImportImpl registered the imported decl.
@@ -9106,13 +9225,11 @@ Expected<TemplateName> ASTImporter::Import(TemplateName From) {
auto QualifierOrErr = Import(QTN->getQualifier());
if (!QualifierOrErr)
return QualifierOrErr.takeError();
-
- if (ExpectedDecl ToTemplateOrErr = Import(From.getAsTemplateDecl()))
- return ToContext.getQualifiedTemplateName(
- *QualifierOrErr, QTN->hasTemplateKeyword(),
- cast<TemplateDecl>(*ToTemplateOrErr));
- else
- return ToTemplateOrErr.takeError();
+ auto TNOrErr = Import(QTN->getUnderlyingTemplate());
+ if (!TNOrErr)
+ return TNOrErr.takeError();
+ return ToContext.getQualifiedTemplateName(
+ *QualifierOrErr, QTN->hasTemplateKeyword(), *TNOrErr);
}
case TemplateName::DependentTemplate: {
@@ -9161,6 +9278,12 @@ Expected<TemplateName> ASTImporter::Import(TemplateName From) {
return ToContext.getSubstTemplateTemplateParmPack(
cast<TemplateTemplateParmDecl>(*ParamOrErr), *ArgPackOrErr);
}
+ case TemplateName::UsingTemplate: {
+ auto UsingOrError = Import(From.getAsUsingShadowDecl());
+ if (!UsingOrError)
+ return UsingOrError.takeError();
+ return TemplateName(cast<UsingShadowDecl>(*UsingOrError));
+ }
}
llvm_unreachable("Invalid template name kind");
@@ -9210,13 +9333,13 @@ Expected<FileID> ASTImporter::Import(FileID FromID, bool IsBuiltin) {
ExpectedSLoc ToExLocS = Import(FromEx.getExpansionLocStart());
if (!ToExLocS)
return ToExLocS.takeError();
- unsigned TokenLen = FromSM.getFileIDSize(FromID);
+ unsigned ExLength = FromSM.getFileIDSize(FromID);
SourceLocation MLoc;
if (FromEx.isMacroArgExpansion()) {
- MLoc = ToSM.createMacroArgExpansionLoc(*ToSpLoc, *ToExLocS, TokenLen);
+ MLoc = ToSM.createMacroArgExpansionLoc(*ToSpLoc, *ToExLocS, ExLength);
} else {
if (ExpectedSLoc ToExLocE = Import(FromEx.getExpansionLocEnd()))
- MLoc = ToSM.createExpansionLoc(*ToSpLoc, *ToExLocS, *ToExLocE, TokenLen,
+ MLoc = ToSM.createExpansionLoc(*ToSpLoc, *ToExLocS, *ToExLocE, ExLength,
FromEx.isExpansionTokenRange());
else
return ToExLocE.takeError();
@@ -9262,7 +9385,7 @@ Expected<FileID> ASTImporter::Import(FileID FromID, bool IsBuiltin) {
Cache->getBufferOrNone(FromContext.getDiagnostics(),
FromSM.getFileManager(), SourceLocation{});
if (!FromBuf)
- return llvm::make_error<ImportError>(ImportError::Unknown);
+ return llvm::make_error<ASTImportError>(ASTImportError::Unknown);
std::unique_ptr<llvm::MemoryBuffer> ToBuf =
llvm::MemoryBuffer::getMemBufferCopy(FromBuf->getBuffer(),
@@ -9338,7 +9461,7 @@ Expected<CXXCtorInitializer *> ASTImporter::Import(CXXCtorInitializer *From) {
*ToExprOrErr, *RParenLocOrErr);
} else {
// FIXME: assert?
- return make_error<ImportError>();
+ return make_error<ASTImportError>();
}
}
@@ -9667,7 +9790,7 @@ Expected<DeclarationName> ASTImporter::HandleNameConflict(DeclarationName Name,
unsigned NumDecls) {
if (ODRHandling == ODRHandlingType::Conservative)
// Report error at any name conflict.
- return make_error<ImportError>(ImportError::NameConflict);
+ return make_error<ASTImportError>(ASTImportError::NameConflict);
else
// Allow to create the new Decl with the same name.
return Name;
@@ -9727,16 +9850,16 @@ Decl *ASTImporter::MapImported(Decl *From, Decl *To) {
return To;
}
-llvm::Optional<ImportError>
+llvm::Optional<ASTImportError>
ASTImporter::getImportDeclErrorIfAny(Decl *FromD) const {
auto Pos = ImportDeclErrors.find(FromD);
if (Pos != ImportDeclErrors.end())
return Pos->second;
else
- return Optional<ImportError>();
+ return Optional<ASTImportError>();
}
-void ASTImporter::setImportDeclError(Decl *From, ImportError Error) {
+void ASTImporter::setImportDeclError(Decl *From, ASTImportError Error) {
auto InsertRes = ImportDeclErrors.insert({From, Error});
(void)InsertRes;
// Either we set the error for the first time, or we already had set one and
diff --git a/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp b/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp
index 0813a5204a5e..d80fc3ce7292 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTStructuralEquivalence.cpp
@@ -517,6 +517,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
case TemplateName::Template:
case TemplateName::QualifiedTemplate:
case TemplateName::SubstTemplateTemplateParm:
+ case TemplateName::UsingTemplate:
// It is sufficient to check value of getAsTemplateDecl.
break;
@@ -932,6 +933,13 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
break;
+ case Type::BTFTagAttributed:
+ if (!IsStructurallyEquivalent(
+ Context, cast<BTFTagAttributedType>(T1)->getWrappedType(),
+ cast<BTFTagAttributedType>(T2)->getWrappedType()))
+ return false;
+ break;
+
case Type::Paren:
if (!IsStructurallyEquivalent(Context, cast<ParenType>(T1)->getInnerType(),
cast<ParenType>(T2)->getInnerType()))
@@ -1228,16 +1236,17 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
!IsStructurallyEquivalent(Context, Int1->getNumBitsExpr(),
Int2->getNumBitsExpr()))
return false;
+ break;
}
} // end switch
return true;
}
-/// Determine structural equivalence of two fields.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
- FieldDecl *Field1, FieldDecl *Field2) {
- const auto *Owner2 = cast<RecordDecl>(Field2->getDeclContext());
+ FieldDecl *Field1, FieldDecl *Field2,
+ QualType Owner2Type) {
+ const auto *Owner2 = cast<Decl>(Field2->getDeclContext());
// For anonymous structs/unions, match up the anonymous struct/union type
// declarations directly, so that we don't go off searching for anonymous
@@ -1257,7 +1266,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(
Owner2->getLocation(),
Context.getApplicableDiagnostic(diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(Owner2);
+ << Owner2Type;
Context.Diag2(Field2->getLocation(), diag::note_odr_field_name)
<< Field2->getDeclName();
Context.Diag1(Field1->getLocation(), diag::note_odr_field_name)
@@ -1272,7 +1281,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Context.Diag2(
Owner2->getLocation(),
Context.getApplicableDiagnostic(diag::err_odr_tag_type_inconsistent))
- << Context.ToCtx.getTypeDeclType(Owner2);
+ << Owner2Type;
Context.Diag2(Field2->getLocation(), diag::note_odr_field)
<< Field2->getDeclName() << Field2->getType();
Context.Diag1(Field1->getLocation(), diag::note_odr_field)
@@ -1288,6 +1297,14 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return true;
}
+/// Determine structural equivalence of two fields.
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ FieldDecl *Field1, FieldDecl *Field2) {
+ const auto *Owner2 = cast<RecordDecl>(Field2->getDeclContext());
+ return IsStructurallyEquivalent(Context, Field1, Field2,
+ Context.ToCtx.getTypeDeclType(Owner2));
+}
+
/// Determine structural equivalence of two methods.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
CXXMethodDecl *Method1,
@@ -1602,6 +1619,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
}
// Check the fields for consistency.
+ QualType D2Type = Context.ToCtx.getTypeDeclType(D2);
RecordDecl::field_iterator Field2 = D2->field_begin(),
Field2End = D2->field_end();
for (RecordDecl::field_iterator Field1 = D1->field_begin(),
@@ -1620,7 +1638,7 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return false;
}
- if (!IsStructurallyEquivalent(Context, *Field1, *Field2))
+ if (!IsStructurallyEquivalent(Context, *Field1, *Field2, D2Type))
return false;
}
@@ -1926,6 +1944,126 @@ static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
return true;
}
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ ObjCIvarDecl *D1, ObjCIvarDecl *D2,
+ QualType Owner2Type) {
+ if (D1->getAccessControl() != D2->getAccessControl())
+ return false;
+
+ return IsStructurallyEquivalent(Context, cast<FieldDecl>(D1),
+ cast<FieldDecl>(D2), Owner2Type);
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ ObjCIvarDecl *D1, ObjCIvarDecl *D2) {
+ QualType Owner2Type =
+ Context.ToCtx.getObjCInterfaceType(D2->getContainingInterface());
+ return IsStructurallyEquivalent(Context, D1, D2, Owner2Type);
+}
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ ObjCMethodDecl *Method1,
+ ObjCMethodDecl *Method2) {
+ bool PropertiesEqual =
+ Method1->isInstanceMethod() == Method2->isInstanceMethod() &&
+ Method1->isVariadic() == Method2->isVariadic() &&
+ Method1->isDirectMethod() == Method2->isDirectMethod();
+ if (!PropertiesEqual)
+ return false;
+
+ // Compare selector slot names.
+ Selector Selector1 = Method1->getSelector(),
+ Selector2 = Method2->getSelector();
+ unsigned NumArgs = Selector1.getNumArgs();
+ if (NumArgs != Selector2.getNumArgs())
+ return false;
+ // Compare all selector slots. For selectors with arguments this means all
+ // argument slots; if there are no arguments, compare the single name slot.
+ unsigned SlotsToCheck = NumArgs > 0 ? NumArgs : 1;
+ for (unsigned I = 0; I < SlotsToCheck; ++I) {
+ if (!IsStructurallyEquivalent(Selector1.getIdentifierInfoForSlot(I),
+ Selector2.getIdentifierInfoForSlot(I)))
+ return false;
+ }
+
+ // Compare types.
+ if (!IsStructurallyEquivalent(Context, Method1->getReturnType(),
+ Method2->getReturnType()))
+ return false;
+ assert(
+ Method1->param_size() == Method2->param_size() &&
+ "Same number of arguments should be already enforced in Selector checks");
+ for (ObjCMethodDecl::param_type_iterator
+ ParamT1 = Method1->param_type_begin(),
+ ParamT1End = Method1->param_type_end(),
+ ParamT2 = Method2->param_type_begin(),
+ ParamT2End = Method2->param_type_end();
+ (ParamT1 != ParamT1End) && (ParamT2 != ParamT2End);
+ ++ParamT1, ++ParamT2) {
+ if (!IsStructurallyEquivalent(Context, *ParamT1, *ParamT2))
+ return false;
+ }
+
+ return true;
+}
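Standalone model of the selector-slot rule above, with a selector modeled as one string per slot (an assumed simplification): a zero-argument selector still has exactly one name slot, so at least one slot is always compared.

    #include <cassert>
    #include <cstddef>
    #include <string>
    #include <vector>

    using Selector = std::vector<std::string>; // one entry per slot (assumed)

    bool sameSelectorSlots(const Selector &A, const Selector &B) {
      if (A.size() != B.size())
        return false;
      for (std::size_t I = 0; I < A.size(); ++I)
        if (A[I] != B[I])
          return false;
      return true;
    }

    int main() {
      assert(sameSelectorSlots({"count"}, {"count"}));              // no args
      assert(!sameSelectorSlots({"setX:", "y:"}, {"setX:", "z:"})); // arg slot
    }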
+
+static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
+ ObjCCategoryDecl *D1,
+ ObjCCategoryDecl *D2) {
+ if (!IsStructurallyEquivalent(D1->getIdentifier(), D2->getIdentifier()))
+ return false;
+
+ if (!IsStructurallyEquivalent(D1->getClassInterface()->getIdentifier(),
+ D2->getClassInterface()->getIdentifier()))
+ return false;
+
+ // Compare protocols.
+ ObjCCategoryDecl::protocol_iterator Protocol2 = D2->protocol_begin(),
+ Protocol2End = D2->protocol_end();
+ for (ObjCCategoryDecl::protocol_iterator Protocol1 = D1->protocol_begin(),
+ Protocol1End = D1->protocol_end();
+ Protocol1 != Protocol1End; ++Protocol1, ++Protocol2) {
+ if (Protocol2 == Protocol2End)
+ return false;
+ if (!IsStructurallyEquivalent((*Protocol1)->getIdentifier(),
+ (*Protocol2)->getIdentifier()))
+ return false;
+ }
+ if (Protocol2 != Protocol2End)
+ return false;
+
+ // Compare ivars.
+ QualType D2Type = Context.ToCtx.getObjCInterfaceType(D2->getClassInterface());
+ ObjCCategoryDecl::ivar_iterator Ivar2 = D2->ivar_begin(),
+ Ivar2End = D2->ivar_end();
+ for (ObjCCategoryDecl::ivar_iterator Ivar1 = D1->ivar_begin(),
+ Ivar1End = D1->ivar_end();
+ Ivar1 != Ivar1End; ++Ivar1, ++Ivar2) {
+ if (Ivar2 == Ivar2End)
+ return false;
+ if (!IsStructurallyEquivalent(Context, *Ivar1, *Ivar2, D2Type))
+ return false;
+ }
+ if (Ivar2 != Ivar2End)
+ return false;
+
+ // Compare methods.
+ ObjCCategoryDecl::method_iterator Method2 = D2->meth_begin(),
+ Method2End = D2->meth_end();
+ for (ObjCCategoryDecl::method_iterator Method1 = D1->meth_begin(),
+ Method1End = D1->meth_end();
+ Method1 != Method1End; ++Method1, ++Method2) {
+ if (Method2 == Method2End)
+ return false;
+ if (!IsStructurallyEquivalent(Context, *Method1, *Method2))
+ return false;
+ }
+ if (Method2 != Method2End)
+ return false;
+
+ return true;
+}
+
/// Determine structural equivalence of two declarations.
static bool IsStructurallyEquivalent(StructuralEquivalenceContext &Context,
Decl *D1, Decl *D2) {
diff --git a/contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp b/contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp
index b333f4618efb..64823f77e58a 100644
--- a/contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ASTTypeTraits.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/ASTContext.h"
#include "clang/AST/Attr.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/DeclObjC.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/AST/OpenMPClause.h"
#include "clang/AST/TypeLoc.h"
@@ -52,6 +53,7 @@ const ASTNodeKind::KindInfo ASTNodeKind::AllKindInfo[] = {
{NKI_None, "Attr"},
#define ATTR(A) {NKI_Attr, #A "Attr"},
#include "clang/Basic/AttrList.inc"
+ {NKI_None, "ObjCProtocolLoc"},
};
bool ASTNodeKind::isBaseOf(ASTNodeKind Other, unsigned *Distance) const {
@@ -193,6 +195,8 @@ void DynTypedNode::print(llvm::raw_ostream &OS,
QualType(T, 0).print(OS, PP);
else if (const Attr *A = get<Attr>())
A->printPretty(OS, PP);
+ else if (const ObjCProtocolLoc *P = get<ObjCProtocolLoc>())
+ P->getProtocol()->print(OS, PP);
else
OS << "Unable to print values of type " << NodeKind.asStringRef() << "\n";
}
@@ -228,5 +232,7 @@ SourceRange DynTypedNode::getSourceRange() const {
return CBS->getSourceRange();
if (const auto *A = get<Attr>())
return A->getRange();
+ if (const ObjCProtocolLoc *P = get<ObjCProtocolLoc>())
+ return P->getSourceRange();
return SourceRange();
}
diff --git a/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp b/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp
index 7b8acfcd92be..c1e7435b22da 100644
--- a/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp
+++ b/contrib/llvm-project/clang/lib/AST/AttrImpl.cpp
@@ -168,7 +168,7 @@ OMPDeclareTargetDeclAttr::getActiveAttr(const ValueDecl *VD) {
llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy>
OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(const ValueDecl *VD) {
llvm::Optional<OMPDeclareTargetDeclAttr *> ActiveAttr = getActiveAttr(VD);
- if (ActiveAttr.hasValue())
+ if (ActiveAttr)
return ActiveAttr.getValue()->getMapType();
return llvm::None;
}
@@ -176,7 +176,7 @@ OMPDeclareTargetDeclAttr::isDeclareTargetDeclaration(const ValueDecl *VD) {
llvm::Optional<OMPDeclareTargetDeclAttr::DevTypeTy>
OMPDeclareTargetDeclAttr::getDeviceType(const ValueDecl *VD) {
llvm::Optional<OMPDeclareTargetDeclAttr *> ActiveAttr = getActiveAttr(VD);
- if (ActiveAttr.hasValue())
+ if (ActiveAttr)
return ActiveAttr.getValue()->getDevType();
return llvm::None;
}
@@ -184,7 +184,7 @@ OMPDeclareTargetDeclAttr::getDeviceType(const ValueDecl *VD) {
llvm::Optional<SourceLocation>
OMPDeclareTargetDeclAttr::getLocation(const ValueDecl *VD) {
llvm::Optional<OMPDeclareTargetDeclAttr *> ActiveAttr = getActiveAttr(VD);
- if (ActiveAttr.hasValue())
+ if (ActiveAttr)
return ActiveAttr.getValue()->getRange().getBegin();
return llvm::None;
}
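The hasValue() cleanups above lean on Optional's contextual conversion to bool; std::optional offers the same idiom, as this small sketch shows:

    #include <optional>

    int demo(std::optional<int> ActiveAttr) {
      if (ActiveAttr)      // equivalent to ActiveAttr.has_value()
        return *ActiveAttr;
      return -1;
    }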
diff --git a/contrib/llvm-project/clang/lib/AST/CommentParser.cpp b/contrib/llvm-project/clang/lib/AST/CommentParser.cpp
index 29983b0a16c3..7bac1fb99b88 100644
--- a/contrib/llvm-project/clang/lib/AST/CommentParser.cpp
+++ b/contrib/llvm-project/clang/lib/AST/CommentParser.cpp
@@ -289,22 +289,19 @@ void Parser::parseTParamCommandArgs(TParamCommandComment *TPC,
Arg.getText());
}
-void Parser::parseBlockCommandArgs(BlockCommandComment *BC,
- TextTokenRetokenizer &Retokenizer,
- unsigned NumArgs) {
- typedef BlockCommandComment::Argument Argument;
- Argument *Args =
- new (Allocator.Allocate<Argument>(NumArgs)) Argument[NumArgs];
+ArrayRef<Comment::Argument>
+Parser::parseCommandArgs(TextTokenRetokenizer &Retokenizer, unsigned NumArgs) {
+ auto *Args = new (Allocator.Allocate<Comment::Argument>(NumArgs))
+ Comment::Argument[NumArgs];
unsigned ParsedArgs = 0;
Token Arg;
while (ParsedArgs < NumArgs && Retokenizer.lexWord(Arg)) {
- Args[ParsedArgs] = Argument(SourceRange(Arg.getLocation(),
- Arg.getEndLocation()),
- Arg.getText());
+ Args[ParsedArgs] = Comment::Argument{
+ SourceRange(Arg.getLocation(), Arg.getEndLocation()), Arg.getText()};
ParsedArgs++;
}
- S.actOnBlockCommandArgs(BC, llvm::makeArrayRef(Args, ParsedArgs));
+ return llvm::makeArrayRef(Args, ParsedArgs);
}
BlockCommandComment *Parser::parseBlockCommand() {
@@ -360,7 +357,7 @@ BlockCommandComment *Parser::parseBlockCommand() {
else if (TPC)
parseTParamCommandArgs(TPC, Retokenizer);
else
- parseBlockCommandArgs(BC, Retokenizer, Info->NumArgs);
+ S.actOnBlockCommandArgs(BC, parseCommandArgs(Retokenizer, Info->NumArgs));
Retokenizer.putBackLeftoverTokens();
}
@@ -401,32 +398,24 @@ BlockCommandComment *Parser::parseBlockCommand() {
InlineCommandComment *Parser::parseInlineCommand() {
assert(Tok.is(tok::backslash_command) || Tok.is(tok::at_command));
+ const CommandInfo *Info = Traits.getCommandInfo(Tok.getCommandID());
const Token CommandTok = Tok;
consumeToken();
TextTokenRetokenizer Retokenizer(Allocator, *this);
+ ArrayRef<Comment::Argument> Args =
+ parseCommandArgs(Retokenizer, Info->NumArgs);
- Token ArgTok;
- bool ArgTokValid = Retokenizer.lexWord(ArgTok);
-
- InlineCommandComment *IC;
- if (ArgTokValid) {
- IC = S.actOnInlineCommand(CommandTok.getLocation(),
- CommandTok.getEndLocation(),
- CommandTok.getCommandID(),
- ArgTok.getLocation(),
- ArgTok.getEndLocation(),
- ArgTok.getText());
- } else {
- IC = S.actOnInlineCommand(CommandTok.getLocation(),
- CommandTok.getEndLocation(),
- CommandTok.getCommandID());
+ InlineCommandComment *IC = S.actOnInlineCommand(
+ CommandTok.getLocation(), CommandTok.getEndLocation(),
+ CommandTok.getCommandID(), Args);
+ if (Args.size() < Info->NumArgs) {
Diag(CommandTok.getEndLocation().getLocWithOffset(1),
- diag::warn_doc_inline_contents_no_argument)
- << CommandTok.is(tok::at_command)
- << Traits.getCommandInfo(CommandTok.getCommandID())->Name
+ diag::warn_doc_inline_command_not_enough_arguments)
+ << CommandTok.is(tok::at_command) << Info->Name << Args.size()
+ << Info->NumArgs
<< SourceRange(CommandTok.getLocation(), CommandTok.getEndLocation());
}
diff --git a/contrib/llvm-project/clang/lib/AST/CommentSema.cpp b/contrib/llvm-project/clang/lib/AST/CommentSema.cpp
index 087f103e4931..9b0f03445888 100644
--- a/contrib/llvm-project/clang/lib/AST/CommentSema.cpp
+++ b/contrib/llvm-project/clang/lib/AST/CommentSema.cpp
@@ -265,10 +265,8 @@ void Sema::actOnParamCommandParamNameArg(ParamCommandComment *Command,
// User didn't provide a direction argument.
Command->setDirection(ParamCommandComment::In, /* Explicit = */ false);
}
- typedef BlockCommandComment::Argument Argument;
- Argument *A = new (Allocator) Argument(SourceRange(ArgLocBegin,
- ArgLocEnd),
- Arg);
+ auto *A = new (Allocator)
+ Comment::Argument{SourceRange(ArgLocBegin, ArgLocEnd), Arg};
Command->setArgs(llvm::makeArrayRef(A, 1));
}
@@ -303,10 +301,8 @@ void Sema::actOnTParamCommandParamNameArg(TParamCommandComment *Command,
// Parser will not feed us more arguments than needed.
assert(Command->getNumArgs() == 0);
- typedef BlockCommandComment::Argument Argument;
- Argument *A = new (Allocator) Argument(SourceRange(ArgLocBegin,
- ArgLocEnd),
- Arg);
+ auto *A = new (Allocator)
+ Comment::Argument{SourceRange(ArgLocBegin, ArgLocEnd), Arg};
Command->setArgs(llvm::makeArrayRef(A, 1));
if (!isTemplateOrSpecialization()) {
@@ -361,37 +357,15 @@ void Sema::actOnTParamCommandFinish(TParamCommandComment *Command,
checkBlockCommandEmptyParagraph(Command);
}
-InlineCommandComment *Sema::actOnInlineCommand(SourceLocation CommandLocBegin,
- SourceLocation CommandLocEnd,
- unsigned CommandID) {
- ArrayRef<InlineCommandComment::Argument> Args;
+InlineCommandComment *
+Sema::actOnInlineCommand(SourceLocation CommandLocBegin,
+ SourceLocation CommandLocEnd, unsigned CommandID,
+ ArrayRef<Comment::Argument> Args) {
StringRef CommandName = Traits.getCommandInfo(CommandID)->Name;
- return new (Allocator) InlineCommandComment(
- CommandLocBegin,
- CommandLocEnd,
- CommandID,
- getInlineCommandRenderKind(CommandName),
- Args);
-}
-InlineCommandComment *Sema::actOnInlineCommand(SourceLocation CommandLocBegin,
- SourceLocation CommandLocEnd,
- unsigned CommandID,
- SourceLocation ArgLocBegin,
- SourceLocation ArgLocEnd,
- StringRef Arg) {
- typedef InlineCommandComment::Argument Argument;
- Argument *A = new (Allocator) Argument(SourceRange(ArgLocBegin,
- ArgLocEnd),
- Arg);
- StringRef CommandName = Traits.getCommandInfo(CommandID)->Name;
-
- return new (Allocator) InlineCommandComment(
- CommandLocBegin,
- CommandLocEnd,
- CommandID,
- getInlineCommandRenderKind(CommandName),
- llvm::makeArrayRef(A, 1));
+ return new (Allocator)
+ InlineCommandComment(CommandLocBegin, CommandLocEnd, CommandID,
+ getInlineCommandRenderKind(CommandName), Args);
}
InlineContentComment *Sema::actOnUnknownCommand(SourceLocation LocBegin,
diff --git a/contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp b/contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp
index 2a2b98776e65..1f573346b441 100644
--- a/contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ComputeDependence.cpp
@@ -696,7 +696,7 @@ ExprDependence clang::computeDependence(CXXNewExpr *E) {
E->getAllocatedTypeSourceInfo()->getType()->getDependence());
D |= toExprDependenceForImpliedType(E->getAllocatedType()->getDependence());
auto Size = E->getArraySize();
- if (Size.hasValue() && *Size)
+ if (Size && *Size)
D |= turnTypeToValueDependence((*Size)->getDependence());
if (auto *I = E->getInitializer())
D |= turnTypeToValueDependence(I->getDependence());
diff --git a/contrib/llvm-project/clang/lib/AST/Decl.cpp b/contrib/llvm-project/clang/lib/AST/Decl.cpp
index e6ce5a80dacd..68a9dca7d1f8 100644
--- a/contrib/llvm-project/clang/lib/AST/Decl.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Decl.cpp
@@ -30,6 +30,8 @@
#include "clang/AST/ODRHash.h"
#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/AST/PrettyPrinter.h"
+#include "clang/AST/Randstruct.h"
+#include "clang/AST/RecordLayout.h"
#include "clang/AST/Redeclarable.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/TemplateBase.h"
@@ -391,11 +393,18 @@ void LinkageComputer::mergeTemplateLV(
bool considerVisibility =
shouldConsiderTemplateVisibility(fn, specInfo);
- // Merge information from the template parameters.
FunctionTemplateDecl *temp = specInfo->getTemplate();
- LinkageInfo tempLV =
- getLVForTemplateParameterList(temp->getTemplateParameters(), computation);
- LV.mergeMaybeWithVisibility(tempLV, considerVisibility);
+
+ // Merge information from the template declaration.
+ LinkageInfo tempLV = getLVForDecl(temp, computation);
+ // The linkage of the specialization should be consistent with the
+ // template declaration.
+ LV.setLinkage(tempLV.getLinkage());
+
+ // Merge information from the template parameters.
+ LinkageInfo paramsLV =
+ getLVForTemplateParameterList(temp->getTemplateParameters(), computation);
+ LV.mergeMaybeWithVisibility(paramsLV, considerVisibility);
// Merge information from the template arguments.
const TemplateArgumentList &templateArgs = *specInfo->TemplateArguments;
@@ -579,6 +588,7 @@ static bool isExportedFromModuleInterfaceUnit(const NamedDecl *D) {
// FIXME: Handle isModulePrivate.
switch (D->getModuleOwnershipKind()) {
case Decl::ModuleOwnershipKind::Unowned:
+ case Decl::ModuleOwnershipKind::ReachableWhenImported:
case Decl::ModuleOwnershipKind::ModulePrivate:
return false;
case Decl::ModuleOwnershipKind::Visible:
@@ -589,11 +599,12 @@ static bool isExportedFromModuleInterfaceUnit(const NamedDecl *D) {
}
static LinkageInfo getInternalLinkageFor(const NamedDecl *D) {
- // Internal linkage declarations within a module interface unit are modeled
- // as "module-internal linkage", which means that they have internal linkage
- // formally but can be indirectly accessed from outside the module via inline
- // functions and templates defined within the module.
- if (isInModulePurview(D))
+  // (for the Modules TS) Internal linkage declarations within a module
+ // interface unit are modeled as "module-internal linkage", which means that
+ // they have internal linkage formally but can be indirectly accessed from
+ // outside the module via inline functions and templates defined within the
+ // module.
+ if (isInModulePurview(D) && D->getASTContext().getLangOpts().ModulesTS)
return LinkageInfo(ModuleInternalLinkage, DefaultVisibility, false);
return LinkageInfo::internal();
@@ -1540,6 +1551,11 @@ LinkageInfo LinkageComputer::getDeclLinkageAndVisibility(const NamedDecl *D) {
}
Module *Decl::getOwningModuleForLinkage(bool IgnoreLinkage) const {
+ if (isa<NamespaceDecl>(this))
+    // Namespaces never have module linkage; it is the entities within them
+    // that may have it.
+ return nullptr;
+
Module *M = getOwningModule();
if (!M)
return nullptr;
@@ -1550,24 +1566,28 @@ Module *Decl::getOwningModuleForLinkage(bool IgnoreLinkage) const {
return nullptr;
case Module::ModuleInterfaceUnit:
+ case Module::ModulePartitionInterface:
+ case Module::ModulePartitionImplementation:
return M;
+ case Module::ModuleHeaderUnit:
case Module::GlobalModuleFragment: {
// External linkage declarations in the global module have no owning module
// for linkage purposes. But internal linkage declarations in the global
// module fragment of a particular module are owned by that module for
// linkage purposes.
+ // FIXME: p1815 removes the need for this distinction -- there are no
+ // internal linkage declarations that need to be referred to from outside
+ // this TU.
if (IgnoreLinkage)
return nullptr;
bool InternalLinkage;
if (auto *ND = dyn_cast<NamedDecl>(this))
InternalLinkage = !ND->hasExternalFormalLinkage();
- else {
- auto *NSD = dyn_cast<NamespaceDecl>(this);
- InternalLinkage = (NSD && NSD->isAnonymousNamespace()) ||
- isInAnonymousNamespace();
- }
- return InternalLinkage ? M->Parent : nullptr;
+ else
+ InternalLinkage = isInAnonymousNamespace();
+ return InternalLinkage ? M->Kind == Module::ModuleHeaderUnit ? M : M->Parent
+ : nullptr;
}
case Module::PrivateModuleFragment:
@@ -1832,7 +1852,7 @@ bool NamedDecl::hasLinkage() const {
NamedDecl *NamedDecl::getUnderlyingDeclImpl() {
NamedDecl *ND = this;
- while (auto *UD = dyn_cast<UsingShadowDecl>(ND))
+ if (auto *UD = dyn_cast<UsingShadowDecl>(ND))
ND = UD->getTargetDecl();
if (auto *AD = dyn_cast<ObjCCompatibleAliasDecl>(ND))
@@ -2018,7 +2038,7 @@ const char *VarDecl::getStorageClassSpecifierString(StorageClass SC) {
VarDecl::VarDecl(Kind DK, ASTContext &C, DeclContext *DC,
SourceLocation StartLoc, SourceLocation IdLoc,
- IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo,
+ const IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo,
StorageClass SC)
: DeclaratorDecl(DK, DC, IdLoc, Id, T, TInfo, StartLoc),
redeclarable_base(C) {
@@ -2033,10 +2053,9 @@ VarDecl::VarDecl(Kind DK, ASTContext &C, DeclContext *DC,
// Everything else is implicitly initialized to false.
}
-VarDecl *VarDecl::Create(ASTContext &C, DeclContext *DC,
- SourceLocation StartL, SourceLocation IdL,
- IdentifierInfo *Id, QualType T, TypeSourceInfo *TInfo,
- StorageClass S) {
+VarDecl *VarDecl::Create(ASTContext &C, DeclContext *DC, SourceLocation StartL,
+ SourceLocation IdL, const IdentifierInfo *Id,
+ QualType T, TypeSourceInfo *TInfo, StorageClass S) {
return new (C, DC) VarDecl(Var, C, DC, StartL, IdL, Id, T, TInfo, S);
}
@@ -2690,6 +2709,42 @@ VarDecl::needsDestruction(const ASTContext &Ctx) const {
return getType().isDestructedType();
}
+bool VarDecl::hasFlexibleArrayInit(const ASTContext &Ctx) const {
+ assert(hasInit() && "Expect initializer to check for flexible array init");
+ auto *Ty = getType()->getAs<RecordType>();
+ if (!Ty || !Ty->getDecl()->hasFlexibleArrayMember())
+ return false;
+ auto *List = dyn_cast<InitListExpr>(getInit()->IgnoreParens());
+ if (!List)
+ return false;
+ const Expr *FlexibleInit = List->getInit(List->getNumInits() - 1);
+ auto InitTy = Ctx.getAsConstantArrayType(FlexibleInit->getType());
+ if (!InitTy)
+ return false;
+ return InitTy->getSize() != 0;
+}
+
+CharUnits VarDecl::getFlexibleArrayInitChars(const ASTContext &Ctx) const {
+ assert(hasInit() && "Expect initializer to check for flexible array init");
+ auto *Ty = getType()->getAs<RecordType>();
+ if (!Ty || !Ty->getDecl()->hasFlexibleArrayMember())
+ return CharUnits::Zero();
+ auto *List = dyn_cast<InitListExpr>(getInit()->IgnoreParens());
+ if (!List)
+ return CharUnits::Zero();
+ const Expr *FlexibleInit = List->getInit(List->getNumInits() - 1);
+ auto InitTy = Ctx.getAsConstantArrayType(FlexibleInit->getType());
+ if (!InitTy)
+ return CharUnits::Zero();
+ CharUnits FlexibleArraySize = Ctx.getTypeSizeInChars(InitTy);
+ const ASTRecordLayout &RL = Ctx.getASTRecordLayout(Ty->getDecl());
+ CharUnits FlexibleArrayOffset =
+ Ctx.toCharUnitsFromBits(RL.getFieldOffset(RL.getFieldCount() - 1));
+ if (FlexibleArrayOffset + FlexibleArraySize < RL.getSize())
+ return CharUnits::Zero();
+ return FlexibleArrayOffset + FlexibleArraySize - RL.getSize();
+}
+
MemberSpecializationInfo *VarDecl::getMemberSpecializationInfo() const {
if (isStaticDataMember())
// FIXME: Remove ?
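[Editor's note: a short sketch of the initializers the two new helpers classify, assuming Clang's extension that permits initializing a flexible array member:]

    struct S { int n; char tail[]; };   // flexible array member
    struct S a = { 1, { 'x', 'y' } };   // hasFlexibleArrayInit -> true
    struct S b = { 1, {} };             // zero-sized tail init  -> false
    // getFlexibleArrayInitChars returns how far the tail initializer extends
    // past the record's nominal size: offset of 'tail' (4 on a typical
    // 32-bit-int target) + 2 chars - sizeof(S) = 2 bytes here.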
@@ -2798,7 +2853,8 @@ Expr *ParmVarDecl::getDefaultArg() {
Expr *Arg = getInit();
if (auto *E = dyn_cast_or_null<FullExpr>(Arg))
- return E->getSubExpr();
+ if (!isa<ConstantExpr>(E))
+ return E->getSubExpr();
return Arg;
}
@@ -2885,6 +2941,7 @@ FunctionDecl::FunctionDecl(Kind DK, ASTContext &C, DeclContext *DC,
FunctionDeclBits.IsDefaulted = false;
FunctionDeclBits.IsExplicitlyDefaulted = false;
FunctionDeclBits.HasDefaultedFunctionInfo = false;
+ FunctionDeclBits.IsIneligibleOrNotSelected = false;
FunctionDeclBits.HasImplicitReturnZero = false;
FunctionDeclBits.IsLateTemplateParsed = false;
FunctionDeclBits.ConstexprKind = static_cast<uint64_t>(ConstexprKind);
@@ -4286,6 +4343,7 @@ TagDecl::TagDecl(Kind DK, TagKind TK, const ASTContext &C, DeclContext *DC,
setEmbeddedInDeclarator(false);
setFreeStanding(false);
setCompleteDefinitionRequired(false);
+ TagDeclBits.IsThisDeclarationADemotedDefinition = false;
}
SourceLocation TagDecl::getOuterLocStart() const {
@@ -4551,6 +4609,7 @@ RecordDecl::RecordDecl(Kind DK, TagKind TK, const ASTContext &C,
setHasNonTrivialToPrimitiveCopyCUnion(false);
setParamDestroyedInCallee(false);
setArgPassingRestrictions(APK_CanPassInRegs);
+ setIsRandomized(false);
}
RecordDecl *RecordDecl::Create(const ASTContext &C, TagKind TK, DeclContext *DC,
@@ -4634,6 +4693,12 @@ bool RecordDecl::isMsStruct(const ASTContext &C) const {
return hasAttr<MSStructAttr>() || C.getLangOpts().MSBitfields == 1;
}
+void RecordDecl::reorderDecls(const SmallVectorImpl<Decl *> &Decls) {
+ std::tie(FirstDecl, LastDecl) = DeclContext::BuildDeclChain(Decls, false);
+ LastDecl->NextInContextAndBits.setPointer(nullptr);
+ setIsRandomized(true);
+}
+
void RecordDecl::LoadFieldsFromExternalStorage() const {
ExternalASTSource *Source = getASTContext().getExternalSource();
assert(hasExternalLexicalStorage() && Source && "No external storage?");
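[Editor's note: reorderDecls above is the mutation hook behind the new Randstruct support (see the Randstruct.h include added earlier). A hypothetical caller, sketched under the assumption that the randomization pass produces a shuffled copy of the record's members; Shuffled and SeededRNG are placeholder names:]

    SmallVector<Decl *, 16> Shuffled(RD->decls().begin(), RD->decls().end());
    std::shuffle(Shuffled.begin(), Shuffled.end(), SeededRNG); // assumed PRNG
    RD->reorderDecls(Shuffled);  // relinks FirstDecl/LastDecl, sets the flag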
diff --git a/contrib/llvm-project/clang/lib/AST/DeclBase.cpp b/contrib/llvm-project/clang/lib/AST/DeclBase.cpp
index 9ee1cc083086..13dd6da3f24f 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclBase.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclBase.cpp
@@ -838,6 +838,7 @@ unsigned Decl::getIdentifierNamespaceForKind(Kind DeclKind) {
case ExternCContext:
case Decomposition:
case MSGuid:
+ case UnnamedGlobalConstant:
case TemplateParamObject:
case UsingDirective:
@@ -1161,6 +1162,8 @@ bool DeclContext::isDependentContext() const {
if (Record->isDependentLambda())
return true;
+ if (Record->isNeverDependentLambda())
+ return false;
}
if (const auto *Function = dyn_cast<FunctionDecl>(this)) {
@@ -1534,7 +1537,11 @@ void DeclContext::removeDecl(Decl *D) {
if (Map) {
StoredDeclsMap::iterator Pos = Map->find(ND->getDeclName());
assert(Pos != Map->end() && "no lookup entry for decl");
- Pos->second.remove(ND);
+ StoredDeclsList &List = Pos->second;
+ List.remove(ND);
+ // Clean up the entry if there are no more decls.
+ if (List.isNull())
+ Map->erase(Pos);
}
} while (DC->isTransparentContext() && (DC = DC->getParent()));
}
diff --git a/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp b/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp
index 0cf6e60b2a6c..6fc9a86bc3cf 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclCXX.cpp
@@ -146,16 +146,16 @@ CXXRecordDecl *CXXRecordDecl::Create(const ASTContext &C, TagKind TK,
CXXRecordDecl *
CXXRecordDecl::CreateLambda(const ASTContext &C, DeclContext *DC,
TypeSourceInfo *Info, SourceLocation Loc,
- bool Dependent, bool IsGeneric,
+ unsigned DependencyKind, bool IsGeneric,
LambdaCaptureDefault CaptureDefault) {
auto *R = new (C, DC) CXXRecordDecl(CXXRecord, TTK_Class, C, DC, Loc, Loc,
nullptr, nullptr);
R->setBeingDefined(true);
- R->DefinitionData =
- new (C) struct LambdaDefinitionData(R, Info, Dependent, IsGeneric,
- CaptureDefault);
+ R->DefinitionData = new (C) struct LambdaDefinitionData(
+ R, Info, DependencyKind, IsGeneric, CaptureDefault);
R->setMayHaveOutOfDateDef(false);
R->setImplicit(true);
+
C.getTypeDeclType(R, /*PrevDecl=*/nullptr);
return R;
}
@@ -825,29 +825,11 @@ void CXXRecordDecl::addedMember(Decl *D) {
data().HasInheritedDefaultConstructor = true;
}
- // Handle destructors.
- if (const auto *DD = dyn_cast<CXXDestructorDecl>(D)) {
- SMKind |= SMF_Destructor;
-
- if (DD->isUserProvided())
- data().HasIrrelevantDestructor = false;
- // If the destructor is explicitly defaulted and not trivial or not public
- // or if the destructor is deleted, we clear HasIrrelevantDestructor in
- // finishedDefaultedOrDeletedMember.
-
- // C++11 [class.dtor]p5:
- // A destructor is trivial if [...] the destructor is not virtual.
- if (DD->isVirtual()) {
- data().HasTrivialSpecialMembers &= ~SMF_Destructor;
- data().HasTrivialSpecialMembersForCall &= ~SMF_Destructor;
- }
-
- if (DD->isNoReturn())
- data().IsAnyDestructorNoReturn = true;
- }
-
// Handle member functions.
if (const auto *Method = dyn_cast<CXXMethodDecl>(D)) {
+ if (isa<CXXDestructorDecl>(D))
+ SMKind |= SMF_Destructor;
+
if (Method->isCopyAssignmentOperator()) {
SMKind |= SMF_CopyAssignment;
@@ -893,31 +875,9 @@ void CXXRecordDecl::addedMember(Decl *D) {
data().HasTrivialSpecialMembersForCall &=
data().DeclaredSpecialMembers | ~SMKind;
- if (!Method->isImplicit() && !Method->isUserProvided()) {
- // This method is user-declared but not user-provided. We can't work out
- // whether it's trivial yet (not until we get to the end of the class).
- // We'll handle this method in finishedDefaultedOrDeletedMember.
- } else if (Method->isTrivial()) {
- data().HasTrivialSpecialMembers |= SMKind;
- data().HasTrivialSpecialMembersForCall |= SMKind;
- } else if (Method->isTrivialForCall()) {
- data().HasTrivialSpecialMembersForCall |= SMKind;
- data().DeclaredNonTrivialSpecialMembers |= SMKind;
- } else {
- data().DeclaredNonTrivialSpecialMembers |= SMKind;
- // If this is a user-provided function, do not set
- // DeclaredNonTrivialSpecialMembersForCall here since we don't know
- // yet whether the method would be considered non-trivial for the
- // purpose of calls (attribute "trivial_abi" can be dropped from the
- // class later, which can change the special method's triviality).
- if (!Method->isUserProvided())
- data().DeclaredNonTrivialSpecialMembersForCall |= SMKind;
- }
-
// Note when we have declared a declared special member, and suppress the
// implicit declaration of this special member.
data().DeclaredSpecialMembers |= SMKind;
-
if (!Method->isImplicit()) {
data().UserDeclaredSpecialMembers |= SMKind;
@@ -934,6 +894,12 @@ void CXXRecordDecl::addedMember(Decl *D) {
// This is an extension in C++03.
data().PlainOldData = false;
}
+    // We delay updating destructor-relevant properties until
+ // addedSelectedDestructor.
+ // FIXME: Defer this for the other special member functions as well.
+ if (!Method->isIneligibleOrNotSelected()) {
+ addedEligibleSpecialMemberFunction(Method, SMKind);
+ }
}
return;
@@ -1393,6 +1359,54 @@ void CXXRecordDecl::addedMember(Decl *D) {
}
}
+void CXXRecordDecl::addedSelectedDestructor(CXXDestructorDecl *DD) {
+ DD->setIneligibleOrNotSelected(false);
+ addedEligibleSpecialMemberFunction(DD, SMF_Destructor);
+}
+
+void CXXRecordDecl::addedEligibleSpecialMemberFunction(const CXXMethodDecl *MD,
+ unsigned SMKind) {
+ if (const auto *DD = dyn_cast<CXXDestructorDecl>(MD)) {
+ if (DD->isUserProvided())
+ data().HasIrrelevantDestructor = false;
+ // If the destructor is explicitly defaulted and not trivial or not public
+ // or if the destructor is deleted, we clear HasIrrelevantDestructor in
+ // finishedDefaultedOrDeletedMember.
+
+ // C++11 [class.dtor]p5:
+ // A destructor is trivial if [...] the destructor is not virtual.
+ if (DD->isVirtual()) {
+ data().HasTrivialSpecialMembers &= ~SMF_Destructor;
+ data().HasTrivialSpecialMembersForCall &= ~SMF_Destructor;
+ }
+
+ if (DD->isNoReturn())
+ data().IsAnyDestructorNoReturn = true;
+ }
+
+ if (!MD->isImplicit() && !MD->isUserProvided()) {
+ // This method is user-declared but not user-provided. We can't work
+ // out whether it's trivial yet (not until we get to the end of the
+ // class). We'll handle this method in
+ // finishedDefaultedOrDeletedMember.
+ } else if (MD->isTrivial()) {
+ data().HasTrivialSpecialMembers |= SMKind;
+ data().HasTrivialSpecialMembersForCall |= SMKind;
+ } else if (MD->isTrivialForCall()) {
+ data().HasTrivialSpecialMembersForCall |= SMKind;
+ data().DeclaredNonTrivialSpecialMembers |= SMKind;
+ } else {
+ data().DeclaredNonTrivialSpecialMembers |= SMKind;
+ // If this is a user-provided function, do not set
+ // DeclaredNonTrivialSpecialMembersForCall here since we don't know
+ // yet whether the method would be considered non-trivial for the
+ // purpose of calls (attribute "trivial_abi" can be dropped from the
+ // class later, which can change the special method's triviality).
+ if (!MD->isUserProvided())
+ data().DeclaredNonTrivialSpecialMembersForCall |= SMKind;
+ }
+}
+
void CXXRecordDecl::finishedDefaultedOrDeletedMember(CXXMethodDecl *D) {
assert(!D->isImplicit() && !D->isUserProvided());
@@ -1895,7 +1909,14 @@ CXXDestructorDecl *CXXRecordDecl::getDestructor() const {
DeclContext::lookup_result R = lookup(Name);
- return R.empty() ? nullptr : dyn_cast<CXXDestructorDecl>(R.front());
+ // If a destructor was marked as not selected, we skip it. We don't always
+  // have a selected destructor, e.g. for dependent types or unnamed structs.
+ for (auto *Decl : R) {
+    auto *DD = dyn_cast<CXXDestructorDecl>(Decl);
+ if (DD && !DD->isIneligibleOrNotSelected())
+ return DD;
+ }
+ return nullptr;
}
static bool isDeclContextInNamespace(const DeclContext *DC) {
@@ -2968,8 +2989,10 @@ UsingShadowDecl::UsingShadowDecl(Kind K, ASTContext &C, DeclContext *DC,
BaseUsingDecl *Introducer, NamedDecl *Target)
: NamedDecl(K, DC, Loc, Name), redeclarable_base(C),
UsingOrNextShadow(Introducer) {
- if (Target)
+ if (Target) {
+ assert(!isa<UsingShadowDecl>(Target));
setTargetDecl(Target);
+ }
setImplicit();
}
@@ -3363,6 +3386,38 @@ APValue &MSGuidDecl::getAsAPValue() const {
return APVal;
}
+void UnnamedGlobalConstantDecl::anchor() {}
+
+UnnamedGlobalConstantDecl::UnnamedGlobalConstantDecl(const ASTContext &C,
+ DeclContext *DC,
+ QualType Ty,
+ const APValue &Val)
+ : ValueDecl(Decl::UnnamedGlobalConstant, DC, SourceLocation(),
+ DeclarationName(), Ty),
+ Value(Val) {
+  // Clean up the embedded APValue if required (note that our destructor is
+  // never run).
+ if (Value.needsCleanup())
+ C.addDestruction(&Value);
+}
+
+UnnamedGlobalConstantDecl *
+UnnamedGlobalConstantDecl::Create(const ASTContext &C, QualType T,
+ const APValue &Value) {
+ DeclContext *DC = C.getTranslationUnitDecl();
+ return new (C, DC) UnnamedGlobalConstantDecl(C, DC, T, Value);
+}
+
+UnnamedGlobalConstantDecl *
+UnnamedGlobalConstantDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
+ return new (C, ID)
+ UnnamedGlobalConstantDecl(C, nullptr, QualType(), APValue());
+}
+
+void UnnamedGlobalConstantDecl::printName(llvm::raw_ostream &OS) const {
+ OS << "unnamed-global-constant";
+}
+
static const char *getAccessName(AccessSpecifier AS) {
switch (AS) {
case AS_none:
diff --git a/contrib/llvm-project/clang/lib/AST/DeclObjC.cpp b/contrib/llvm-project/clang/lib/AST/DeclObjC.cpp
index f15dd78929e2..15c545b59c81 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclObjC.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclObjC.cpp
@@ -1647,6 +1647,11 @@ ObjCIvarDecl *ObjCInterfaceDecl::all_declared_ivar_begin() {
ObjCIvarDecl *curIvar = nullptr;
if (!data().IvarList) {
+ // Force ivar deserialization upfront, before building IvarList.
+ (void)ivar_empty();
+ for (const auto *Ext : known_extensions()) {
+ (void)Ext->ivar_empty();
+ }
if (!ivar_empty()) {
ObjCInterfaceDecl::ivar_iterator I = ivar_begin(), E = ivar_end();
data().IvarList = *I; ++I;
@@ -1838,8 +1843,8 @@ ObjCIvarDecl *ObjCIvarDecl::CreateDeserialized(ASTContext &C, unsigned ID) {
ObjCIvarDecl::None, nullptr, false);
}
-const ObjCInterfaceDecl *ObjCIvarDecl::getContainingInterface() const {
- const auto *DC = cast<ObjCContainerDecl>(getDeclContext());
+ObjCInterfaceDecl *ObjCIvarDecl::getContainingInterface() {
+ auto *DC = cast<ObjCContainerDecl>(getDeclContext());
switch (DC->getKind()) {
default:
@@ -1849,7 +1854,7 @@ const ObjCInterfaceDecl *ObjCIvarDecl::getContainingInterface() const {
// Ivars can only appear in class extension categories.
case ObjCCategory: {
- const auto *CD = cast<ObjCCategoryDecl>(DC);
+ auto *CD = cast<ObjCCategoryDecl>(DC);
assert(CD->IsClassExtension() && "invalid container for ivar!");
return CD->getClassInterface();
}
diff --git a/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp b/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp
index c3f1d1544f79..c6a392c9c01b 100644
--- a/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclPrinter.cpp
@@ -680,6 +680,10 @@ void DeclPrinter::VisitFunctionDecl(FunctionDecl *D) {
if (FT->isVariadic()) {
if (D->getNumParams()) POut << ", ";
POut << "...";
+ } else if (!D->getNumParams() && !Context.getLangOpts().CPlusPlus) {
+    // The function has a prototype, so it needs an explicit 'void' to
+    // remain a prototype when printed as C.
+ POut << "void";
}
} else if (D->doesThisDeclarationHaveABody() && !D->hasPrototype()) {
for (unsigned i = 0, e = D->getNumParams(); i != e; ++i) {
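[Editor's note: for instance, with this change the C prototype form survives printing:]

    void f(void);  // printed as "void f(void)"; printing "void f()" would
                   // turn it into an unprototyped declaration in C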
@@ -891,12 +895,15 @@ void DeclPrinter::VisitVarDecl(VarDecl *D) {
Expr *Init = D->getInit();
if (!Policy.SuppressInitializers && Init) {
bool ImplicitInit = false;
- if (CXXConstructExpr *Construct =
- dyn_cast<CXXConstructExpr>(Init->IgnoreImplicit())) {
+ if (D->isCXXForRangeDecl()) {
+ // FIXME: We should print the range expression instead.
+ ImplicitInit = true;
+ } else if (CXXConstructExpr *Construct =
+ dyn_cast<CXXConstructExpr>(Init->IgnoreImplicit())) {
if (D->getInitStyle() == VarDecl::CallInit &&
!Construct->isListInitialization()) {
ImplicitInit = Construct->getNumArgs() == 0 ||
- Construct->getArg(0)->isDefaultArgument();
+ Construct->getArg(0)->isDefaultArgument();
}
}
if (!ImplicitInit) {
diff --git a/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp b/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp
index d9ff3517a589..e7e5f355809b 100755
--- a/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp
+++ b/contrib/llvm-project/clang/lib/AST/DeclTemplate.cpp
@@ -1335,10 +1335,14 @@ VarTemplateDecl *VarTemplateSpecializationDecl::getSpecializedTemplate() const {
void VarTemplateSpecializationDecl::setTemplateArgsInfo(
const TemplateArgumentListInfo &ArgsInfo) {
- TemplateArgsInfo.setLAngleLoc(ArgsInfo.getLAngleLoc());
- TemplateArgsInfo.setRAngleLoc(ArgsInfo.getRAngleLoc());
- for (const TemplateArgumentLoc &Loc : ArgsInfo.arguments())
- TemplateArgsInfo.addArgument(Loc);
+ TemplateArgsInfo =
+ ASTTemplateArgumentListInfo::Create(getASTContext(), ArgsInfo);
+}
+
+void VarTemplateSpecializationDecl::setTemplateArgsInfo(
+ const ASTTemplateArgumentListInfo *ArgsInfo) {
+ TemplateArgsInfo =
+ ASTTemplateArgumentListInfo::Create(getASTContext(), ArgsInfo);
}
//===----------------------------------------------------------------------===//
@@ -1514,12 +1518,20 @@ void TemplateParamObjectDecl::printName(llvm::raw_ostream &OS) const {
}
void TemplateParamObjectDecl::printAsExpr(llvm::raw_ostream &OS) const {
- const ASTContext &Ctx = getASTContext();
- getType().getUnqualifiedType().print(OS, Ctx.getPrintingPolicy());
- printAsInit(OS);
+ printAsExpr(OS, getASTContext().getPrintingPolicy());
+}
+
+void TemplateParamObjectDecl::printAsExpr(llvm::raw_ostream &OS,
+ const PrintingPolicy &Policy) const {
+ getType().getUnqualifiedType().print(OS, Policy);
+ printAsInit(OS, Policy);
}
void TemplateParamObjectDecl::printAsInit(llvm::raw_ostream &OS) const {
- const ASTContext &Ctx = getASTContext();
- getValue().printPretty(OS, Ctx, getType());
+ printAsInit(OS, getASTContext().getPrintingPolicy());
+}
+
+void TemplateParamObjectDecl::printAsInit(llvm::raw_ostream &OS,
+ const PrintingPolicy &Policy) const {
+ getValue().printPretty(OS, Policy, getType(), &getASTContext());
}
diff --git a/contrib/llvm-project/clang/lib/AST/Expr.cpp b/contrib/llvm-project/clang/lib/AST/Expr.cpp
index 45e94847caee..ca477e6500c5 100644
--- a/contrib/llvm-project/clang/lib/AST/Expr.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Expr.cpp
@@ -31,6 +31,7 @@
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/Lexer.h"
#include "clang/Lex/LiteralSupport.h"
+#include "clang/Lex/Preprocessor.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
@@ -960,40 +961,10 @@ void CharacterLiteral::print(unsigned Val, CharacterKind Kind,
break;
}
- switch (Val) {
- case '\\':
- OS << "'\\\\'";
- break;
- case '\'':
- OS << "'\\''";
- break;
- case '\a':
- // TODO: K&R: the meaning of '\\a' is different in traditional C
- OS << "'\\a'";
- break;
- case '\b':
- OS << "'\\b'";
- break;
- // Nonstandard escape sequence.
- /*case '\e':
- OS << "'\\e'";
- break;*/
- case '\f':
- OS << "'\\f'";
- break;
- case '\n':
- OS << "'\\n'";
- break;
- case '\r':
- OS << "'\\r'";
- break;
- case '\t':
- OS << "'\\t'";
- break;
- case '\v':
- OS << "'\\v'";
- break;
- default:
+ StringRef Escaped = escapeCStyle<EscapeChar::Single>(Val);
+ if (!Escaped.empty()) {
+ OS << "'" << Escaped << "'";
+ } else {
// A character literal might be sign-extended, which
// would result in an invalid \U escape sequence.
// FIXME: multicharacter literals such as '\xFF\xFF\xFF\xFF'
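[Editor's note: the deleted switch moved into a shared escapeCStyle helper, introduced earlier in this file and outside this excerpt. Its contract, as relied on here:]

    // escapeCStyle<EscapeChar::Single>('\n')  yields "\\n" (short escape),
    // escapeCStyle<EscapeChar::Single>(0xFF)  yields ""    (no short escape;
    //   control falls through to the \x / \u rendering below).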
@@ -1052,7 +1023,7 @@ unsigned StringLiteral::mapCharByteWidth(TargetInfo const &Target,
StringKind SK) {
unsigned CharByteWidth = 0;
switch (SK) {
- case Ascii:
+ case Ordinary:
case UTF8:
CharByteWidth = Target.getCharWidth();
break;
@@ -1152,7 +1123,8 @@ StringLiteral *StringLiteral::CreateEmpty(const ASTContext &Ctx,
void StringLiteral::outputString(raw_ostream &OS) const {
switch (getKind()) {
- case Ascii: break; // no prefix.
+ case Ordinary:
+ break; // no prefix.
case Wide: OS << 'L'; break;
case UTF8: OS << "u8"; break;
case UTF16: OS << 'u'; break;
@@ -1163,8 +1135,9 @@ void StringLiteral::outputString(raw_ostream &OS) const {
unsigned LastSlashX = getLength();
for (unsigned I = 0, N = getLength(); I != N; ++I) {
- switch (uint32_t Char = getCodeUnit(I)) {
- default:
+ uint32_t Char = getCodeUnit(I);
+ StringRef Escaped = escapeCStyle<EscapeChar::Double>(Char);
+ if (Escaped.empty()) {
// FIXME: Convert UTF-8 back to codepoints before rendering.
// Convert UTF-16 surrogate pairs back to codepoints before rendering.
@@ -1192,7 +1165,7 @@ void StringLiteral::outputString(raw_ostream &OS) const {
for (/**/; Shift >= 0; Shift -= 4)
OS << Hex[(Char >> Shift) & 15];
LastSlashX = I;
- break;
+ continue;
}
if (Char > 0xffff)
@@ -1205,7 +1178,7 @@ void StringLiteral::outputString(raw_ostream &OS) const {
<< Hex[(Char >> 8) & 15]
<< Hex[(Char >> 4) & 15]
<< Hex[(Char >> 0) & 15];
- break;
+ continue;
}
// If we used \x... for the previous character, and this character is a
@@ -1230,17 +1203,9 @@ void StringLiteral::outputString(raw_ostream &OS) const {
<< (char)('0' + ((Char >> 6) & 7))
<< (char)('0' + ((Char >> 3) & 7))
<< (char)('0' + ((Char >> 0) & 7));
- break;
- // Handle some common non-printable cases to make dumps prettier.
- case '\\': OS << "\\\\"; break;
- case '"': OS << "\\\""; break;
- case '\a': OS << "\\a"; break;
- case '\b': OS << "\\b"; break;
- case '\f': OS << "\\f"; break;
- case '\n': OS << "\\n"; break;
- case '\r': OS << "\\r"; break;
- case '\t': OS << "\\t"; break;
- case '\v': OS << "\\v"; break;
+ } else {
+ // Handle some common non-printable cases to make dumps prettier.
+ OS << Escaped;
}
}
OS << '"';
@@ -1267,7 +1232,7 @@ StringLiteral::getLocationOfByte(unsigned ByteNo, const SourceManager &SM,
const LangOptions &Features,
const TargetInfo &Target, unsigned *StartToken,
unsigned *StartTokenByteOffset) const {
- assert((getKind() == StringLiteral::Ascii ||
+ assert((getKind() == StringLiteral::Ordinary ||
getKind() == StringLiteral::UTF8) &&
"Only narrow string literals are currently supported");
@@ -1515,8 +1480,7 @@ Decl *Expr::getReferencedDeclOfCallee() {
/// If this is a call to a builtin, return the builtin ID. If not, return 0.
unsigned CallExpr::getBuiltinCallee() const {
- auto *FDecl =
- dyn_cast_or_null<FunctionDecl>(getCallee()->getReferencedDeclOfCallee());
+ auto *FDecl = getDirectCallee();
return FDecl ? FDecl->getBuiltinID() : 0;
}
@@ -1559,6 +1523,11 @@ const Attr *CallExpr::getUnusedResultAttr(const ASTContext &Ctx) const {
if (const auto *A = TD->getAttr<WarnUnusedResultAttr>())
return A;
+ for (const auto *TD = getCallReturnType(Ctx)->getAs<TypedefType>(); TD;
+ TD = TD->desugar()->getAs<TypedefType>())
+ if (const auto *A = TD->getDecl()->getAttr<WarnUnusedResultAttr>())
+ return A;
+
// Otherwise, see if the callee is marked nodiscard and return that attribute
// instead.
const Decl *D = getCalleeDecl();
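[Editor's note: the added loop desugars through typedefs of the call's return type, so, assuming the attribute is accepted on a typedef as this change implies, a warn_unused_result typedef now participates. must_use_t is a placeholder name:]

    typedef int must_use_t __attribute__((warn_unused_result));
    must_use_t get(void);
    void use(void) { get(); }  // now warned: result unused via the typedef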
@@ -1899,51 +1868,49 @@ const char *CastExpr::getCastKindName(CastKind CK) {
}
namespace {
- const Expr *skipImplicitTemporary(const Expr *E) {
- // Skip through reference binding to temporary.
- if (auto *Materialize = dyn_cast<MaterializeTemporaryExpr>(E))
- E = Materialize->getSubExpr();
+// Skip over implicit nodes produced as part of semantic analysis.
+// Designed for use with IgnoreExprNodes.
+Expr *ignoreImplicitSemaNodes(Expr *E) {
+ if (auto *Materialize = dyn_cast<MaterializeTemporaryExpr>(E))
+ return Materialize->getSubExpr();
- // Skip any temporary bindings; they're implicit.
- if (auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E))
- E = Binder->getSubExpr();
+ if (auto *Binder = dyn_cast<CXXBindTemporaryExpr>(E))
+ return Binder->getSubExpr();
- return E;
- }
+ if (auto *Full = dyn_cast<FullExpr>(E))
+ return Full->getSubExpr();
+
+ return E;
}
+} // namespace
Expr *CastExpr::getSubExprAsWritten() {
const Expr *SubExpr = nullptr;
- const CastExpr *E = this;
- do {
- SubExpr = skipImplicitTemporary(E->getSubExpr());
+
+ for (const CastExpr *E = this; E; E = dyn_cast<ImplicitCastExpr>(SubExpr)) {
+ SubExpr = IgnoreExprNodes(E->getSubExpr(), ignoreImplicitSemaNodes);
// Conversions by constructor and conversion functions have a
// subexpression describing the call; strip it off.
- if (E->getCastKind() == CK_ConstructorConversion)
- SubExpr =
- skipImplicitTemporary(cast<CXXConstructExpr>(SubExpr->IgnoreImplicit())->getArg(0));
- else if (E->getCastKind() == CK_UserDefinedConversion) {
- SubExpr = SubExpr->IgnoreImplicit();
- assert((isa<CXXMemberCallExpr>(SubExpr) ||
- isa<BlockExpr>(SubExpr)) &&
+ if (E->getCastKind() == CK_ConstructorConversion) {
+ SubExpr = IgnoreExprNodes(cast<CXXConstructExpr>(SubExpr)->getArg(0),
+ ignoreImplicitSemaNodes);
+ } else if (E->getCastKind() == CK_UserDefinedConversion) {
+ assert((isa<CXXMemberCallExpr>(SubExpr) || isa<BlockExpr>(SubExpr)) &&
"Unexpected SubExpr for CK_UserDefinedConversion.");
if (auto *MCE = dyn_cast<CXXMemberCallExpr>(SubExpr))
SubExpr = MCE->getImplicitObjectArgument();
}
+ }
- // If the subexpression we're left with is an implicit cast, look
- // through that, too.
- } while ((E = dyn_cast<ImplicitCastExpr>(SubExpr)));
-
- return const_cast<Expr*>(SubExpr);
+ return const_cast<Expr *>(SubExpr);
}
NamedDecl *CastExpr::getConversionFunction() const {
const Expr *SubExpr = nullptr;
for (const CastExpr *E = this; E; E = dyn_cast<ImplicitCastExpr>(SubExpr)) {
- SubExpr = skipImplicitTemporary(E->getSubExpr());
+ SubExpr = IgnoreExprNodes(E->getSubExpr(), ignoreImplicitSemaNodes);
if (E->getCastKind() == CK_ConstructorConversion)
return cast<CXXConstructExpr>(SubExpr)->getConstructor();
@@ -2175,26 +2142,11 @@ bool BinaryOperator::isNullPointerArithmeticExtension(ASTContext &Ctx,
return true;
}
-static QualType getDecayedSourceLocExprType(const ASTContext &Ctx,
- SourceLocExpr::IdentKind Kind) {
- switch (Kind) {
- case SourceLocExpr::File:
- case SourceLocExpr::Function: {
- QualType ArrTy = Ctx.getStringLiteralArrayType(Ctx.CharTy, 0);
- return Ctx.getPointerType(ArrTy->getAsArrayTypeUnsafe()->getElementType());
- }
- case SourceLocExpr::Line:
- case SourceLocExpr::Column:
- return Ctx.UnsignedIntTy;
- }
- llvm_unreachable("unhandled case");
-}
-
SourceLocExpr::SourceLocExpr(const ASTContext &Ctx, IdentKind Kind,
- SourceLocation BLoc, SourceLocation RParenLoc,
+ QualType ResultTy, SourceLocation BLoc,
+ SourceLocation RParenLoc,
DeclContext *ParentContext)
- : Expr(SourceLocExprClass, getDecayedSourceLocExprType(Ctx, Kind),
- VK_PRValue, OK_Ordinary),
+ : Expr(SourceLocExprClass, ResultTy, VK_PRValue, OK_Ordinary),
BuiltinLoc(BLoc), RParenLoc(RParenLoc), ParentContext(ParentContext) {
SourceLocExprBits.Kind = Kind;
setDependence(ExprDependence::None);
@@ -2210,6 +2162,8 @@ StringRef SourceLocExpr::getBuiltinStr() const {
return "__builtin_LINE";
case Column:
return "__builtin_COLUMN";
+ case SourceLocStruct:
+ return "__builtin_source_location";
}
llvm_unreachable("unexpected IdentKind!");
}
@@ -2242,11 +2196,12 @@ APValue SourceLocExpr::EvaluateInContext(const ASTContext &Ctx,
switch (getIdentKind()) {
case SourceLocExpr::File: {
SmallString<256> Path(PLoc.getFilename());
- Ctx.getLangOpts().remapPathPrefix(Path);
+ clang::Preprocessor::processPathForFileMacro(Path, Ctx.getLangOpts(),
+ Ctx.getTargetInfo());
return MakeStringLiteral(Path);
}
case SourceLocExpr::Function: {
- const Decl *CurDecl = dyn_cast_or_null<Decl>(Context);
+ const auto *CurDecl = dyn_cast<Decl>(Context);
return MakeStringLiteral(
CurDecl ? PredefinedExpr::ComputeName(PredefinedExpr::Function, CurDecl)
: std::string(""));
@@ -2259,6 +2214,55 @@ APValue SourceLocExpr::EvaluateInContext(const ASTContext &Ctx,
: PLoc.getColumn();
return APValue(IntVal);
}
+ case SourceLocExpr::SourceLocStruct: {
+    // Fill in a std::source_location::__impl structure by creating an
+    // artificial file-scoped unnamed global constant, and returning a
+    // pointer to that.
+ const CXXRecordDecl *ImplDecl = getType()->getPointeeCXXRecordDecl();
+ assert(ImplDecl);
+
+ // Construct an APValue for the __impl struct, and get or create a Decl
+ // corresponding to that. Note that we've already verified that the shape of
+ // the ImplDecl type is as expected.
+
+ APValue Value(APValue::UninitStruct(), 0, 4);
+ for (FieldDecl *F : ImplDecl->fields()) {
+ StringRef Name = F->getName();
+ if (Name == "_M_file_name") {
+ SmallString<256> Path(PLoc.getFilename());
+ clang::Preprocessor::processPathForFileMacro(Path, Ctx.getLangOpts(),
+ Ctx.getTargetInfo());
+ Value.getStructField(F->getFieldIndex()) = MakeStringLiteral(Path);
+ } else if (Name == "_M_function_name") {
+        // Note: this emits the PrettyFunction name -- different from what
+ // __builtin_FUNCTION() above returns!
+ const auto *CurDecl = dyn_cast<Decl>(Context);
+ Value.getStructField(F->getFieldIndex()) = MakeStringLiteral(
+ CurDecl && !isa<TranslationUnitDecl>(CurDecl)
+ ? StringRef(PredefinedExpr::ComputeName(
+ PredefinedExpr::PrettyFunction, CurDecl))
+ : "");
+ } else if (Name == "_M_line") {
+ QualType Ty = F->getType();
+ llvm::APSInt IntVal(Ctx.getIntWidth(Ty),
+ Ty->hasUnsignedIntegerRepresentation());
+ IntVal = PLoc.getLine();
+ Value.getStructField(F->getFieldIndex()) = APValue(IntVal);
+ } else if (Name == "_M_column") {
+ QualType Ty = F->getType();
+ llvm::APSInt IntVal(Ctx.getIntWidth(Ty),
+ Ty->hasUnsignedIntegerRepresentation());
+ IntVal = PLoc.getColumn();
+ Value.getStructField(F->getFieldIndex()) = APValue(IntVal);
+ }
+ }
+
+ UnnamedGlobalConstantDecl *GV =
+ Ctx.getUnnamedGlobalConstantDecl(getType()->getPointeeType(), Value);
+
+ return APValue(GV, CharUnits::Zero(), ArrayRef<APValue::LValuePathEntry>{},
+ false);
+ }
}
llvm_unreachable("unhandled case");
}
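[Editor's note: taken together, this lets the constant evaluator fold C++20's std::source_location::current() into a pointer to a synthesized constant __impl object, assuming a standard library that ships <source_location> with the layout Sema has already validated:]

    #include <source_location>
    constexpr std::source_location loc = std::source_location::current();
    static_assert(loc.line() > 0);  // reads the _M_line field filled above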
@@ -2462,8 +2466,12 @@ bool Expr::isReadIfDiscardedInCPlusPlus11() const {
}
// Objective-C++ extensions to the rule.
- if (isa<PseudoObjectExpr>(E) || isa<ObjCIvarRefExpr>(E))
+ if (isa<ObjCIvarRefExpr>(E))
return true;
+ if (const auto *POE = dyn_cast<PseudoObjectExpr>(E)) {
+ if (isa<ObjCPropertyRefExpr, ObjCSubscriptRefExpr>(POE->getSyntacticForm()))
+ return true;
+ }
return false;
}
@@ -2713,23 +2721,35 @@ bool Expr::isUnusedResultAWarning(const Expr *&WarnE, SourceLocation &Loc,
}
case ObjCPropertyRefExprClass:
+ case ObjCSubscriptRefExprClass:
WarnE = this;
Loc = getExprLoc();
R1 = getSourceRange();
return true;
case PseudoObjectExprClass: {
- const PseudoObjectExpr *PO = cast<PseudoObjectExpr>(this);
+ const auto *POE = cast<PseudoObjectExpr>(this);
- // Only complain about things that have the form of a getter.
- if (isa<UnaryOperator>(PO->getSyntacticForm()) ||
- isa<BinaryOperator>(PO->getSyntacticForm()))
- return false;
+ // For some syntactic forms, we should always warn.
+ if (isa<ObjCPropertyRefExpr, ObjCSubscriptRefExpr>(
+ POE->getSyntacticForm())) {
+ WarnE = this;
+ Loc = getExprLoc();
+ R1 = getSourceRange();
+ return true;
+ }
- WarnE = this;
- Loc = getExprLoc();
- R1 = getSourceRange();
- return true;
+ // For others, we should never warn.
+ if (auto *BO = dyn_cast<BinaryOperator>(POE->getSyntacticForm()))
+ if (BO->isAssignmentOp())
+ return false;
+ if (auto *UO = dyn_cast<UnaryOperator>(POE->getSyntacticForm()))
+ if (UO->isIncrementDecrementOp())
+ return false;
+
+ // Otherwise, warn if the result expression would warn.
+ const Expr *Result = POE->getResultExpr();
+ return Result && Result->isUnusedResultAWarning(WarnE, Loc, R1, R2, Ctx);
}
case StmtExprClass: {
@@ -3341,9 +3361,9 @@ bool Expr::isConstantInitializer(ASTContext &Ctx, bool IsForRef,
}
bool CallExpr::isBuiltinAssumeFalse(const ASTContext &Ctx) const {
- const FunctionDecl* FD = getDirectCallee();
- if (!FD || (FD->getBuiltinID() != Builtin::BI__assume &&
- FD->getBuiltinID() != Builtin::BI__builtin_assume))
+ unsigned BuiltinID = getBuiltinCallee();
+ if (BuiltinID != Builtin::BI__assume &&
+ BuiltinID != Builtin::BI__builtin_assume)
return false;
const Expr* Arg = getArg(0);
@@ -3352,6 +3372,10 @@ bool CallExpr::isBuiltinAssumeFalse(const ASTContext &Ctx) const {
Arg->EvaluateAsBooleanCondition(ArgVal, Ctx) && !ArgVal;
}
+bool CallExpr::isCallToStdMove() const {
+ return getBuiltinCallee() == Builtin::BImove;
+}
+
namespace {
/// Look for any side effects within a Stmt.
class SideEffectFinder : public ConstEvaluatedExprVisitor<SideEffectFinder> {
diff --git a/contrib/llvm-project/clang/lib/AST/ExprCXX.cpp b/contrib/llvm-project/clang/lib/AST/ExprCXX.cpp
index 55f3c3541b7f..891105692980 100644
--- a/contrib/llvm-project/clang/lib/AST/ExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExprCXX.cpp
@@ -196,7 +196,7 @@ CXXNewExpr::CXXNewExpr(bool IsGlobalNew, FunctionDecl *OperatorNew,
"Only NoInit can have no initializer!");
CXXNewExprBits.IsGlobalNew = IsGlobalNew;
- CXXNewExprBits.IsArray = ArraySize.hasValue();
+ CXXNewExprBits.IsArray = ArraySize.has_value();
CXXNewExprBits.ShouldPassAlignment = ShouldPassAlignment;
CXXNewExprBits.UsualArrayDeleteWantsSize = UsualArrayDeleteWantsSize;
CXXNewExprBits.StoredInitializationStyle =
@@ -248,7 +248,7 @@ CXXNewExpr::Create(const ASTContext &Ctx, bool IsGlobalNew,
InitializationStyle InitializationStyle, Expr *Initializer,
QualType Ty, TypeSourceInfo *AllocatedTypeInfo,
SourceRange Range, SourceRange DirectInitRange) {
- bool IsArray = ArraySize.hasValue();
+ bool IsArray = ArraySize.has_value();
bool HasInit = Initializer != nullptr;
unsigned NumPlacementArgs = PlacementArgs.size();
bool IsParenTypeId = TypeIdParens.isValid();
diff --git a/contrib/llvm-project/clang/lib/AST/ExprClassification.cpp b/contrib/llvm-project/clang/lib/AST/ExprClassification.cpp
index 6998e28fd2ea..6c122cac2c60 100644
--- a/contrib/llvm-project/clang/lib/AST/ExprClassification.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExprClassification.cpp
@@ -465,14 +465,11 @@ static Cl::Kinds ClassifyDecl(ASTContext &Ctx, const Decl *D) {
islvalue = NTTParm->getType()->isReferenceType() ||
NTTParm->getType()->isRecordType();
else
- islvalue = isa<VarDecl>(D) || isa<FieldDecl>(D) ||
- isa<IndirectFieldDecl>(D) ||
- isa<BindingDecl>(D) ||
- isa<MSGuidDecl>(D) ||
- isa<TemplateParamObjectDecl>(D) ||
- (Ctx.getLangOpts().CPlusPlus &&
- (isa<FunctionDecl>(D) || isa<MSPropertyDecl>(D) ||
- isa<FunctionTemplateDecl>(D)));
+ islvalue =
+ isa<VarDecl, FieldDecl, IndirectFieldDecl, BindingDecl, MSGuidDecl,
+ UnnamedGlobalConstantDecl, TemplateParamObjectDecl>(D) ||
+ (Ctx.getLangOpts().CPlusPlus &&
+ (isa<FunctionDecl, MSPropertyDecl, FunctionTemplateDecl>(D)));
return islvalue ? Cl::CL_LValue : Cl::CL_PRValue;
}
diff --git a/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp b/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
index 6ebf9f6a2578..40e044534f0c 100644
--- a/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ExprConstant.cpp
@@ -1978,7 +1978,8 @@ static bool IsGlobalLValue(APValue::LValueBase B) {
return true;
// ... the address of a function,
// ... the address of a GUID [MS extension],
- return isa<FunctionDecl>(D) || isa<MSGuidDecl>(D);
+ // ... the address of an unnamed global constant
+ return isa<FunctionDecl, MSGuidDecl, UnnamedGlobalConstantDecl>(D);
}
if (B.is<TypeInfoLValue>() || B.is<DynamicAllocLValue>())
@@ -2013,6 +2014,10 @@ static bool IsGlobalLValue(APValue::LValueBase B) {
// Block variables at global or local static scope.
case Expr::BlockExprClass:
return !cast<BlockExpr>(E)->getBlockDecl()->hasCaptures();
+ // The APValue generated from a __builtin_source_location will be emitted as a
+ // literal.
+ case Expr::SourceLocExprClass:
+ return true;
case Expr::ImplicitValueInitExprClass:
// FIXME:
// We can never form an lvalue with an implicit value initialization as its
@@ -2547,18 +2552,15 @@ static bool HandleFloatToIntCast(EvalInfo &Info, const Expr *E,
return true;
}
-/// Get rounding mode used for evaluation of the specified expression.
-/// \param[out] DynamicRM Is set to true is the requested rounding mode is
-/// dynamic.
+/// Get rounding mode to use in evaluation of the specified expression.
+///
/// If rounding mode is unknown at compile time, still try to evaluate the
/// expression. If the result is exact, it does not depend on rounding mode.
/// So return "tonearest" mode instead of "dynamic".
-static llvm::RoundingMode getActiveRoundingMode(EvalInfo &Info, const Expr *E,
- bool &DynamicRM) {
+static llvm::RoundingMode getActiveRoundingMode(EvalInfo &Info, const Expr *E) {
llvm::RoundingMode RM =
E->getFPFeaturesInEffect(Info.Ctx.getLangOpts()).getRoundingMode();
- DynamicRM = (RM == llvm::RoundingMode::Dynamic);
- if (DynamicRM)
+ if (RM == llvm::RoundingMode::Dynamic)
RM = llvm::RoundingMode::NearestTiesToEven;
return RM;
}
@@ -2582,14 +2584,14 @@ static bool checkFloatingPointResult(EvalInfo &Info, const Expr *E,
if ((St != APFloat::opOK) &&
(FPO.getRoundingMode() == llvm::RoundingMode::Dynamic ||
- FPO.getFPExceptionMode() != LangOptions::FPE_Ignore ||
+ FPO.getExceptionMode() != LangOptions::FPE_Ignore ||
FPO.getAllowFEnvAccess())) {
Info.FFDiag(E, diag::note_constexpr_float_arithmetic_strict);
return false;
}
if ((St & APFloat::opStatus::opInvalidOp) &&
- FPO.getFPExceptionMode() != LangOptions::FPE_Ignore) {
+ FPO.getExceptionMode() != LangOptions::FPE_Ignore) {
// There is no usefully definable result.
Info.FFDiag(E);
return false;
@@ -2608,8 +2610,7 @@ static bool HandleFloatToFloatCast(EvalInfo &Info, const Expr *E,
QualType SrcType, QualType DestType,
APFloat &Result) {
assert(isa<CastExpr>(E) || isa<CompoundAssignOperator>(E));
- bool DynamicRM;
- llvm::RoundingMode RM = getActiveRoundingMode(Info, E, DynamicRM);
+ llvm::RoundingMode RM = getActiveRoundingMode(Info, E);
APFloat::opStatus St;
APFloat Value = Result;
bool ignored;
@@ -2844,8 +2845,7 @@ static bool handleIntIntBinOp(EvalInfo &Info, const Expr *E, const APSInt &LHS,
static bool handleFloatFloatBinOp(EvalInfo &Info, const BinaryOperator *E,
APFloat &LHS, BinaryOperatorKind Opcode,
const APFloat &RHS) {
- bool DynamicRM;
- llvm::RoundingMode RM = getActiveRoundingMode(Info, E, DynamicRM);
+ llvm::RoundingMode RM = getActiveRoundingMode(Info, E);
APFloat::opStatus St;
switch (Opcode) {
default:
@@ -4027,6 +4027,16 @@ static CompleteObject findCompleteObject(EvalInfo &Info, const Expr *E,
return CompleteObject(LVal.Base, &V, GD->getType());
}
+ // Allow reading the APValue from an UnnamedGlobalConstantDecl.
+ if (auto *GCD = dyn_cast<UnnamedGlobalConstantDecl>(D)) {
+ if (isModification(AK)) {
+ Info.FFDiag(E, diag::note_constexpr_modify_global);
+ return CompleteObject();
+ }
+ return CompleteObject(LVal.Base, const_cast<APValue *>(&GCD->getValue()),
+ GCD->getType());
+ }
+
// Allow reading from template parameter objects.
if (auto *TPO = dyn_cast<TemplateParamObjectDecl>(D)) {
if (isModification(AK)) {
@@ -4247,9 +4257,33 @@ handleLValueToRValueConversion(EvalInfo &Info, const Expr *Conv, QualType Type,
Info.FFDiag(Conv);
return false;
}
+
APValue Lit;
if (!Evaluate(Lit, Info, CLE->getInitializer()))
return false;
+
+  // According to the GCC info page:
+ //
+ // 6.28 Compound Literals
+ //
+ // As an optimization, G++ sometimes gives array compound literals longer
+ // lifetimes: when the array either appears outside a function or has a
+ // const-qualified type. If foo and its initializer had elements of type
+ // char *const rather than char *, or if foo were a global variable, the
+ // array would have static storage duration. But it is probably safest
+ // just to avoid the use of array compound literals in C++ code.
+ //
+ // Obey that rule by checking constness for converted array types.
+
+ QualType CLETy = CLE->getType();
+ if (CLETy->isArrayType() && !Type->isArrayType()) {
+ if (!CLETy.isConstant(Info.Ctx)) {
+ Info.FFDiag(Conv);
+ Info.Note(CLE->getExprLoc(), diag::note_declared_at);
+ return false;
+ }
+ }
+
CompleteObject LitObj(LVal.Base, &Lit, Base->getType());
return extractSubobject(Info, Conv, LitObj, LVal.Designator, RVal, AK);
} else if (isa<StringLiteral>(Base) || isa<PredefinedExpr>(Base)) {
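[Editor's note: a sketch of the rule now enforced; compound literals in C++ are a GNU extension that Clang accepts:]

    constexpr int a = ((const int[]){1, 2, 3})[0];  // const array: still OK
    // constexpr int b = ((int[]){1, 2, 3})[0];     // non-const array:
    //                                              // diagnosed by FFDiag above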
@@ -5006,6 +5040,18 @@ static EvalStmtResult EvaluateSwitch(StmtResult &Result, EvalInfo &Info,
llvm_unreachable("Invalid EvalStmtResult!");
}
+static bool CheckLocalVariableDeclaration(EvalInfo &Info, const VarDecl *VD) {
+ // An expression E is a core constant expression unless the evaluation of E
+  // would evaluate one of the following [C++2b]: control flow that passes
+  // through a declaration of a variable with static or thread storage duration.
+ if (VD->isLocalVarDecl() && VD->isStaticLocal()) {
+ Info.CCEDiag(VD->getLocation(), diag::note_constexpr_static_local)
+ << (VD->getTSCSpec() == TSCS_unspecified ? 0 : 1) << VD;
+ return false;
+ }
+ return true;
+}
+
// Evaluate a statement.
static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
const Stmt *S, const SwitchCase *Case) {
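[Editor's note: a minimal sketch of what the new check rejects, assuming C++2b mode where the declaration itself is accepted:]

    constexpr int f() {
      static int x = 42;  // note_constexpr_static_local fires when evaluation
      return x;           // flows through this declaration
    }
    // constexpr int y = f();  // therefore not a constant expression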
@@ -5116,6 +5162,8 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
const DeclStmt *DS = cast<DeclStmt>(S);
for (const auto *D : DS->decls()) {
if (const auto *VD = dyn_cast<VarDecl>(D)) {
+ if (!CheckLocalVariableDeclaration(Info, VD))
+ return ESR_Failed;
if (VD->hasLocalStorage() && !VD->getInit())
if (!EvaluateVarDecl(Info, VD))
return ESR_Failed;
@@ -5159,6 +5207,9 @@ static EvalStmtResult EvaluateStmt(StmtResult &Result, EvalInfo &Info,
case Stmt::DeclStmtClass: {
const DeclStmt *DS = cast<DeclStmt>(S);
for (const auto *D : DS->decls()) {
+ const VarDecl *VD = dyn_cast_or_null<VarDecl>(D);
+ if (VD && !CheckLocalVariableDeclaration(Info, VD))
+ return ESR_Failed;
// Each declaration initialization is its own full-expression.
FullExpressionRAII Scope(Info);
if (!EvaluateDecl(Info, D) && !Info.noteFailure())
@@ -6507,7 +6558,7 @@ static bool HandleDestructionImpl(EvalInfo &Info, SourceLocation CallLoc,
// We don't have a good way to iterate fields in reverse, so collect all the
// fields first and then walk them backwards.
- SmallVector<FieldDecl*, 16> Fields(RD->field_begin(), RD->field_end());
+ SmallVector<FieldDecl*, 16> Fields(RD->fields());
for (const FieldDecl *FD : llvm::reverse(Fields)) {
if (FD->isUnnamedBitfield())
continue;
@@ -8098,6 +8149,7 @@ public:
bool VisitVarDecl(const Expr *E, const VarDecl *VD);
bool VisitUnaryPreIncDec(const UnaryOperator *UO);
+ bool VisitCallExpr(const CallExpr *E);
bool VisitDeclRefExpr(const DeclRefExpr *E);
bool VisitPredefinedExpr(const PredefinedExpr *E) { return Success(E); }
bool VisitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
@@ -8161,7 +8213,8 @@ static bool EvaluateLValue(const Expr *E, LValue &Result, EvalInfo &Info,
bool LValueExprEvaluator::VisitDeclRefExpr(const DeclRefExpr *E) {
const NamedDecl *D = E->getDecl();
- if (isa<FunctionDecl, MSGuidDecl, TemplateParamObjectDecl>(D))
+ if (isa<FunctionDecl, MSGuidDecl, TemplateParamObjectDecl,
+ UnnamedGlobalConstantDecl>(D))
return Success(cast<ValueDecl>(D));
if (const VarDecl *VD = dyn_cast<VarDecl>(D))
return VisitVarDecl(E, VD);
@@ -8262,6 +8315,20 @@ bool LValueExprEvaluator::VisitVarDecl(const Expr *E, const VarDecl *VD) {
return Success(*V, E);
}
+bool LValueExprEvaluator::VisitCallExpr(const CallExpr *E) {
+ switch (E->getBuiltinCallee()) {
+ case Builtin::BIas_const:
+ case Builtin::BIforward:
+ case Builtin::BImove:
+ case Builtin::BImove_if_noexcept:
+ if (cast<FunctionDecl>(E->getCalleeDecl())->isConstexpr())
+ return Visit(E->getArg(0));
+ break;
+ }
+
+ return ExprEvaluatorBaseTy::VisitCallExpr(E);
+}
+
bool LValueExprEvaluator::VisitMaterializeTemporaryExpr(
const MaterializeTemporaryExpr *E) {
// Walk through the expression to find the materialized temporary itself.
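[Editor's note: with this, the four cast-like standard helpers are folded to a direct visit of their argument during lvalue evaluation, assuming the constexpr declarations the standard library provides:]

    #include <utility>
    constexpr int i = 7;
    static_assert(&std::as_const(i) == &i);  // evaluated as Visit(E->getArg(0))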
@@ -8393,7 +8460,8 @@ bool LValueExprEvaluator::VisitMemberExpr(const MemberExpr *E) {
bool LValueExprEvaluator::VisitArraySubscriptExpr(const ArraySubscriptExpr *E) {
// FIXME: Deal with vectors as array subscript bases.
- if (E->getBase()->getType()->isVectorType())
+ if (E->getBase()->getType()->isVectorType() ||
+ E->getBase()->getType()->isVLSTBuiltinType())
return Error(E);
APSInt Index;
@@ -8526,7 +8594,7 @@ static bool getBytesReturnedByAllocSizeCall(const ASTContext &Ctx,
Into = ExprResult.Val.getInt();
if (Into.isNegative() || !Into.isIntN(BitsInSizeT))
return false;
- Into = Into.zextOrSelf(BitsInSizeT);
+ Into = Into.zext(BitsInSizeT);
return true;
};
@@ -8585,7 +8653,7 @@ static bool evaluateLValueAsAllocSize(EvalInfo &Info, APValue::LValueBase Base,
return false;
const Expr *Init = VD->getAnyInitializer();
- if (!Init)
+ if (!Init || Init->getType().isNull())
return false;
const Expr *E = Init->IgnoreParens();
@@ -8701,7 +8769,7 @@ public:
bool VisitCXXNewExpr(const CXXNewExpr *E);
bool VisitSourceLocExpr(const SourceLocExpr *E) {
- assert(E->isStringType() && "SourceLocExpr isn't a pointer type?");
+ assert(!E->isIntType() && "SourceLocExpr isn't a pointer type?");
APValue LValResult = E->EvaluateInContext(
Info.Ctx, Info.CurrentCall->CurSourceLocExprScope.getDefaultExpr());
Result.setFrom(Info.Ctx, LValResult);
@@ -8718,7 +8786,7 @@ public:
ArrayType::Normal, 0);
StringLiteral *SL =
- StringLiteral::Create(Info.Ctx, ResultStr, StringLiteral::Ascii,
+ StringLiteral::Create(Info.Ctx, ResultStr, StringLiteral::Ordinary,
/*Pascal*/ false, ArrayTy, E->getLocation());
evaluateLValue(SL, Result);
@@ -8766,6 +8834,22 @@ bool PointerExprEvaluator::VisitUnaryAddrOf(const UnaryOperator *E) {
return evaluateLValue(E->getSubExpr(), Result);
}
+// Is the provided decl 'std::source_location::current'?
+static bool IsDeclSourceLocationCurrent(const FunctionDecl *FD) {
+ if (!FD)
+ return false;
+ const IdentifierInfo *FnII = FD->getIdentifier();
+ if (!FnII || !FnII->isStr("current"))
+ return false;
+
+ const auto *RD = dyn_cast<RecordDecl>(FD->getParent());
+ if (!RD)
+ return false;
+
+ const IdentifierInfo *ClassII = RD->getIdentifier();
+ return RD->isInStdNamespace() && ClassII && ClassII->isStr("source_location");
+}
+
bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
const Expr *SubExpr = E->getSubExpr();
@@ -8783,14 +8867,24 @@ bool PointerExprEvaluator::VisitCastExpr(const CastExpr *E) {
// permitted in constant expressions in C++11. Bitcasts from cv void* are
// also static_casts, but we disallow them as a resolution to DR1312.
if (!E->getType()->isVoidPointerType()) {
- if (!Result.InvalidBase && !Result.Designator.Invalid &&
+      // In some circumstances, we permit casting from void* to cv1 T* when the
+      // pointee object is actually a cv2 T.
+ bool VoidPtrCastMaybeOK =
+ !Result.InvalidBase && !Result.Designator.Invalid &&
!Result.IsNullPtr &&
Info.Ctx.hasSameUnqualifiedType(Result.Designator.getType(Info.Ctx),
- E->getType()->getPointeeType()) &&
- Info.getStdAllocatorCaller("allocate")) {
- // Inside a call to std::allocator::allocate and friends, we permit
- // casting from void* back to cv1 T* for a pointer that points to a
- // cv2 T.
+ E->getType()->getPointeeType());
+ // 1. We'll allow it in std::allocator::allocate, and anything which that
+ // calls.
+ // 2. HACK 2022-03-28: Work around an issue with libstdc++'s
+ // <source_location> header. Fixed in GCC 12 and later (2022-04-??).
+ // We'll allow it in the body of std::source_location::current. GCC's
+ // implementation had a parameter of type `void*`, and casts from
+ // that back to `const __impl*` in its body.
+ if (VoidPtrCastMaybeOK &&
+ (Info.getStdAllocatorCaller("allocate") ||
+ IsDeclSourceLocationCurrent(Info.CurrentCall->Callee))) {
+ // Permitted.
} else {
Result.Designator.setInvalid();
if (SubExpr->getType()->isVoidPointerType())
@@ -9013,6 +9107,8 @@ static bool isOneByteCharacterType(QualType T) {
bool PointerExprEvaluator::VisitBuiltinCallExpr(const CallExpr *E,
unsigned BuiltinOp) {
switch (BuiltinOp) {
+ case Builtin::BIaddressof:
+ case Builtin::BI__addressof:
case Builtin::BI__builtin_addressof:
return evaluateLValue(E->getArg(0), Result);
case Builtin::BI__builtin_assume_aligned: {
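[Editor's note: so std::addressof, typically implemented over __builtin_addressof and matched here also under the BIaddressof/BI__addressof library builtin IDs, constant-evaluates directly:]

    #include <memory>
    constexpr int x = 0;
    static_assert(std::addressof(x) == &x);  // evaluateLValue on the argument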
@@ -9430,7 +9526,7 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
bool ValueInit = false;
QualType AllocType = E->getAllocatedType();
- if (Optional<const Expr*> ArraySize = E->getArraySize()) {
+ if (Optional<const Expr *> ArraySize = E->getArraySize()) {
const Expr *Stripped = *ArraySize;
for (; auto *ICE = dyn_cast<ImplicitCastExpr>(Stripped);
Stripped = ICE->getSubExpr())
@@ -9484,8 +9580,8 @@ bool PointerExprEvaluator::VisitCXXNewExpr(const CXXNewExpr *E) {
unsigned Bits =
std::max(CAT->getSize().getBitWidth(), ArrayBound.getBitWidth());
- llvm::APInt InitBound = CAT->getSize().zextOrSelf(Bits);
- llvm::APInt AllocBound = ArrayBound.zextOrSelf(Bits);
+ llvm::APInt InitBound = CAT->getSize().zext(Bits);
+ llvm::APInt AllocBound = ArrayBound.zext(Bits);
if (InitBound.ugt(AllocBound)) {
if (IsNothrow)
return ZeroInitialization(E);
@@ -9923,6 +10019,17 @@ bool RecordExprEvaluator::VisitInitListExpr(const InitListExpr *E) {
ImplicitValueInitExpr VIE(HaveInit ? Info.Ctx.IntTy : Field->getType());
const Expr *Init = HaveInit ? E->getInit(ElementNo++) : &VIE;
+ if (Field->getType()->isIncompleteArrayType()) {
+ if (auto *CAT = Info.Ctx.getAsConstantArrayType(Init->getType())) {
+ if (!CAT->getSize().isZero()) {
+ // Bail out for now. This might sort of "work", but the rest of the
+ // code isn't really prepared to handle it.
+ Info.FFDiag(Init, diag::note_constexpr_unsupported_flexible_array);
+ return false;
+ }
+ }
+ }
+
// Temporarily override This, in case there's a CXXDefaultInitExpr in here.
ThisOverrideRAII ThisOverride(*Info.CurrentCall, &This,
isa<CXXDefaultInitExpr>(Init));
@@ -10088,7 +10195,6 @@ bool RecordExprEvaluator::VisitLambdaExpr(const LambdaExpr *E) {
// Iterate through all the lambda's closure object's fields and initialize
// them.
auto *CaptureInitIt = E->capture_init_begin();
- const LambdaCapture *CaptureIt = ClosureClass->captures_begin();
bool Success = true;
const ASTRecordLayout &Layout = Info.Ctx.getASTRecordLayout(ClosureClass);
for (const auto *Field : ClosureClass->fields()) {
@@ -10112,7 +10218,6 @@ bool RecordExprEvaluator::VisitLambdaExpr(const LambdaExpr *E) {
return false;
Success = false;
}
- ++CaptureIt;
}
return Success;
}
@@ -10270,9 +10375,9 @@ bool VectorExprEvaluator::VisitCastExpr(const CastExpr *E) {
for (unsigned i = 0; i < NElts; i++) {
llvm::APInt Elt;
if (BigEndian)
- Elt = SValInt.rotl(i*EltSize+FloatEltSize).trunc(FloatEltSize);
+ Elt = SValInt.rotl(i * EltSize + FloatEltSize).trunc(FloatEltSize);
else
- Elt = SValInt.rotr(i*EltSize).trunc(FloatEltSize);
+ Elt = SValInt.rotr(i * EltSize).trunc(FloatEltSize);
Elts.push_back(APValue(APFloat(Sem, Elt)));
}
} else if (EltTy->isIntegerType()) {
@@ -13773,10 +13878,12 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
case Builtin::BI__builtin_huge_val:
case Builtin::BI__builtin_huge_valf:
case Builtin::BI__builtin_huge_vall:
+ case Builtin::BI__builtin_huge_valf16:
case Builtin::BI__builtin_huge_valf128:
case Builtin::BI__builtin_inf:
case Builtin::BI__builtin_inff:
case Builtin::BI__builtin_infl:
+ case Builtin::BI__builtin_inff16:
case Builtin::BI__builtin_inff128: {
const llvm::fltSemantics &Sem =
Info.Ctx.getFloatTypeSemantics(E->getType());
@@ -13787,6 +13894,7 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
case Builtin::BI__builtin_nans:
case Builtin::BI__builtin_nansf:
case Builtin::BI__builtin_nansl:
+ case Builtin::BI__builtin_nansf16:
case Builtin::BI__builtin_nansf128:
if (!TryEvaluateBuiltinNaN(Info.Ctx, E->getType(), E->getArg(0),
true, Result))
@@ -13796,6 +13904,7 @@ bool FloatExprEvaluator::VisitCallExpr(const CallExpr *E) {
case Builtin::BI__builtin_nan:
case Builtin::BI__builtin_nanf:
case Builtin::BI__builtin_nanl:
+ case Builtin::BI__builtin_nanf16:
case Builtin::BI__builtin_nanf128:
// If this is __builtin_nan() turn this into a nan, otherwise we
// can't constant fold it.
diff --git a/contrib/llvm-project/clang/lib/AST/FormatString.cpp b/contrib/llvm-project/clang/lib/AST/FormatString.cpp
index 102bcca96a38..4977aaa51319 100644
--- a/contrib/llvm-project/clang/lib/AST/FormatString.cpp
+++ b/contrib/llvm-project/clang/lib/AST/FormatString.cpp
@@ -760,7 +760,7 @@ bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target,
return true;
case ConversionSpecifier::FreeBSDrArg:
case ConversionSpecifier::FreeBSDyArg:
- return Target.getTriple().isOSFreeBSD() || Target.getTriple().isPS4();
+ return Target.getTriple().isOSFreeBSD() || Target.getTriple().isPS();
default:
return false;
}
@@ -795,7 +795,7 @@ bool FormatSpecifier::hasValidLengthModifier(const TargetInfo &Target,
return true;
case ConversionSpecifier::FreeBSDrArg:
case ConversionSpecifier::FreeBSDyArg:
- return Target.getTriple().isOSFreeBSD() || Target.getTriple().isPS4();
+ return Target.getTriple().isOSFreeBSD() || Target.getTriple().isPS();
default:
return false;
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h
index 124a6ff03f18..82aa413dabbc 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h
+++ b/contrib/llvm-project/clang/lib/AST/Interp/ByteCodeExprGen.h
@@ -284,7 +284,7 @@ public:
~LocalScope() override { this->emitDestruction(); }
void addLocal(const Scope::Local &Local) override {
- if (!Idx.hasValue()) {
+ if (!Idx) {
Idx = this->Ctx->Descriptors.size();
this->Ctx->Descriptors.emplace_back();
}
@@ -293,7 +293,7 @@ public:
}
void emitDestruction() override {
- if (!Idx.hasValue())
+ if (!Idx)
return;
this->Ctx->emitDestroy(*Idx, SourceInfo{});
}
diff --git a/contrib/llvm-project/clang/lib/AST/Interp/Function.cpp b/contrib/llvm-project/clang/lib/AST/Interp/Function.cpp
index 0ed13a92aa38..6ba97df1cd30 100644
--- a/contrib/llvm-project/clang/lib/AST/Interp/Function.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Interp/Function.cpp
@@ -34,8 +34,7 @@ Function::ParamDescriptor Function::getParamDescriptor(unsigned Offset) const {
SourceInfo Function::getSource(CodePtr PC) const {
unsigned Offset = PC - getCodeBegin();
using Elem = std::pair<unsigned, SourceInfo>;
- auto It = std::lower_bound(SrcMap.begin(), SrcMap.end(), Elem{Offset, {}},
- [](Elem A, Elem B) { return A.first < B.first; });
+ auto It = llvm::lower_bound(SrcMap, Elem{Offset, {}}, llvm::less_first());
if (It == SrcMap.end() || It->first != Offset)
llvm::report_fatal_error("missing source location");
return It->second;
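llvm::lower_bound is the range-based wrapper around std::lower_bound, and llvm::less_first orders pair-like elements by their first member, making the hand-written comparator lambda redundant. The same lookup pattern in isolation:

    #include "llvm/ADT/STLExtras.h"
    #include <utility>
    #include <vector>

    int lookup(const std::vector<std::pair<unsigned, int>> &Map,
               unsigned Offset) {
      auto It = llvm::lower_bound(Map, std::pair<unsigned, int>{Offset, 0},
                                  llvm::less_first());
      return (It != Map.end() && It->first == Offset) ? It->second : -1;
    }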
diff --git a/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp b/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp
index 68d4d1271cdb..91f41778ee68 100644
--- a/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ItaniumMangle.cpp
@@ -78,8 +78,8 @@ class ItaniumMangleContextImpl : public ItaniumMangleContext {
public:
explicit ItaniumMangleContextImpl(
ASTContext &Context, DiagnosticsEngine &Diags,
- DiscriminatorOverrideTy DiscriminatorOverride)
- : ItaniumMangleContext(Context, Diags),
+ DiscriminatorOverrideTy DiscriminatorOverride, bool IsAux = false)
+ : ItaniumMangleContext(Context, Diags, IsAux),
DiscriminatorOverride(DiscriminatorOverride) {}
/// @name Mangler Entry Points
@@ -130,6 +130,8 @@ public:
void mangleLambdaSig(const CXXRecordDecl *Lambda, raw_ostream &) override;
+ void mangleModuleInitializer(const Module *Module, raw_ostream &) override;
+
bool getNextDiscriminator(const NamedDecl *ND, unsigned &disc) {
// Lambda closure types are already numbered.
if (isLambda(ND))
@@ -143,7 +145,7 @@ public:
// Use the canonical number for externally visible decls.
if (ND->isExternallyVisible()) {
- unsigned discriminator = getASTContext().getManglingNumber(ND);
+ unsigned discriminator = getASTContext().getManglingNumber(ND, isAux());
if (discriminator == 1)
return false;
disc = discriminator - 2;
@@ -223,10 +225,10 @@ class CXXNameMangler {
/// that's not a template specialization; otherwise it's the pattern
/// for that specialization.
const NamedDecl *Structor;
- unsigned StructorType;
+ unsigned StructorType = 0;
/// The next substitution sequence number.
- unsigned SeqID;
+ unsigned SeqID = 0;
class FunctionTypeDepthState {
unsigned Bits;
@@ -395,32 +397,32 @@ class CXXNameMangler {
public:
CXXNameMangler(ItaniumMangleContextImpl &C, raw_ostream &Out_,
const NamedDecl *D = nullptr, bool NullOut_ = false)
- : Context(C), Out(Out_), NullOut(NullOut_), Structor(getStructor(D)),
- StructorType(0), SeqID(0), AbiTagsRoot(AbiTags) {
+ : Context(C), Out(Out_), NullOut(NullOut_), Structor(getStructor(D)),
+ AbiTagsRoot(AbiTags) {
// These can't be mangled without a ctor type or dtor type.
assert(!D || (!isa<CXXDestructorDecl>(D) &&
!isa<CXXConstructorDecl>(D)));
}
CXXNameMangler(ItaniumMangleContextImpl &C, raw_ostream &Out_,
const CXXConstructorDecl *D, CXXCtorType Type)
- : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
- SeqID(0), AbiTagsRoot(AbiTags) { }
+ : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
+ AbiTagsRoot(AbiTags) {}
CXXNameMangler(ItaniumMangleContextImpl &C, raw_ostream &Out_,
const CXXDestructorDecl *D, CXXDtorType Type)
- : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
- SeqID(0), AbiTagsRoot(AbiTags) { }
+ : Context(C), Out(Out_), Structor(getStructor(D)), StructorType(Type),
+ AbiTagsRoot(AbiTags) {}
CXXNameMangler(CXXNameMangler &Outer, raw_ostream &Out_)
- : Context(Outer.Context), Out(Out_), NullOut(false),
- Structor(Outer.Structor), StructorType(Outer.StructorType),
- SeqID(Outer.SeqID), FunctionTypeDepth(Outer.FunctionTypeDepth),
- AbiTagsRoot(AbiTags), Substitutions(Outer.Substitutions) {}
+ : Context(Outer.Context), Out(Out_), Structor(Outer.Structor),
+ StructorType(Outer.StructorType), SeqID(Outer.SeqID),
+ FunctionTypeDepth(Outer.FunctionTypeDepth), AbiTagsRoot(AbiTags),
+ Substitutions(Outer.Substitutions),
+ ModuleSubstitutions(Outer.ModuleSubstitutions) {}
CXXNameMangler(CXXNameMangler &Outer, llvm::raw_null_ostream &Out_)
- : Context(Outer.Context), Out(Out_), NullOut(true),
- Structor(Outer.Structor), StructorType(Outer.StructorType),
- SeqID(Outer.SeqID), FunctionTypeDepth(Outer.FunctionTypeDepth),
- AbiTagsRoot(AbiTags), Substitutions(Outer.Substitutions) {}
+ : CXXNameMangler(Outer, (raw_ostream &)Out_) {
+ NullOut = true;
+ }
raw_ostream &getStream() { return Out; }
@@ -438,10 +440,12 @@ public:
void mangleType(QualType T);
void mangleNameOrStandardSubstitution(const NamedDecl *ND);
void mangleLambdaSig(const CXXRecordDecl *Lambda);
+ void mangleModuleNamePrefix(StringRef Name, bool IsPartition = false);
private:
bool mangleSubstitution(const NamedDecl *ND);
+ bool mangleSubstitution(NestedNameSpecifier *NNS);
bool mangleSubstitution(QualType T);
bool mangleSubstitution(TemplateName Template);
bool mangleSubstitution(uintptr_t Ptr);
@@ -455,6 +459,11 @@ private:
addSubstitution(reinterpret_cast<uintptr_t>(ND));
}
+ void addSubstitution(NestedNameSpecifier *NNS) {
+ NNS = Context.getASTContext().getCanonicalNestedNameSpecifier(NNS);
+
+ addSubstitution(reinterpret_cast<uintptr_t>(NNS));
+ }
void addSubstitution(QualType T);
void addSubstitution(TemplateName Template);
void addSubstitution(uintptr_t Ptr);
@@ -473,22 +482,21 @@ private:
void mangleNameWithAbiTags(GlobalDecl GD,
const AbiTagList *AdditionalAbiTags);
- void mangleModuleName(const Module *M);
- void mangleModuleNamePrefix(StringRef Name);
+ void mangleModuleName(const NamedDecl *ND);
void mangleTemplateName(const TemplateDecl *TD,
const TemplateArgument *TemplateArgs,
unsigned NumTemplateArgs);
- void mangleUnqualifiedName(GlobalDecl GD,
+ void mangleUnqualifiedName(GlobalDecl GD, const DeclContext *DC,
const AbiTagList *AdditionalAbiTags) {
- mangleUnqualifiedName(GD, cast<NamedDecl>(GD.getDecl())->getDeclName(), UnknownArity,
- AdditionalAbiTags);
+ mangleUnqualifiedName(GD, cast<NamedDecl>(GD.getDecl())->getDeclName(), DC,
+ UnknownArity, AdditionalAbiTags);
}
void mangleUnqualifiedName(GlobalDecl GD, DeclarationName Name,
- unsigned KnownArity,
+ const DeclContext *DC, unsigned KnownArity,
const AbiTagList *AdditionalAbiTags);
- void mangleUnscopedName(GlobalDecl GD,
+ void mangleUnscopedName(GlobalDecl GD, const DeclContext *DC,
const AbiTagList *AdditionalAbiTags);
- void mangleUnscopedTemplateName(GlobalDecl GD,
+ void mangleUnscopedTemplateName(GlobalDecl GD, const DeclContext *DC,
const AbiTagList *AdditionalAbiTags);
void mangleSourceName(const IdentifierInfo *II);
void mangleRegCallName(const IdentifierInfo *II);
@@ -733,7 +741,8 @@ bool ItaniumMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) {
if (VD->isExternC())
return false;
- // Variables at global scope with non-internal linkage are not mangled.
+ // Variables at global scope are not mangled unless they have internal
+ // linkage, are specializations, or are attached to a named module.
const DeclContext *DC = getEffectiveDeclContext(D);
// Check for extern variable declared locally.
if (DC->isFunctionOrMethod() && D->hasLinkage())
@@ -741,7 +750,8 @@ bool ItaniumMangleContextImpl::shouldMangleCXXName(const NamedDecl *D) {
DC = getEffectiveParentContext(DC);
if (DC->isTranslationUnit() && D->getFormalLinkage() != InternalLinkage &&
!CXXNameMangler::shouldHaveAbiTags(*this, VD) &&
- !isa<VarTemplateSpecializationDecl>(VD))
+ !isa<VarTemplateSpecializationDecl>(VD) &&
+ !VD->getOwningModuleForLinkage())
return false;
}
@@ -1016,14 +1026,6 @@ void CXXNameMangler::mangleNameWithAbiTags(GlobalDecl GD,
return;
}
- // Do not mangle the owning module for an external linkage declaration.
- // This enables backwards-compatibility with non-modular code, and is
- // a valid choice since conflicts are not permitted by C++ Modules TS
- // [basic.def.odr]/6.2.
- if (!ND->hasExternalFormalLinkage())
- if (Module *M = ND->getOwningModuleForLinkage())
- mangleModuleName(M);
-
// Closures can require a nested-name mangling even if they're semantically
// in the global namespace.
if (const NamedDecl *PrefixND = getClosurePrefix(ND)) {
@@ -1035,38 +1037,35 @@ void CXXNameMangler::mangleNameWithAbiTags(GlobalDecl GD,
// Check if we have a template.
const TemplateArgumentList *TemplateArgs = nullptr;
if (GlobalDecl TD = isTemplate(GD, TemplateArgs)) {
- mangleUnscopedTemplateName(TD, AdditionalAbiTags);
+ mangleUnscopedTemplateName(TD, DC, AdditionalAbiTags);
mangleTemplateArgs(asTemplateName(TD), *TemplateArgs);
return;
}
- mangleUnscopedName(GD, AdditionalAbiTags);
+ mangleUnscopedName(GD, DC, AdditionalAbiTags);
return;
}
mangleNestedName(GD, DC, AdditionalAbiTags);
}
-void CXXNameMangler::mangleModuleName(const Module *M) {
- // Implement the C++ Modules TS name mangling proposal; see
- // https://gcc.gnu.org/wiki/cxx-modules?action=AttachFile
- //
- // <module-name> ::= W <unscoped-name>+ E
- // ::= W <module-subst> <unscoped-name>* E
- Out << 'W';
- mangleModuleNamePrefix(M->Name);
- Out << 'E';
+void CXXNameMangler::mangleModuleName(const NamedDecl *ND) {
+ if (ND->isExternallyVisible())
+ if (Module *M = ND->getOwningModuleForLinkage())
+ mangleModuleNamePrefix(M->getPrimaryModuleInterfaceName());
}
-void CXXNameMangler::mangleModuleNamePrefix(StringRef Name) {
- // <module-subst> ::= _ <seq-id> # 0 < seq-id < 10
- // ::= W <seq-id - 10> _ # otherwise
+// <module-name> ::= <module-subname>
+// ::= <module-name> <module-subname>
+// ::= <substitution>
+// <module-subname> ::= W <source-name>
+// ::= W P <source-name>
+void CXXNameMangler::mangleModuleNamePrefix(StringRef Name, bool IsPartition) {
+ // <substitution> ::= S <seq-id> _
auto It = ModuleSubstitutions.find(Name);
if (It != ModuleSubstitutions.end()) {
- if (It->second < 10)
- Out << '_' << static_cast<char>('0' + It->second);
- else
- Out << 'W' << (It->second - 10) << '_';
+ Out << 'S';
+ mangleSeqID(It->second);
return;
}
@@ -1075,11 +1074,16 @@ void CXXNameMangler::mangleModuleNamePrefix(StringRef Name) {
auto Parts = Name.rsplit('.');
if (Parts.second.empty())
Parts.second = Parts.first;
- else
- mangleModuleNamePrefix(Parts.first);
+ else {
+ mangleModuleNamePrefix(Parts.first, IsPartition);
+ IsPartition = false;
+ }
+ Out << 'W';
+ if (IsPartition)
+ Out << 'P';
Out << Parts.second.size() << Parts.second;
- ModuleSubstitutions.insert({Name, ModuleSubstitutions.size()});
+ ModuleSubstitutions.insert({Name, SeqID++});
}
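Under this grammar each dot-separated module component becomes a W-prefixed source-name, and a repeated module name collapses to an ordinary S<seq-id>_ substitution instead of the old _N / W..E scheme that the removed lines implemented. An illustrative sketch (the spelled-out manglings are my derivation, not verified compiler output):

    export module foo.bar;             // interface unit of module foo.bar
    export int n;                      // _ZW3fooW3bar1n
    export int f(int x) { return x; }  // _ZW3fooW3bar1fi; later uses of the
                                       // foo.bar prefix become substitutions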
void CXXNameMangler::mangleTemplateName(const TemplateDecl *TD,
@@ -1088,27 +1092,27 @@ void CXXNameMangler::mangleTemplateName(const TemplateDecl *TD,
const DeclContext *DC = Context.getEffectiveDeclContext(TD);
if (DC->isTranslationUnit() || isStdNamespace(DC)) {
- mangleUnscopedTemplateName(TD, nullptr);
+ mangleUnscopedTemplateName(TD, DC, nullptr);
mangleTemplateArgs(asTemplateName(TD), TemplateArgs, NumTemplateArgs);
} else {
mangleNestedName(TD, TemplateArgs, NumTemplateArgs);
}
}
-void CXXNameMangler::mangleUnscopedName(GlobalDecl GD,
+void CXXNameMangler::mangleUnscopedName(GlobalDecl GD, const DeclContext *DC,
const AbiTagList *AdditionalAbiTags) {
- const NamedDecl *ND = cast<NamedDecl>(GD.getDecl());
// <unscoped-name> ::= <unqualified-name>
// ::= St <unqualified-name> # ::std::
- if (isStdNamespace(Context.getEffectiveDeclContext(ND)))
+ assert(!isa<LinkageSpecDecl>(DC) && "unskipped LinkageSpecDecl");
+ if (isStdNamespace(DC))
Out << "St";
- mangleUnqualifiedName(GD, AdditionalAbiTags);
+ mangleUnqualifiedName(GD, DC, AdditionalAbiTags);
}
void CXXNameMangler::mangleUnscopedTemplateName(
- GlobalDecl GD, const AbiTagList *AdditionalAbiTags) {
+ GlobalDecl GD, const DeclContext *DC, const AbiTagList *AdditionalAbiTags) {
const TemplateDecl *ND = cast<TemplateDecl>(GD.getDecl());
// <unscoped-template-name> ::= <unscoped-name>
// ::= <substitution>
@@ -1121,9 +1125,10 @@ void CXXNameMangler::mangleUnscopedTemplateName(
"template template param cannot have abi tags");
mangleTemplateParameter(TTP->getDepth(), TTP->getIndex());
} else if (isa<BuiltinTemplateDecl>(ND) || isa<ConceptDecl>(ND)) {
- mangleUnscopedName(GD, AdditionalAbiTags);
+ mangleUnscopedName(GD, DC, AdditionalAbiTags);
} else {
- mangleUnscopedName(GD.getWithDecl(ND->getTemplatedDecl()), AdditionalAbiTags);
+ mangleUnscopedName(GD.getWithDecl(ND->getTemplatedDecl()), DC,
+ AdditionalAbiTags);
}
addSubstitution(ND);
@@ -1399,15 +1404,19 @@ void CXXNameMangler::mangleUnresolvedName(
mangleTemplateArgs(TemplateName(), TemplateArgs, NumTemplateArgs);
}
-void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD,
- DeclarationName Name,
- unsigned KnownArity,
- const AbiTagList *AdditionalAbiTags) {
+void CXXNameMangler::mangleUnqualifiedName(
+ GlobalDecl GD, DeclarationName Name, const DeclContext *DC,
+ unsigned KnownArity, const AbiTagList *AdditionalAbiTags) {
const NamedDecl *ND = cast_or_null<NamedDecl>(GD.getDecl());
- unsigned Arity = KnownArity;
- // <unqualified-name> ::= <operator-name>
+ // <unqualified-name> ::= [<module-name>] <operator-name>
// ::= <ctor-dtor-name>
- // ::= <source-name>
+ // ::= [<module-name>] <source-name>
+ // ::= [<module-name>] DC <source-name>* E
+
+ if (ND && DC && DC->isFileContext())
+ mangleModuleName(ND);
+
+ unsigned Arity = KnownArity;
switch (Name.getNameKind()) {
case DeclarationName::Identifier: {
const IdentifierInfo *II = Name.getAsIdentifierInfo();
@@ -1418,8 +1427,6 @@ void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD,
//
// <unqualified-name> ::= DC <source-name>* E
//
- // These can never be referenced across translation units, so we do
- // not need a cross-vendor mangling for anything other than demanglers.
// Proposed on cxx-abi-dev on 2016-08-12
Out << "DC";
for (auto *BD : DD->bindings())
@@ -1569,7 +1576,8 @@ void CXXNameMangler::mangleUnqualifiedName(GlobalDecl GD,
}
if (TD->isExternallyVisible()) {
- unsigned UnnamedMangle = getASTContext().getManglingNumber(TD);
+ unsigned UnnamedMangle =
+ getASTContext().getManglingNumber(TD, Context.isAux());
Out << "Ut";
if (UnnamedMangle > 1)
Out << UnnamedMangle - 2;
@@ -1716,7 +1724,7 @@ void CXXNameMangler::mangleNestedName(GlobalDecl GD,
mangleTemplateArgs(asTemplateName(TD), *TemplateArgs);
} else {
manglePrefix(DC, NoFunction);
- mangleUnqualifiedName(GD, AdditionalAbiTags);
+ mangleUnqualifiedName(GD, DC, AdditionalAbiTags);
}
Out << 'E';
@@ -1746,7 +1754,7 @@ void CXXNameMangler::mangleNestedNameWithClosurePrefix(
Out << 'N';
mangleClosurePrefix(PrefixND);
- mangleUnqualifiedName(GD, AdditionalAbiTags);
+ mangleUnqualifiedName(GD, nullptr, AdditionalAbiTags);
Out << 'E';
}
@@ -1824,7 +1832,7 @@ void CXXNameMangler::mangleLocalName(GlobalDecl GD,
// Mangle the name relative to the closest enclosing function.
// equality ok because RD derived from ND above
if (D == RD) {
- mangleUnqualifiedName(RD, AdditionalAbiTags);
+ mangleUnqualifiedName(RD, DC, AdditionalAbiTags);
} else if (const BlockDecl *BD = dyn_cast<BlockDecl>(D)) {
if (const NamedDecl *PrefixND = getClosurePrefix(BD))
mangleClosurePrefix(PrefixND, true /*NoFunction*/);
@@ -1855,7 +1863,7 @@ void CXXNameMangler::mangleLocalName(GlobalDecl GD,
assert(!AdditionalAbiTags && "Block cannot have additional abi tags");
mangleUnqualifiedBlock(BD);
} else {
- mangleUnqualifiedName(GD, AdditionalAbiTags);
+ mangleUnqualifiedName(GD, DC, AdditionalAbiTags);
}
if (const NamedDecl *ND = dyn_cast<NamedDecl>(RD ? RD : D)) {
@@ -2041,12 +2049,21 @@ void CXXNameMangler::manglePrefix(NestedNameSpecifier *qualifier) {
return;
case NestedNameSpecifier::Identifier:
+ // Clang 14 and before did not consider this substitutable.
+ bool Clang14Compat = getASTContext().getLangOpts().getClangABICompat() <=
+ LangOptions::ClangABI::Ver14;
+ if (!Clang14Compat && mangleSubstitution(qualifier))
+ return;
+
// Member expressions can have these without prefixes, but that
// should end up in mangleUnresolvedPrefix instead.
assert(qualifier->getPrefix());
manglePrefix(qualifier->getPrefix());
mangleSourceName(qualifier->getAsIdentifier());
+
+ if (!Clang14Compat)
+ addSubstitution(qualifier);
return;
}
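In practice this makes a dependent identifier prefix shareable between occurrences in the same mangled name. A sketch of a case where the new substitution should fire (my own example, not taken from the patch):

    // Both parameters spell the prefix 'T::type::'. With the change, the
    // second occurrence is emitted as a substitution rather than repeated
    // in full, unless -fclang-abi-compat=14 (or lower) is in effect.
    template <typename T>
    void f(typename T::type::inner, typename T::type::inner);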
@@ -2082,10 +2099,11 @@ void CXXNameMangler::manglePrefix(const DeclContext *DC, bool NoFunction) {
mangleTemplateArgs(asTemplateName(TD), *TemplateArgs);
} else if (const NamedDecl *PrefixND = getClosurePrefix(ND)) {
mangleClosurePrefix(PrefixND, NoFunction);
- mangleUnqualifiedName(ND, nullptr);
+ mangleUnqualifiedName(ND, nullptr, nullptr);
} else {
- manglePrefix(Context.getEffectiveDeclContext(ND), NoFunction);
- mangleUnqualifiedName(ND, nullptr);
+ const DeclContext *DC = Context.getEffectiveDeclContext(ND);
+ manglePrefix(DC, NoFunction);
+ mangleUnqualifiedName(ND, DC, nullptr);
}
addSubstitution(ND);
@@ -2138,11 +2156,13 @@ void CXXNameMangler::mangleTemplatePrefix(GlobalDecl GD,
if (const auto *TTP = dyn_cast<TemplateTemplateParmDecl>(ND)) {
mangleTemplateParameter(TTP->getDepth(), TTP->getIndex());
} else {
- manglePrefix(Context.getEffectiveDeclContext(ND), NoFunction);
+ const DeclContext *DC = Context.getEffectiveDeclContext(ND);
+ manglePrefix(DC, NoFunction);
if (isa<BuiltinTemplateDecl>(ND) || isa<ConceptDecl>(ND))
- mangleUnqualifiedName(GD, nullptr);
+ mangleUnqualifiedName(GD, DC, nullptr);
else
- mangleUnqualifiedName(GD.getWithDecl(ND->getTemplatedDecl()), nullptr);
+ mangleUnqualifiedName(GD.getWithDecl(ND->getTemplatedDecl()), DC,
+ nullptr);
}
addSubstitution(ND);
@@ -2183,8 +2203,9 @@ void CXXNameMangler::mangleClosurePrefix(const NamedDecl *ND, bool NoFunction) {
mangleTemplatePrefix(TD, NoFunction);
mangleTemplateArgs(asTemplateName(TD), *TemplateArgs);
} else {
- manglePrefix(Context.getEffectiveDeclContext(ND), NoFunction);
- mangleUnqualifiedName(ND, nullptr);
+ const auto *DC = Context.getEffectiveDeclContext(ND);
+ manglePrefix(DC, NoFunction);
+ mangleUnqualifiedName(ND, DC, nullptr);
}
Out << 'M';
@@ -2205,9 +2226,7 @@ void CXXNameMangler::mangleType(TemplateName TN) {
switch (TN.getKind()) {
case TemplateName::QualifiedTemplate:
- TD = TN.getAsQualifiedTemplateName()->getTemplateDecl();
- goto HaveDecl;
-
+ case TemplateName::UsingTemplate:
case TemplateName::Template:
TD = TN.getAsTemplateDecl();
goto HaveDecl;
@@ -2286,6 +2305,7 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
case Type::FunctionNoProto:
case Type::Paren:
case Type::Attributed:
+ case Type::BTFTagAttributed:
case Type::Auto:
case Type::DeducedTemplateSpecialization:
case Type::PackExpansion:
@@ -2383,6 +2403,12 @@ bool CXXNameMangler::mangleUnresolvedTypeOrSimpleId(QualType Ty,
Out << "_SUBSTPACK_";
break;
}
+ case TemplateName::UsingTemplate: {
+ TemplateDecl *TD = TN.getAsTemplateDecl();
+ assert(TD && !isa<TemplateTemplateParmDecl>(TD));
+ mangleSourceNameWithAbiTags(TD);
+ break;
+ }
}
// Note: we don't pass in the template name here. We are mangling the
@@ -3129,6 +3155,8 @@ StringRef CXXNameMangler::getCallingConvQualifierName(CallingConv CC) {
case CC_AAPCS:
case CC_AAPCS_VFP:
case CC_AArch64VectorCall:
+ case CC_AArch64SVEPCS:
+ case CC_AMDGPUKernelCall:
case CC_IntelOclBicc:
case CC_SpirFunction:
case CC_OpenCLKernel:
@@ -5545,6 +5573,47 @@ static QualType getLValueType(ASTContext &Ctx, const APValue &LV) {
return T;
}
+static IdentifierInfo *getUnionInitName(SourceLocation UnionLoc,
+ DiagnosticsEngine &Diags,
+ const FieldDecl *FD) {
+ // According to:
+ // http://itanium-cxx-abi.github.io/cxx-abi/abi.html#mangling.anonymous
+ // For the purposes of mangling, the name of an anonymous union is considered
+ // to be the name of the first named data member found by a pre-order,
+ // depth-first, declaration-order walk of the data members of the anonymous
+ // union.
+
+ if (FD->getIdentifier())
+ return FD->getIdentifier();
+
+ // The only cases where the identifier of a FieldDecl is blank are when the
+ // field represents an anonymous record type or is an unnamed bitfield.
+ // There is no type to descend into in the case of a bitfield, so we can just
+ // return nullptr in that case.
+ if (FD->isBitField())
+ return nullptr;
+ const CXXRecordDecl *RD = FD->getType()->getAsCXXRecordDecl();
+
+ // Consider only the fields in declaration order, searched depth-first. We
+ // don't care about the active member of the union, as all we are doing is
+ // looking for a valid name. We also don't check bases, due to guidance from
+ // the Itanium ABI folks.
+ for (const FieldDecl *RDField : RD->fields()) {
+ if (IdentifierInfo *II = getUnionInitName(UnionLoc, Diags, RDField))
+ return II;
+ }
+
+ // According to the Itanium ABI: If there is no such data member (i.e., if all
+ // of the data members in the union are unnamed), then there is no way for a
+ // program to refer to the anonymous union, and there is therefore no need to
+ // mangle its name. However, we should diagnose this anyway.
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error, "cannot mangle this unnamed union NTTP yet");
+ Diags.Report(UnionLoc, DiagID);
+
+ return nullptr;
+}
+
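A sketch of the case this helper resolves, relying on Clang's anonymous-struct extension (whether such a union is usable as an NTTP, and the exact designator, are assumptions here):

    union U {
      struct { int inner; };  // FieldDecl without an identifier
      float f;
    };
    template <U u> struct X {};
    X<U{}> x;  // the active member is the unnamed struct, so the 'di'
               // designator borrows the first named field found by the
               // depth-first walk: 'inner'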
void CXXNameMangler::mangleValueInTemplateArg(QualType T, const APValue &V,
bool TopLevel,
bool NeedExactType) {
@@ -5577,8 +5646,7 @@ void CXXNameMangler::mangleValueInTemplateArg(QualType T, const APValue &V,
assert(RD && "unexpected type for record value");
// Drop trailing zero-initialized elements.
- llvm::SmallVector<const FieldDecl *, 16> Fields(RD->field_begin(),
- RD->field_end());
+ llvm::SmallVector<const FieldDecl *, 16> Fields(RD->fields());
while (
!Fields.empty() &&
(Fields.back()->isUnnamedBitfield() ||
@@ -5628,7 +5696,10 @@ void CXXNameMangler::mangleValueInTemplateArg(QualType T, const APValue &V,
mangleType(T);
if (!isZeroInitialized(T, V)) {
Out << "di";
- mangleSourceName(FD->getIdentifier());
+ IdentifierInfo *II = (getUnionInitName(
+ T->getAsCXXRecordDecl()->getLocation(), Context.getDiags(), FD));
+ if (II)
+ mangleSourceName(II);
mangleValueInTemplateArg(FD->getType(), V.getUnionValue(), false);
}
Out << 'E';
@@ -5961,6 +6032,14 @@ bool CXXNameMangler::mangleSubstitution(const NamedDecl *ND) {
return mangleSubstitution(reinterpret_cast<uintptr_t>(ND));
}
+bool CXXNameMangler::mangleSubstitution(NestedNameSpecifier *NNS) {
+ assert(NNS->getKind() == NestedNameSpecifier::Identifier &&
+ "mangleSubstitution(NestedNameSpecifier *) is only used for "
+ "identifier nested name specifiers.");
+ NNS = Context.getASTContext().getCanonicalNestedNameSpecifier(NNS);
+ return mangleSubstitution(reinterpret_cast<uintptr_t>(NNS));
+}
+
/// Determine whether the given type has any qualifiers that are relevant for
/// substitutions.
static bool hasMangledSubstitutionQualifiers(QualType T) {
@@ -6026,6 +6105,9 @@ bool CXXNameMangler::isSpecializedAs(QualType S, llvm::StringRef Name,
if (TemplateArgs[0].getAsType() != A)
return false;
+ if (SD->getSpecializedTemplate()->getOwningModuleForLinkage())
+ return false;
+
return true;
}
@@ -6057,6 +6139,9 @@ bool CXXNameMangler::isStdCharSpecialization(
!isSpecializedAs(TemplateArgs[2].getAsType(), "allocator", A))
return false;
+ if (SD->getSpecializedTemplate()->getOwningModuleForLinkage())
+ return false;
+
return true;
}
@@ -6074,6 +6159,9 @@ bool CXXNameMangler::mangleStandardSubstitution(const NamedDecl *ND) {
if (!isStdNamespace(Context.getEffectiveDeclContext(TD)))
return false;
+ if (TD->getOwningModuleForLinkage())
+ return false;
+
// <substitution> ::= Sa # ::std::allocator
if (TD->getIdentifier()->isStr("allocator")) {
Out << "Sa";
@@ -6093,6 +6181,9 @@ bool CXXNameMangler::mangleStandardSubstitution(const NamedDecl *ND) {
if (!isStdNamespace(Context.getEffectiveDeclContext(SD)))
return false;
+ if (SD->getSpecializedTemplate()->getOwningModuleForLinkage())
+ return false;
+
// <substitution> ::= Ss # ::std::basic_string<char,
// ::std::char_traits<char>,
// ::std::allocator<char> >
@@ -6447,17 +6538,36 @@ void ItaniumMangleContextImpl::mangleLambdaSig(const CXXRecordDecl *Lambda,
Mangler.mangleLambdaSig(Lambda);
}
+void ItaniumMangleContextImpl::mangleModuleInitializer(const Module *M,
+ raw_ostream &Out) {
+ // <special-name> ::= GI <module-name> # module initializer function
+ CXXNameMangler Mangler(*this, Out);
+ Mangler.getStream() << "_ZGI";
+ Mangler.mangleModuleNamePrefix(M->getPrimaryModuleInterfaceName());
+ if (M->isModulePartition()) {
+ // The partition needs including, as partitions can have them too.
+ auto Partition = M->Name.find(':');
+ Mangler.mangleModuleNamePrefix(
+ StringRef(&M->Name[Partition + 1], M->Name.size() - Partition - 1),
+ /*IsPartition*/ true);
+ }
+}
+
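A sketch of the initializer symbols this should produce; the exact spellings are my derivation from the code above rather than verified output:

    export module foo:part;  // partition 'part' of module 'foo'
    // expected initializer: _ZGIW3fooWP4part — the primary interface name
    // first, then the partition component with the 'WP' prefix
    // (a non-partition 'export module foo;' would yield _ZGIW3foo)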
ItaniumMangleContext *ItaniumMangleContext::create(ASTContext &Context,
- DiagnosticsEngine &Diags) {
+ DiagnosticsEngine &Diags,
+ bool IsAux) {
return new ItaniumMangleContextImpl(
Context, Diags,
[](ASTContext &, const NamedDecl *) -> llvm::Optional<unsigned> {
return llvm::None;
- });
+ },
+ IsAux);
}
ItaniumMangleContext *
ItaniumMangleContext::create(ASTContext &Context, DiagnosticsEngine &Diags,
- DiscriminatorOverrideTy DiscriminatorOverride) {
- return new ItaniumMangleContextImpl(Context, Diags, DiscriminatorOverride);
+ DiscriminatorOverrideTy DiscriminatorOverride,
+ bool IsAux) {
+ return new ItaniumMangleContextImpl(Context, Diags, DiscriminatorOverride,
+ IsAux);
}
diff --git a/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp b/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp
index ae585def4d07..87e4255c2b93 100644
--- a/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp
+++ b/contrib/llvm-project/clang/lib/AST/JSONNodeDumper.cpp
@@ -1692,3 +1692,18 @@ void JSONNodeDumper::visitVerbatimLineComment(
const comments::VerbatimLineComment *C, const comments::FullComment *) {
JOS.attribute("text", C->getText());
}
+
+llvm::json::Object JSONNodeDumper::createFPOptions(FPOptionsOverride FPO) {
+ llvm::json::Object Ret;
+#define OPTION(NAME, TYPE, WIDTH, PREVIOUS) \
+ if (FPO.has##NAME##Override()) \
+ Ret.try_emplace(#NAME, static_cast<unsigned>(FPO.get##NAME##Override()));
+#include "clang/Basic/FPOptions.def"
+ return Ret;
+}
+
+void JSONNodeDumper::VisitCompoundStmt(const CompoundStmt *S) {
+ VisitStmt(S);
+ if (S->hasStoredFPFeatures())
+ JOS.attribute("fpoptions", createFPOptions(S->getStoredFPFeatures()));
+}
diff --git a/contrib/llvm-project/clang/lib/AST/Mangle.cpp b/contrib/llvm-project/clang/lib/AST/Mangle.cpp
index 984da9909ce2..7ea569c63d9e 100644
--- a/contrib/llvm-project/clang/lib/AST/Mangle.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Mangle.cpp
@@ -70,11 +70,9 @@ static CCMangling getCallingConvMangling(const ASTContext &Context,
// On wasm, the argc/argv form of "main" is renamed so that the startup code
// can call it with the correct function signature.
- // On Emscripten, users may be exporting "main" and expecting to call it
- // themselves, so we can't mangle it.
- if (Triple.isWasm() && !Triple.isOSEmscripten())
+ if (Triple.isWasm())
if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(ND))
- if (FD->isMain() && FD->hasPrototype() && FD->param_size() == 2)
+ if (FD->isMain() && FD->getNumParams() == 2)
return CCM_WasmMainArgcArgv;
if (!Triple.isOSWindows() || !Triple.isX86())
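Both the Emscripten carve-out and the prototype requirement are dropped: any two-parameter main on a wasm target now gets the argc/argv renaming. Sketch (the renamed symbol follows my recollection of the wasm startup convention; treat it as an assumption):

    int main(int argc, char **argv) { return argc; }
    // On wasm this definition is emitted under the name __main_argc_argv,
    // while a zero-parameter 'int main(void)' keeps the plain name 'main'.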
diff --git a/contrib/llvm-project/clang/lib/AST/MicrosoftCXXABI.cpp b/contrib/llvm-project/clang/lib/AST/MicrosoftCXXABI.cpp
index b7dc0e62e66a..7f4a7b2b9381 100644
--- a/contrib/llvm-project/clang/lib/AST/MicrosoftCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/AST/MicrosoftCXXABI.cpp
@@ -69,6 +69,7 @@ class MSHIPNumberingContext : public MicrosoftNumberingContext {
std::unique_ptr<MangleNumberingContext> DeviceCtx;
public:
+ using MicrosoftNumberingContext::getManglingNumber;
MSHIPNumberingContext(MangleContext *DeviceMangler) {
DeviceCtx = createItaniumNumberingContext(DeviceMangler);
}
@@ -76,6 +77,20 @@ public:
unsigned getDeviceManglingNumber(const CXXMethodDecl *CallOperator) override {
return DeviceCtx->getManglingNumber(CallOperator);
}
+
+ unsigned getManglingNumber(const TagDecl *TD,
+ unsigned MSLocalManglingNumber) override {
+ unsigned DeviceN = DeviceCtx->getManglingNumber(TD, MSLocalManglingNumber);
+ unsigned HostN =
+ MicrosoftNumberingContext::getManglingNumber(TD, MSLocalManglingNumber);
+ if (DeviceN > 0xFFFF || HostN > 0xFFFF) {
+ DiagnosticsEngine &Diags = TD->getASTContext().getDiagnostics();
+ unsigned DiagID = Diags.getCustomDiagID(
+ DiagnosticsEngine::Error, "Mangling number exceeds limit (65535)");
+ Diags.Report(TD->getLocation(), DiagID);
+ }
+ return (DeviceN << 16) | HostN;
+ }
};
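Host and device discriminators are packed into a single value, sixteen bits each, which is why both halves are range-checked against 0xFFFF before combining. The inverse mapping, for reference:

    // Splitting a packed HIP mangling number back into its halves.
    unsigned deviceNumber(unsigned Packed) { return Packed >> 16; }
    unsigned hostNumber(unsigned Packed) { return Packed & 0xFFFFu; }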
class MSSYCLNumberingContext : public MicrosoftNumberingContext {
diff --git a/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp b/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp
index 8802b6e500a6..09075e60142a 100644
--- a/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp
+++ b/contrib/llvm-project/clang/lib/AST/MicrosoftMangle.cpp
@@ -147,7 +147,8 @@ class MicrosoftMangleContextImpl : public MicrosoftMangleContext {
SmallString<16> AnonymousNamespaceHash;
public:
- MicrosoftMangleContextImpl(ASTContext &Context, DiagnosticsEngine &Diags);
+ MicrosoftMangleContextImpl(ASTContext &Context, DiagnosticsEngine &Diags,
+ bool IsAux = false);
bool shouldMangleCXXName(const NamedDecl *D) override;
bool shouldMangleStringLiteral(const StringLiteral *SL) override;
void mangleCXXName(GlobalDecl GD, raw_ostream &Out) override;
@@ -221,7 +222,7 @@ public:
// Use the canonical number for externally visible decls.
if (ND->isExternallyVisible()) {
- disc = getASTContext().getManglingNumber(ND);
+ disc = getASTContext().getManglingNumber(ND, isAux());
return true;
}
@@ -459,8 +460,9 @@ private:
}
MicrosoftMangleContextImpl::MicrosoftMangleContextImpl(ASTContext &Context,
- DiagnosticsEngine &Diags)
- : MicrosoftMangleContext(Context, Diags) {
+ DiagnosticsEngine &Diags,
+ bool IsAux)
+ : MicrosoftMangleContext(Context, Diags, IsAux) {
// To mangle anonymous namespaces, hash the path to the main source file. The
// path should be whatever (probably relative) path was passed on the command
// line. The goal is for the compiler to produce the same output regardless of
@@ -806,8 +808,8 @@ void MicrosoftCXXNameMangler::mangleNumber(llvm::APSInt Number) {
// to convert every integer to signed 64 bit before mangling (including
// unsigned 64 bit values). Do the same, but preserve bits beyond the bottom
// 64.
- llvm::APInt Value =
- Number.isSigned() ? Number.sextOrSelf(64) : Number.zextOrSelf(64);
+ unsigned Width = std::max(Number.getBitWidth(), 64U);
+ llvm::APInt Value = Number.extend(Width);
// <non-negative integer> ::= A@ # when Number == 0
// ::= <decimal digit> # when 1 <= Number <= 10
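The deprecated sextOrSelf/zextOrSelf pair is replaced by computing the target width up front; APSInt::extend then sign- or zero-extends according to the value's own signedness. The same logic in isolation:

    #include "llvm/ADT/APSInt.h"
    #include <algorithm>

    llvm::APSInt widenForMangling(const llvm::APSInt &Number) {
      unsigned Width = std::max(Number.getBitWidth(), 64U);
      return Number.extend(Width); // sext if signed, zext if unsigned
    }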
@@ -1943,7 +1945,7 @@ void MicrosoftCXXNameMangler::mangleObjCKindOfType(const ObjCObjectType *T,
Extra.mangleSourceName("KindOf");
Extra.mangleType(QualType(T, 0)
.stripObjCKindOfType(getASTContext())
- ->getAs<ObjCObjectType>(),
+ ->castAs<ObjCObjectType>(),
Quals, Range);
mangleArtificialTagType(TTK_Struct, TemplateMangling, {"__ObjC"});
@@ -2459,7 +2461,12 @@ void MicrosoftCXXNameMangler::mangleType(const BuiltinType *T, Qualifiers,
break;
case BuiltinType::Half:
- mangleArtificialTagType(TTK_Struct, "_Half", {"__clang"});
+ if (!getASTContext().getLangOpts().HLSL)
+ mangleArtificialTagType(TTK_Struct, "_Half", {"__clang"});
+ else if (getASTContext().getLangOpts().NativeHalfType)
+ Out << "$f16@";
+ else
+ Out << "$halff@";
break;
#define SVE_TYPE(Name, Id, SingletonId) \
@@ -3944,7 +3951,8 @@ void MicrosoftMangleContextImpl::mangleStringLiteral(const StringLiteral *SL,
Mangler.getStream() << '@';
}
-MicrosoftMangleContext *
-MicrosoftMangleContext::create(ASTContext &Context, DiagnosticsEngine &Diags) {
- return new MicrosoftMangleContextImpl(Context, Diags);
+MicrosoftMangleContext *MicrosoftMangleContext::create(ASTContext &Context,
+ DiagnosticsEngine &Diags,
+ bool IsAux) {
+ return new MicrosoftMangleContextImpl(Context, Diags, IsAux);
}
diff --git a/contrib/llvm-project/clang/lib/AST/ODRHash.cpp b/contrib/llvm-project/clang/lib/AST/ODRHash.cpp
index 735bcff8f113..04cbb09356d7 100644
--- a/contrib/llvm-project/clang/lib/AST/ODRHash.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ODRHash.cpp
@@ -150,6 +150,7 @@ void ODRHash::AddTemplateName(TemplateName Name) {
case TemplateName::DependentTemplate:
case TemplateName::SubstTemplateTemplateParm:
case TemplateName::SubstTemplateTemplateParmPack:
+ case TemplateName::UsingTemplate:
break;
}
}
diff --git a/contrib/llvm-project/clang/lib/AST/OSLog.cpp b/contrib/llvm-project/clang/lib/AST/OSLog.cpp
index 4cc5def0651f..40fa8c3802c3 100644
--- a/contrib/llvm-project/clang/lib/AST/OSLog.cpp
+++ b/contrib/llvm-project/clang/lib/AST/OSLog.cpp
@@ -201,7 +201,7 @@ bool clang::analyze_os_log::computeOSLogBufferLayout(
}
const StringLiteral *Lit = cast<StringLiteral>(StringArg->IgnoreParenCasts());
- assert(Lit && (Lit->isAscii() || Lit->isUTF8()));
+ assert(Lit && (Lit->isOrdinary() || Lit->isUTF8()));
StringRef Data = Lit->getString();
OSLogFormatStringHandler H(VarArgs);
ParsePrintfString(H, Data.begin(), Data.end(), Ctx.getLangOpts(),
diff --git a/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp b/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp
index 1bd049b88005..dc2d90e366bc 100644
--- a/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp
+++ b/contrib/llvm-project/clang/lib/AST/OpenMPClause.cpp
@@ -146,6 +146,7 @@ const OMPClauseWithPreInit *OMPClauseWithPreInit::get(const OMPClause *C) {
case OMPC_use_device_ptr:
case OMPC_use_device_addr:
case OMPC_is_device_ptr:
+ case OMPC_has_device_addr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
case OMPC_reverse_offload:
@@ -244,6 +245,7 @@ const OMPClauseWithPostUpdate *OMPClauseWithPostUpdate::get(const OMPClause *C)
case OMPC_use_device_ptr:
case OMPC_use_device_addr:
case OMPC_is_device_ptr:
+ case OMPC_has_device_addr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
case OMPC_reverse_offload:
@@ -1038,19 +1040,19 @@ OMPDepobjClause *OMPDepobjClause::CreateEmpty(const ASTContext &C) {
OMPDependClause *
OMPDependClause::Create(const ASTContext &C, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation EndLoc,
- Expr *DepModifier, OpenMPDependClauseKind DepKind,
- SourceLocation DepLoc, SourceLocation ColonLoc,
+ DependDataTy Data, Expr *DepModifier,
ArrayRef<Expr *> VL, unsigned NumLoops) {
void *Mem = C.Allocate(
totalSizeToAlloc<Expr *>(VL.size() + /*depend-modifier*/ 1 + NumLoops),
alignof(OMPDependClause));
OMPDependClause *Clause = new (Mem)
OMPDependClause(StartLoc, LParenLoc, EndLoc, VL.size(), NumLoops);
- Clause->setVarRefs(VL);
- Clause->setDependencyKind(DepKind);
- Clause->setDependencyLoc(DepLoc);
- Clause->setColonLoc(ColonLoc);
+ Clause->setDependencyKind(Data.DepKind);
+ Clause->setDependencyLoc(Data.DepLoc);
+ Clause->setColonLoc(Data.ColonLoc);
+ Clause->setOmpAllMemoryLoc(Data.OmpAllMemoryLoc);
Clause->setModifier(DepModifier);
+ Clause->setVarRefs(VL);
for (unsigned I = 0 ; I < NumLoops; ++I)
Clause->setLoopData(I, nullptr);
return Clause;
@@ -1432,6 +1434,53 @@ OMPIsDevicePtrClause::CreateEmpty(const ASTContext &C,
return new (Mem) OMPIsDevicePtrClause(Sizes);
}
+OMPHasDeviceAddrClause *
+OMPHasDeviceAddrClause::Create(const ASTContext &C, const OMPVarListLocTy &Locs,
+ ArrayRef<Expr *> Vars,
+ ArrayRef<ValueDecl *> Declarations,
+ MappableExprComponentListsRef ComponentLists) {
+ OMPMappableExprListSizeTy Sizes;
+ Sizes.NumVars = Vars.size();
+ Sizes.NumUniqueDeclarations = getUniqueDeclarationsTotalNumber(Declarations);
+ Sizes.NumComponentLists = ComponentLists.size();
+ Sizes.NumComponents = getComponentsTotalNumber(ComponentLists);
+
+ // We need to allocate:
+ // NumVars x Expr* - we have an original list expression for each clause list
+ // entry.
+ // NumUniqueDeclarations x ValueDecl* - unique base declarations associated
+ // with each component list.
+ // (NumUniqueDeclarations + NumComponentLists) x unsigned - we specify the
+ // number of lists for each unique declaration and the size of each component
+ // list.
+ // NumComponents x MappableComponent - the total of all the components in all
+ // the lists.
+ void *Mem = C.Allocate(
+ totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
+ OMPClauseMappableExprCommon::MappableComponent>(
+ Sizes.NumVars, Sizes.NumUniqueDeclarations,
+ Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
+ Sizes.NumComponents));
+
+ auto *Clause = new (Mem) OMPHasDeviceAddrClause(Locs, Sizes);
+
+ Clause->setVarRefs(Vars);
+ Clause->setClauseInfo(Declarations, ComponentLists);
+ return Clause;
+}
+
+OMPHasDeviceAddrClause *
+OMPHasDeviceAddrClause::CreateEmpty(const ASTContext &C,
+ const OMPMappableExprListSizeTy &Sizes) {
+ void *Mem = C.Allocate(
+ totalSizeToAlloc<Expr *, ValueDecl *, unsigned,
+ OMPClauseMappableExprCommon::MappableComponent>(
+ Sizes.NumVars, Sizes.NumUniqueDeclarations,
+ Sizes.NumUniqueDeclarations + Sizes.NumComponentLists,
+ Sizes.NumComponents));
+ return new (Mem) OMPHasDeviceAddrClause(Sizes);
+}
+
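The new clause reuses the mappable-expression storage layout of is_device_ptr. What it models in source, as an OpenMP 5.1 sketch (the setup via use_device_addr is assumed context, not part of the clause itself):

    int x;
    void bump() {
      // Precondition: x is already device-resident, e.g. inside an
      // enclosing 'omp target data use_device_addr(x)' region.
      #pragma omp target has_device_addr(x)
      x += 1;
    }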
OMPNontemporalClause *OMPNontemporalClause::Create(const ASTContext &C,
SourceLocation StartLoc,
SourceLocation LParenLoc,
@@ -2134,11 +2183,23 @@ void OMPClausePrinter::VisitOMPDependClause(OMPDependClause *Node) {
DepModifier->printPretty(OS, nullptr, Policy);
OS << ", ";
}
- OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(),
- Node->getDependencyKind());
- if (!Node->varlist_empty()) {
+ OpenMPDependClauseKind DepKind = Node->getDependencyKind();
+ OpenMPDependClauseKind PrintKind = DepKind;
+ bool IsOmpAllMemory = false;
+ if (PrintKind == OMPC_DEPEND_outallmemory) {
+ PrintKind = OMPC_DEPEND_out;
+ IsOmpAllMemory = true;
+ } else if (PrintKind == OMPC_DEPEND_inoutallmemory) {
+ PrintKind = OMPC_DEPEND_inout;
+ IsOmpAllMemory = true;
+ }
+ OS << getOpenMPSimpleClauseTypeName(Node->getClauseKind(), PrintKind);
+ if (!Node->varlist_empty() || IsOmpAllMemory)
OS << " :";
- VisitOMPClauseList(Node, ' ');
+ VisitOMPClauseList(Node, ' ');
+ if (IsOmpAllMemory) {
+ OS << (Node->varlist_empty() ? " " : ",");
+ OS << "omp_all_memory";
}
OS << ")";
}
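The two reserved-locator kinds print back as their base kinds with omp_all_memory appended to the list, mirroring the accepted source forms:

    void g(int &x) {
      #pragma omp task depend(out : omp_all_memory)
      { }
      #pragma omp task depend(inout : x, omp_all_memory)
      x = 1;
    }

The printer emits the variable list first and then appends omp_all_memory, so round-tripped output matches the second form modulo whitespace.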
@@ -2259,6 +2320,14 @@ void OMPClausePrinter::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *Node) {
}
}
+void OMPClausePrinter::VisitOMPHasDeviceAddrClause(OMPHasDeviceAddrClause *Node) {
+ if (!Node->varlist_empty()) {
+ OS << "has_device_addr";
+ VisitOMPClauseList(Node, '(');
+ OS << ")";
+ }
+}
+
void OMPClausePrinter::VisitOMPNontemporalClause(OMPNontemporalClause *Node) {
if (!Node->varlist_empty()) {
OS << "nontemporal";
diff --git a/contrib/llvm-project/clang/lib/AST/ParentMapContext.cpp b/contrib/llvm-project/clang/lib/AST/ParentMapContext.cpp
index d216be5b59e8..e0d4700e4b10 100644
--- a/contrib/llvm-project/clang/lib/AST/ParentMapContext.cpp
+++ b/contrib/llvm-project/clang/lib/AST/ParentMapContext.cpp
@@ -330,6 +330,9 @@ template <>
DynTypedNode createDynTypedNode(const NestedNameSpecifierLoc &Node) {
return DynTypedNode::create(Node);
}
+template <> DynTypedNode createDynTypedNode(const ObjCProtocolLoc &Node) {
+ return DynTypedNode::create(Node);
+}
/// @}
/// A \c RecursiveASTVisitor that builds a map from nodes to their
@@ -398,11 +401,14 @@ private:
}
}
+ template <typename T> static bool isNull(T Node) { return !Node; }
+ static bool isNull(ObjCProtocolLoc Node) { return false; }
+
template <typename T, typename MapNodeTy, typename BaseTraverseFn,
typename MapTy>
bool TraverseNode(T Node, MapNodeTy MapNode, BaseTraverseFn BaseTraverse,
MapTy *Parents) {
- if (!Node)
+ if (isNull(Node))
return true;
addParent(MapNode, Parents);
ParentStack.push_back(createDynTypedNode(Node));
@@ -433,6 +439,12 @@ private:
AttrNode, AttrNode, [&] { return VisitorBase::TraverseAttr(AttrNode); },
&Map.PointerParents);
}
+ bool TraverseObjCProtocolLoc(ObjCProtocolLoc ProtocolLocNode) {
+ return TraverseNode(
+ ProtocolLocNode, DynTypedNode::create(ProtocolLocNode),
+ [&] { return VisitorBase::TraverseObjCProtocolLoc(ProtocolLocNode); },
+ &Map.OtherParents);
+ }
// Using generic TraverseNode for Stmt would prevent data-recursion.
bool dataTraverseStmtPre(Stmt *StmtNode) {
diff --git a/contrib/llvm-project/clang/lib/AST/QualTypeNames.cpp b/contrib/llvm-project/clang/lib/AST/QualTypeNames.cpp
index 561757b1ba64..26aaa96a1dc6 100644
--- a/contrib/llvm-project/clang/lib/AST/QualTypeNames.cpp
+++ b/contrib/llvm-project/clang/lib/AST/QualTypeNames.cpp
@@ -80,8 +80,12 @@ static bool getFullyQualifiedTemplateName(const ASTContext &Ctx,
Ctx, ArgTDecl, true, WithGlobalNsPrefix);
}
if (NNS) {
- TName = Ctx.getQualifiedTemplateName(NNS,
- /*TemplateKeyword=*/false, ArgTDecl);
+ TemplateName UnderlyingTN(ArgTDecl);
+ if (UsingShadowDecl *USD = TName.getAsUsingShadowDecl())
+ UnderlyingTN = TemplateName(USD);
+ TName =
+ Ctx.getQualifiedTemplateName(NNS,
+ /*TemplateKeyword=*/false, UnderlyingTN);
Changed = true;
}
return Changed;
diff --git a/contrib/llvm-project/clang/lib/AST/Randstruct.cpp b/contrib/llvm-project/clang/lib/AST/Randstruct.cpp
new file mode 100644
index 000000000000..99c665f420e6
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/AST/Randstruct.cpp
@@ -0,0 +1,231 @@
+//===--- Randstruct.cpp ---------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the implementation for Clang's structure field layout
+// randomization.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Randstruct.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/ASTDiagnostic.h"
+#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h" // For StaticAssertDecl
+#include "clang/Basic/Diagnostic.h"
+#include "llvm/ADT/SmallVector.h"
+
+#include <algorithm>
+#include <random>
+#include <set>
+#include <sstream>
+#include <string>
+
+using clang::ASTContext;
+using clang::FieldDecl;
+using llvm::SmallVector;
+
+namespace {
+
+// FIXME: Replace this with some discovery once that mechanism exists.
+enum { CACHE_LINE = 64 };
+
+// The Bucket class holds the struct fields we're trying to fill to a
+// cache-line.
+class Bucket {
+ SmallVector<FieldDecl *, 64> Fields;
+ int Size = 0;
+
+public:
+ virtual ~Bucket() = default;
+
+ SmallVector<FieldDecl *, 64> &fields() { return Fields; }
+ void addField(FieldDecl *Field, int FieldSize);
+ virtual bool canFit(int FieldSize) const {
+ return Size + FieldSize <= CACHE_LINE;
+ }
+ virtual bool isBitfieldRun() const { return false; }
+ bool full() const { return Size >= CACHE_LINE; }
+};
+
+void Bucket::addField(FieldDecl *Field, int FieldSize) {
+ Size += FieldSize;
+ Fields.push_back(Field);
+}
+
+struct BitfieldRunBucket : public Bucket {
+ bool canFit(int FieldSize) const override { return true; }
+ bool isBitfieldRun() const override { return true; }
+};
+
+void randomizeStructureLayoutImpl(const ASTContext &Context,
+ llvm::SmallVectorImpl<FieldDecl *> &FieldsOut,
+ std::mt19937 &RNG) {
+ // All of the Buckets produced by the best-effort cache-line algorithm.
+ SmallVector<std::unique_ptr<Bucket>, 16> Buckets;
+
+ // The current bucket of fields that we are trying to fill to a cache-line.
+ std::unique_ptr<Bucket> CurrentBucket;
+
+ // The current bucket containing the run of adjacent bitfields to ensure they
+ // remain adjacent.
+ std::unique_ptr<BitfieldRunBucket> CurrentBitfieldRun;
+
+ // Tracks the number of fields that we failed to fit to the current bucket,
+ // and thus still need to be added later.
+ size_t Skipped = 0;
+
+ while (!FieldsOut.empty()) {
+ // If we've Skipped more fields than we have remaining to place, that means
+ // that they can't fit in our current bucket, and we need to start a new
+ // one.
+ if (Skipped >= FieldsOut.size()) {
+ Skipped = 0;
+ Buckets.push_back(std::move(CurrentBucket));
+ }
+
+ // Take the first field that needs to be put in a bucket.
+ auto FieldIter = FieldsOut.begin();
+ FieldDecl *FD = *FieldIter;
+
+ if (FD->isBitField() && !FD->isZeroLengthBitField(Context)) {
+ // Start a bitfield run if this is the first bitfield we have found.
+ if (!CurrentBitfieldRun)
+ CurrentBitfieldRun = std::make_unique<BitfieldRunBucket>();
+
+ // We've placed the field in the bitfield run, so remove it from the
+ // vector of fields still awaiting a bucket (FieldsOut).
+ CurrentBitfieldRun->addField(FD, /*FieldSize is irrelevant here*/ 1);
+ FieldsOut.erase(FieldIter);
+ continue;
+ }
+
+ // Else, current field is not a bitfield. If we were previously in a
+ // bitfield run, end it.
+ if (CurrentBitfieldRun)
+ Buckets.push_back(std::move(CurrentBitfieldRun));
+
+ // If we don't have a bucket, make one.
+ if (!CurrentBucket)
+ CurrentBucket = std::make_unique<Bucket>();
+
+ uint64_t Width = Context.getTypeInfo(FD->getType()).Width;
+ if (Width >= CACHE_LINE) {
+ std::unique_ptr<Bucket> OverSized = std::make_unique<Bucket>();
+ OverSized->addField(FD, Width);
+ FieldsOut.erase(FieldIter);
+ Buckets.push_back(std::move(OverSized));
+ continue;
+ }
+
+ // If it fits, add it.
+ if (CurrentBucket->canFit(Width)) {
+ CurrentBucket->addField(FD, Width);
+ FieldsOut.erase(FieldIter);
+
+ // If it's now full, tie off the bucket.
+ if (CurrentBucket->full()) {
+ Skipped = 0;
+ Buckets.push_back(std::move(CurrentBucket));
+ }
+ } else {
+ // We can't fit it in our current bucket. Move to the end for processing
+ // later.
+ ++Skipped; // Mark it skipped.
+ FieldsOut.push_back(FD);
+ FieldsOut.erase(FieldIter);
+ }
+ }
+
+ // Done processing the fields awaiting a bucket.
+
+ // If we were filling a bucket, tie it off.
+ if (CurrentBucket)
+ Buckets.push_back(std::move(CurrentBucket));
+
+ // If we were processing a bitfield run bucket, tie it off.
+ if (CurrentBitfieldRun)
+ Buckets.push_back(std::move(CurrentBitfieldRun));
+
+ std::shuffle(std::begin(Buckets), std::end(Buckets), RNG);
+
+ // Produce the new ordering of the elements from the Buckets.
+ SmallVector<FieldDecl *, 16> FinalOrder;
+ for (const std::unique_ptr<Bucket> &B : Buckets) {
+ llvm::SmallVectorImpl<FieldDecl *> &RandFields = B->fields();
+ if (!B->isBitfieldRun())
+ std::shuffle(std::begin(RandFields), std::end(RandFields), RNG);
+
+ FinalOrder.insert(FinalOrder.end(), RandFields.begin(), RandFields.end());
+ }
+
+ FieldsOut = FinalOrder;
+}
+
+} // anonymous namespace
+
+namespace clang {
+namespace randstruct {
+
+bool randomizeStructureLayout(const ASTContext &Context, RecordDecl *RD,
+ SmallVectorImpl<Decl *> &FinalOrdering) {
+ SmallVector<FieldDecl *, 64> RandomizedFields;
+ SmallVector<Decl *, 8> PostRandomizedFields;
+
+ unsigned TotalNumFields = 0;
+ for (Decl *D : RD->decls()) {
+ ++TotalNumFields;
+ if (auto *FD = dyn_cast<FieldDecl>(D))
+ RandomizedFields.push_back(FD);
+ else if (isa<StaticAssertDecl>(D) || isa<IndirectFieldDecl>(D))
+ PostRandomizedFields.push_back(D);
+ else
+ FinalOrdering.push_back(D);
+ }
+
+ if (RandomizedFields.empty())
+ return false;
+
+ // The struct might end with a flexible array member or an array of size
+ // 0 or 1, in which case we don't want to randomize it.
+ FieldDecl *FlexibleArray =
+ RD->hasFlexibleArrayMember() ? RandomizedFields.pop_back_val() : nullptr;
+ if (!FlexibleArray) {
+ if (const auto *CA =
+ dyn_cast<ConstantArrayType>(RandomizedFields.back()->getType()))
+ if (CA->getSize().sle(2))
+ FlexibleArray = RandomizedFields.pop_back_val();
+ }
+
+ std::string Seed =
+ Context.getLangOpts().RandstructSeed + RD->getNameAsString();
+ std::seed_seq SeedSeq(Seed.begin(), Seed.end());
+ std::mt19937 RNG(SeedSeq);
+
+ randomizeStructureLayoutImpl(Context, RandomizedFields, RNG);
+
+ // Splice the randomized decls into the final ordering.
+ FinalOrdering.insert(FinalOrdering.end(), RandomizedFields.begin(),
+ RandomizedFields.end());
+
+ // Add fields that belong towards the end of the RecordDecl.
+ FinalOrdering.insert(FinalOrdering.end(), PostRandomizedFields.begin(),
+ PostRandomizedFields.end());
+
+ // Add back the flexible array.
+ if (FlexibleArray)
+ FinalOrdering.push_back(FlexibleArray);
+
+ assert(TotalNumFields == FinalOrdering.size() &&
+ "Decl count has been altered after Randstruct randomization!");
+ (void)TotalNumFields;
+ return true;
+}
+
+} // end namespace randstruct
+} // end namespace clang
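From a user's perspective: given a randomization seed, records carrying the attribute have their field order shuffled at compile time, with bitfield runs kept adjacent and a trailing flexible array kept last, as implemented above. A usage sketch in C (flag and attribute spellings as I understand this feature; verify against the driver documentation):

    /* compile with: clang -frandomize-layout-seed=0xcafe -c creds.c */
    struct __attribute__((randomize_layout)) creds {
      int uid;
      int gid;
      unsigned ready : 1; /* bitfield run: stays adjacent */
      unsigned valid : 1;
      char tag[];         /* flexible array member: stays last */
    };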
diff --git a/contrib/llvm-project/clang/lib/AST/RawCommentList.cpp b/contrib/llvm-project/clang/lib/AST/RawCommentList.cpp
index a8d15036cab9..c3beb2322888 100644
--- a/contrib/llvm-project/clang/lib/AST/RawCommentList.cpp
+++ b/contrib/llvm-project/clang/lib/AST/RawCommentList.cpp
@@ -16,6 +16,7 @@
#include "clang/AST/CommentSema.h"
#include "clang/Basic/CharInfo.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Allocator.h"
using namespace clang;
@@ -362,6 +363,24 @@ std::string RawComment::getFormattedText(const SourceManager &SourceMgr,
if (CommentText.empty())
return "";
+ std::string Result;
+ for (const RawComment::CommentLine &Line :
+ getFormattedLines(SourceMgr, Diags))
+ Result += Line.Text + "\n";
+
+ auto LastChar = Result.find_last_not_of('\n');
+ Result.erase(LastChar + 1, Result.size());
+
+ return Result;
+}
+
+std::vector<RawComment::CommentLine>
+RawComment::getFormattedLines(const SourceManager &SourceMgr,
+ DiagnosticsEngine &Diags) const {
+ llvm::StringRef CommentText = getRawText(SourceMgr);
+ if (CommentText.empty())
+ return {};
+
llvm::BumpPtrAllocator Allocator;
// We do not parse any commands, so CommentOptions are ignored by
// comments::Lexer. Therefore, we just use default-constructed options.
@@ -371,13 +390,23 @@ std::string RawComment::getFormattedText(const SourceManager &SourceMgr,
CommentText.begin(), CommentText.end(),
/*ParseCommands=*/false);
- std::string Result;
+ std::vector<RawComment::CommentLine> Result;
// A column number of the first non-whitespace token in the comment text.
// We skip whitespace up to this column, but keep the whitespace after this
// column. IndentColumn is calculated when lexing the first line and reused
// for the rest of lines.
unsigned IndentColumn = 0;
+ // Record the line number of the last processed comment line.
+ // For block-style comments, an extra newline token will be produced after
+ // the end-comment marker, e.g.:
+ // /** This is a multi-line comment block.
+ // The lexer will produce two newline tokens here > */
+ // PreviousLine records the line number at which we previously saw a newline
+ // token and recorded a comment line. If we see another newline token on the
+ // same line, don't record anything in between.
+ unsigned PreviousLine = 0;
+
// Processes one line of the comment and adds it to the result.
// Handles skipping the indent at the start of the line.
// Returns false when eof is reached and true otherwise.
@@ -389,9 +418,14 @@ std::string RawComment::getFormattedText(const SourceManager &SourceMgr,
if (Tok.is(comments::tok::eof))
return false;
if (Tok.is(comments::tok::newline)) {
- Result += "\n";
+ PresumedLoc Loc = SourceMgr.getPresumedLoc(Tok.getLocation());
+ if (Loc.getLine() != PreviousLine) {
+ Result.emplace_back("", Loc, Loc);
+ PreviousLine = Loc.getLine();
+ }
return true;
}
+ SmallString<124> Line;
llvm::StringRef TokText = L.getSpelling(Tok, SourceMgr);
bool LocInvalid = false;
unsigned TokColumn =
@@ -417,32 +451,35 @@ std::string RawComment::getFormattedText(const SourceManager &SourceMgr,
WhitespaceLen,
std::max<int>(static_cast<int>(IndentColumn) - TokColumn, 0));
llvm::StringRef Trimmed = TokText.drop_front(SkipLen);
- Result += Trimmed;
+ Line += Trimmed;
+ // Get the beginning location of the adjusted comment line.
+ PresumedLoc Begin =
+ SourceMgr.getPresumedLoc(Tok.getLocation().getLocWithOffset(SkipLen));
+
// Lex all tokens in the rest of the line.
for (L.lex(Tok); Tok.isNot(comments::tok::eof); L.lex(Tok)) {
if (Tok.is(comments::tok::newline)) {
- Result += "\n";
+ // Get the ending location of the comment line.
+ PresumedLoc End = SourceMgr.getPresumedLoc(Tok.getLocation());
+ if (End.getLine() != PreviousLine) {
+ Result.emplace_back(Line, Begin, End);
+ PreviousLine = End.getLine();
+ }
return true;
}
- Result += L.getSpelling(Tok, SourceMgr);
+ Line += L.getSpelling(Tok, SourceMgr);
}
+ PresumedLoc End = SourceMgr.getPresumedLoc(Tok.getLocation());
+ Result.emplace_back(Line, Begin, End);
// We've reached the end of file token.
return false;
};
- auto DropTrailingNewLines = [](std::string &Str) {
- while (!Str.empty() && Str.back() == '\n')
- Str.pop_back();
- };
-
// Process first line separately to remember indent for the following lines.
- if (!LexLine(/*IsFirstLine=*/true)) {
- DropTrailingNewLines(Result);
+ if (!LexLine(/*IsFirstLine=*/true))
return Result;
- }
// Process the rest of the lines.
while (LexLine(/*IsFirstLine=*/false))
;
- DropTrailingNewLines(Result);
return Result;
}
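getFormattedText is now a thin wrapper that joins the per-line records, while the new getFormattedLines exposes each cleaned-up line with its presumed begin and end locations. A consumer sketch (assuming CommentLine's members are Text, Begin, and End, as the emplace_back calls suggest):

    // RC is a const RawComment *; SourceMgr and Diags as in the calls above.
    for (const RawComment::CommentLine &Line :
         RC->getFormattedLines(SourceMgr, Diags))
      llvm::outs() << Line.Begin.getLine() << ": " << Line.Text << "\n";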
diff --git a/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp b/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp
index 61a30ead165e..6f3ede2ce42a 100644
--- a/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/AST/RecordLayoutBuilder.cpp
@@ -1223,7 +1223,7 @@ ItaniumRecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
// Per GCC's documentation, it only applies to non-static data members.
return (Packed && ((Context.getLangOpts().getClangABICompat() <=
LangOptions::ClangABI::Ver6) ||
- Context.getTargetInfo().getTriple().isPS4() ||
+ Context.getTargetInfo().getTriple().isPS() ||
Context.getTargetInfo().getTriple().isOSAIX()))
? CharUnits::One()
: UnpackedAlign;
@@ -1261,7 +1261,9 @@ ItaniumRecordLayoutBuilder::LayoutBase(const BaseSubobjectInfo *Base) {
(!HasExternalLayout || Offset == CharUnits::Zero()) &&
EmptySubobjects->CanPlaceBaseAtOffset(Base, CharUnits::Zero())) {
setSize(std::max(getSize(), Layout.getSize()));
- UpdateAlignment(BaseAlign, UnpackedAlignTo, PreferredBaseAlign);
+ // On PS4/PS5, don't update the alignment, to preserve compatibility.
+ if (!Context.getTargetInfo().getTriple().isPS())
+ UpdateAlignment(BaseAlign, UnpackedAlignTo, PreferredBaseAlign);
return CharUnits::Zero();
}
@@ -1887,7 +1889,12 @@ void ItaniumRecordLayoutBuilder::LayoutField(const FieldDecl *D,
UnfilledBitsInLastUnit = 0;
LastBitfieldStorageUnitSize = 0;
- bool FieldPacked = Packed || D->hasAttr<PackedAttr>();
+ llvm::Triple Target = Context.getTargetInfo().getTriple();
+ bool FieldPacked = (Packed && (!FieldClass || FieldClass->isPOD() ||
+ Context.getLangOpts().getClangABICompat() <=
+ LangOptions::ClangABI::Ver14 ||
+ Target.isPS() || Target.isOSDarwin())) ||
+ D->hasAttr<PackedAttr>();
AlignRequirementKind AlignRequirement = AlignRequirementKind::None;
CharUnits FieldSize;
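The widened condition implements an ABI fix: __attribute__((packed)) no longer lowers the alignment of a non-POD field, except under -fclang-abi-compat=14 (or lower) and on PS/Darwin targets, which keep the old behavior. A sketch of an affected layout:

    struct NonPOD { NonPOD(); int i; };
    struct __attribute__((packed)) S {
      char c;
      NonPOD n; // with the new rule, n keeps its natural 4-byte alignment
                // and lands at offset 4 rather than being packed to offset 1
    };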
@@ -3540,7 +3547,7 @@ static void DumpRecordLayout(raw_ostream &OS, const RecordDecl *RD,
auto CXXRD = dyn_cast<CXXRecordDecl>(RD);
PrintOffset(OS, Offset, IndentLevel);
- OS << C.getTypeDeclType(const_cast<RecordDecl*>(RD)).getAsString();
+ OS << C.getTypeDeclType(const_cast<RecordDecl *>(RD));
if (Description)
OS << ' ' << Description;
if (CXXRD && CXXRD->isEmpty())
@@ -3625,7 +3632,7 @@ static void DumpRecordLayout(raw_ostream &OS, const RecordDecl *RD,
const QualType &FieldType = C.getLangOpts().DumpRecordLayoutsCanonical
? Field.getType().getCanonicalType()
: Field.getType();
- OS << FieldType.getAsString() << ' ' << Field << '\n';
+ OS << FieldType << ' ' << Field << '\n';
}
// Dump virtual bases.
@@ -3691,7 +3698,7 @@ void ASTContext::DumpRecordLayout(const RecordDecl *RD, raw_ostream &OS,
// in libFrontend.
const ASTRecordLayout &Info = getASTRecordLayout(RD);
- OS << "Type: " << getTypeDeclType(RD).getAsString() << "\n";
+ OS << "Type: " << getTypeDeclType(RD) << "\n";
OS << "\nLayout: ";
OS << "<ASTRecordLayout\n";
OS << " Size:" << toBits(Info.getSize()) << "\n";
diff --git a/contrib/llvm-project/clang/lib/AST/Stmt.cpp b/contrib/llvm-project/clang/lib/AST/Stmt.cpp
index be19d3b2cce2..8eae04d0d9fd 100644
--- a/contrib/llvm-project/clang/lib/AST/Stmt.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Stmt.cpp
@@ -361,12 +361,14 @@ int64_t Stmt::getID(const ASTContext &Context) const {
return Context.getAllocator().identifyKnownAlignedObject<Stmt>(this);
}
-CompoundStmt::CompoundStmt(ArrayRef<Stmt *> Stmts, SourceLocation LB,
- SourceLocation RB)
- : Stmt(CompoundStmtClass), RBraceLoc(RB) {
+CompoundStmt::CompoundStmt(ArrayRef<Stmt *> Stmts, FPOptionsOverride FPFeatures,
+ SourceLocation LB, SourceLocation RB)
+ : Stmt(CompoundStmtClass), LBraceLoc(LB), RBraceLoc(RB) {
CompoundStmtBits.NumStmts = Stmts.size();
+ CompoundStmtBits.HasFPFeatures = FPFeatures.requiresTrailingStorage();
setStmts(Stmts);
- CompoundStmtBits.LBraceLoc = LB;
+ if (hasStoredFPFeatures())
+ setStoredFPFeatures(FPFeatures);
}
void CompoundStmt::setStmts(ArrayRef<Stmt *> Stmts) {
@@ -377,18 +379,23 @@ void CompoundStmt::setStmts(ArrayRef<Stmt *> Stmts) {
}
CompoundStmt *CompoundStmt::Create(const ASTContext &C, ArrayRef<Stmt *> Stmts,
+ FPOptionsOverride FPFeatures,
SourceLocation LB, SourceLocation RB) {
void *Mem =
- C.Allocate(totalSizeToAlloc<Stmt *>(Stmts.size()), alignof(CompoundStmt));
- return new (Mem) CompoundStmt(Stmts, LB, RB);
+ C.Allocate(totalSizeToAlloc<Stmt *, FPOptionsOverride>(
+ Stmts.size(), FPFeatures.requiresTrailingStorage()),
+ alignof(CompoundStmt));
+ return new (Mem) CompoundStmt(Stmts, FPFeatures, LB, RB);
}
-CompoundStmt *CompoundStmt::CreateEmpty(const ASTContext &C,
- unsigned NumStmts) {
- void *Mem =
- C.Allocate(totalSizeToAlloc<Stmt *>(NumStmts), alignof(CompoundStmt));
+CompoundStmt *CompoundStmt::CreateEmpty(const ASTContext &C, unsigned NumStmts,
+ bool HasFPFeatures) {
+ void *Mem = C.Allocate(
+ totalSizeToAlloc<Stmt *, FPOptionsOverride>(NumStmts, HasFPFeatures),
+ alignof(CompoundStmt));
CompoundStmt *New = new (Mem) CompoundStmt(EmptyShell());
New->CompoundStmtBits.NumStmts = NumStmts;
+ New->CompoundStmtBits.HasFPFeatures = HasFPFeatures;
return New;
}
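Note: CompoundStmt now reserves trailing storage for an FPOptionsOverride only
when the override set actually needs it, so plain blocks pay nothing extra. A
hedged usage sketch (Ctx, Body, LB and RB are assumed to be in scope):

    // Illustrative: record a scoped FP-environment change on the block, as a
    // `#pragma STDC FENV_ACCESS ON` would. Trailing storage is allocated only
    // because requiresTrailingStorage() is true for this override set.
    FPOptionsOverride FPO;
    FPO.setAllowFEnvAccessOverride(true);
    CompoundStmt *CS = CompoundStmt::Create(Ctx, Body, FPO, LB, RB);
    assert(CS->hasStoredFPFeatures());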
diff --git a/contrib/llvm-project/clang/lib/AST/StmtCXX.cpp b/contrib/llvm-project/clang/lib/AST/StmtCXX.cpp
index 060d090fc06a..33b0421ad101 100644
--- a/contrib/llvm-project/clang/lib/AST/StmtCXX.cpp
+++ b/contrib/llvm-project/clang/lib/AST/StmtCXX.cpp
@@ -118,7 +118,6 @@ CoroutineBodyStmt::CoroutineBodyStmt(CoroutineBodyStmt::CtorArgs const &Args)
SubStmts[CoroutineBodyStmt::Allocate] = Args.Allocate;
SubStmts[CoroutineBodyStmt::Deallocate] = Args.Deallocate;
SubStmts[CoroutineBodyStmt::ReturnValue] = Args.ReturnValue;
- SubStmts[CoroutineBodyStmt::ResultDecl] = Args.ResultDecl;
SubStmts[CoroutineBodyStmt::ReturnStmt] = Args.ReturnStmt;
SubStmts[CoroutineBodyStmt::ReturnStmtOnAllocFailure] =
Args.ReturnStmtOnAllocFailure;
diff --git a/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp b/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp
index 8a9f73d3dbf0..e0a4221db7ec 100644
--- a/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/AST/StmtOpenMP.cpp
@@ -682,6 +682,22 @@ OMPParallelMasterDirective::CreateEmpty(const ASTContext &C,
C, NumClauses, /*HasAssociatedStmt=*/true, /*NumChildren=*/1);
}
+OMPParallelMaskedDirective *OMPParallelMaskedDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef) {
+ auto *Dir = createDirective<OMPParallelMaskedDirective>(
+ C, Clauses, AssociatedStmt, /*NumChildren=*/1, StartLoc, EndLoc);
+ Dir->setTaskReductionRefExpr(TaskRedRef);
+ return Dir;
+}
+
+OMPParallelMaskedDirective *
+OMPParallelMaskedDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses, EmptyShell) {
+ return createEmptyDirective<OMPParallelMaskedDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true, /*NumChildren=*/1);
+}
+
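Note: `masked` is OpenMP 5.1's successor to the deprecated `master` construct,
and the new directive classes here and below follow the same factory pattern
as their `master` counterparts. Source forms they model (illustrative sketch):

    void f(int n, float *a) {
    #pragma omp parallel masked
      { /* run by the masked thread, the primary thread by default */ }

    #pragma omp masked taskloop simd
      for (int i = 0; i < n; ++i)
        a[i] += 1.0f;
    }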
OMPParallelSectionsDirective *OMPParallelSectionsDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *TaskRedRef,
@@ -863,18 +879,22 @@ OMPOrderedDirective *OMPOrderedDirective::CreateEmpty(const ASTContext &C,
!IsStandalone);
}
-OMPAtomicDirective *OMPAtomicDirective::Create(
- const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
- ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt, Expr *X, Expr *V,
- Expr *E, Expr *UE, bool IsXLHSInRHSPart, bool IsPostfixUpdate) {
+OMPAtomicDirective *
+OMPAtomicDirective::Create(const ASTContext &C, SourceLocation StartLoc,
+ SourceLocation EndLoc, ArrayRef<OMPClause *> Clauses,
+ Stmt *AssociatedStmt, Expressions Exprs) {
auto *Dir = createDirective<OMPAtomicDirective>(
- C, Clauses, AssociatedStmt, /*NumChildren=*/4, StartLoc, EndLoc);
- Dir->setX(X);
- Dir->setV(V);
- Dir->setExpr(E);
- Dir->setUpdateExpr(UE);
- Dir->IsXLHSInRHSPart = IsXLHSInRHSPart;
- Dir->IsPostfixUpdate = IsPostfixUpdate;
+ C, Clauses, AssociatedStmt, /*NumChildren=*/7, StartLoc, EndLoc);
+ Dir->setX(Exprs.X);
+ Dir->setV(Exprs.V);
+ Dir->setR(Exprs.R);
+ Dir->setExpr(Exprs.E);
+ Dir->setUpdateExpr(Exprs.UE);
+ Dir->setD(Exprs.D);
+ Dir->setCond(Exprs.Cond);
+ Dir->Flags.IsXLHSInRHSPart = Exprs.IsXLHSInRHSPart ? 1 : 0;
+ Dir->Flags.IsPostfixUpdate = Exprs.IsPostfixUpdate ? 1 : 0;
+ Dir->Flags.IsFailOnly = Exprs.IsFailOnly ? 1 : 0;
return Dir;
}
@@ -882,7 +902,7 @@ OMPAtomicDirective *OMPAtomicDirective::CreateEmpty(const ASTContext &C,
unsigned NumClauses,
EmptyShell) {
return createEmptyDirective<OMPAtomicDirective>(
- C, NumClauses, /*HasAssociatedStmt=*/true, /*NumChildren=*/4);
+ C, NumClauses, /*HasAssociatedStmt=*/true, /*NumChildren=*/7);
}
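Note: the child count grows from 4 to 7 because OpenMP 5.1 `atomic compare`
adds a condition (Cond), an alternate value (D), and a comparison result (R)
to the classic X/V/E/UE set, now passed as one Expressions bundle. Illustrative
source shapes that populate the new children (fragments shown for structure
only):

    void atomic_compare_demo(int *x, int e, int d, int *r) {
    #pragma omp atomic compare
      if (*x == e) { *x = d; }

    #pragma omp atomic compare capture
      { *r = (*x == e); if (*r) { *x = d; } }
    }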
OMPTargetDirective *OMPTargetDirective::Create(const ASTContext &C,
@@ -1156,6 +1176,51 @@ OMPMasterTaskLoopDirective::CreateEmpty(const ASTContext &C,
numLoopChildren(CollapsedNum, OMPD_master_taskloop), CollapsedNum);
}
+OMPMaskedTaskLoopDirective *OMPMaskedTaskLoopDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs, bool HasCancel) {
+ auto *Dir = createDirective<OMPMaskedTaskLoopDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_masked_taskloop), StartLoc, EndLoc,
+ CollapsedNum);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setDependentCounters(Exprs.DependentCounters);
+ Dir->setDependentInits(Exprs.DependentInits);
+ Dir->setFinalsConditions(Exprs.FinalsConditions);
+ Dir->setPreInits(Exprs.PreInits);
+ Dir->setHasCancel(HasCancel);
+ return Dir;
+}
+
+OMPMaskedTaskLoopDirective *
+OMPMaskedTaskLoopDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum, EmptyShell) {
+ return createEmptyDirective<OMPMaskedTaskLoopDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_masked_taskloop), CollapsedNum);
+}
+
OMPMasterTaskLoopSimdDirective *OMPMasterTaskLoopSimdDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
@@ -1200,6 +1265,50 @@ OMPMasterTaskLoopSimdDirective::CreateEmpty(const ASTContext &C,
numLoopChildren(CollapsedNum, OMPD_master_taskloop_simd), CollapsedNum);
}
+OMPMaskedTaskLoopSimdDirective *OMPMaskedTaskLoopSimdDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ auto *Dir = createDirective<OMPMaskedTaskLoopSimdDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_masked_taskloop_simd), StartLoc,
+ EndLoc, CollapsedNum);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setDependentCounters(Exprs.DependentCounters);
+ Dir->setDependentInits(Exprs.DependentInits);
+ Dir->setFinalsConditions(Exprs.FinalsConditions);
+ Dir->setPreInits(Exprs.PreInits);
+ return Dir;
+}
+
+OMPMaskedTaskLoopSimdDirective *
+OMPMaskedTaskLoopSimdDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum, EmptyShell) {
+ return createEmptyDirective<OMPMaskedTaskLoopSimdDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_masked_taskloop_simd), CollapsedNum);
+}
+
OMPParallelMasterTaskLoopDirective *OMPParallelMasterTaskLoopDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
@@ -1247,6 +1356,53 @@ OMPParallelMasterTaskLoopDirective::CreateEmpty(const ASTContext &C,
CollapsedNum);
}
+OMPParallelMaskedTaskLoopDirective *OMPParallelMaskedTaskLoopDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs, bool HasCancel) {
+ auto *Dir = createDirective<OMPParallelMaskedTaskLoopDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_parallel_masked_taskloop), StartLoc,
+ EndLoc, CollapsedNum);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setDependentCounters(Exprs.DependentCounters);
+ Dir->setDependentInits(Exprs.DependentInits);
+ Dir->setFinalsConditions(Exprs.FinalsConditions);
+ Dir->setPreInits(Exprs.PreInits);
+ Dir->setHasCancel(HasCancel);
+ return Dir;
+}
+
+OMPParallelMaskedTaskLoopDirective *
+OMPParallelMaskedTaskLoopDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell) {
+ return createEmptyDirective<OMPParallelMaskedTaskLoopDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_parallel_masked_taskloop),
+ CollapsedNum);
+}
+
OMPParallelMasterTaskLoopSimdDirective *
OMPParallelMasterTaskLoopSimdDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
@@ -1294,6 +1450,53 @@ OMPParallelMasterTaskLoopSimdDirective::CreateEmpty(const ASTContext &C,
CollapsedNum);
}
+OMPParallelMaskedTaskLoopSimdDirective *
+OMPParallelMaskedTaskLoopSimdDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ auto *Dir = createDirective<OMPParallelMaskedTaskLoopSimdDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_parallel_masked_taskloop_simd),
+ StartLoc, EndLoc, CollapsedNum);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setDependentCounters(Exprs.DependentCounters);
+ Dir->setDependentInits(Exprs.DependentInits);
+ Dir->setFinalsConditions(Exprs.FinalsConditions);
+ Dir->setPreInits(Exprs.PreInits);
+ return Dir;
+}
+
+OMPParallelMaskedTaskLoopSimdDirective *
+OMPParallelMaskedTaskLoopSimdDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell) {
+ return createEmptyDirective<OMPParallelMaskedTaskLoopSimdDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_parallel_masked_taskloop_simd),
+ CollapsedNum);
+}
+
OMPDistributeDirective *OMPDistributeDirective::Create(
const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
@@ -2132,3 +2335,181 @@ OMPGenericLoopDirective::CreateEmpty(const ASTContext &C, unsigned NumClauses,
C, NumClauses, /*HasAssociatedStmt=*/true,
numLoopChildren(CollapsedNum, OMPD_loop), CollapsedNum);
}
+
+OMPTeamsGenericLoopDirective *OMPTeamsGenericLoopDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ auto *Dir = createDirective<OMPTeamsGenericLoopDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_teams_loop), StartLoc, EndLoc,
+ CollapsedNum);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setDependentCounters(Exprs.DependentCounters);
+ Dir->setDependentInits(Exprs.DependentInits);
+ Dir->setFinalsConditions(Exprs.FinalsConditions);
+ Dir->setPreInits(Exprs.PreInits);
+ return Dir;
+}
+
+OMPTeamsGenericLoopDirective *
+OMPTeamsGenericLoopDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum, EmptyShell) {
+ return createEmptyDirective<OMPTeamsGenericLoopDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_teams_loop), CollapsedNum);
+}
+
+OMPTargetTeamsGenericLoopDirective *OMPTargetTeamsGenericLoopDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ auto *Dir = createDirective<OMPTargetTeamsGenericLoopDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_target_teams_loop), StartLoc, EndLoc,
+ CollapsedNum);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setDependentCounters(Exprs.DependentCounters);
+ Dir->setDependentInits(Exprs.DependentInits);
+ Dir->setFinalsConditions(Exprs.FinalsConditions);
+ Dir->setPreInits(Exprs.PreInits);
+ return Dir;
+}
+
+OMPTargetTeamsGenericLoopDirective *
+OMPTargetTeamsGenericLoopDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell) {
+ return createEmptyDirective<OMPTargetTeamsGenericLoopDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_target_teams_loop), CollapsedNum);
+}
+
+OMPParallelGenericLoopDirective *OMPParallelGenericLoopDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ auto *Dir = createDirective<OMPParallelGenericLoopDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_parallel_loop), StartLoc, EndLoc,
+ CollapsedNum);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setDependentCounters(Exprs.DependentCounters);
+ Dir->setDependentInits(Exprs.DependentInits);
+ Dir->setFinalsConditions(Exprs.FinalsConditions);
+ Dir->setPreInits(Exprs.PreInits);
+ return Dir;
+}
+
+OMPParallelGenericLoopDirective *OMPParallelGenericLoopDirective::CreateEmpty(
+ const ASTContext &C, unsigned NumClauses, unsigned CollapsedNum,
+ EmptyShell) {
+ return createEmptyDirective<OMPParallelGenericLoopDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_parallel_loop), CollapsedNum);
+}
+
+OMPTargetParallelGenericLoopDirective *
+OMPTargetParallelGenericLoopDirective::Create(
+ const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc,
+ unsigned CollapsedNum, ArrayRef<OMPClause *> Clauses, Stmt *AssociatedStmt,
+ const HelperExprs &Exprs) {
+ auto *Dir = createDirective<OMPTargetParallelGenericLoopDirective>(
+ C, Clauses, AssociatedStmt,
+ numLoopChildren(CollapsedNum, OMPD_target_parallel_loop), StartLoc,
+ EndLoc, CollapsedNum);
+ Dir->setIterationVariable(Exprs.IterationVarRef);
+ Dir->setLastIteration(Exprs.LastIteration);
+ Dir->setCalcLastIteration(Exprs.CalcLastIteration);
+ Dir->setPreCond(Exprs.PreCond);
+ Dir->setCond(Exprs.Cond);
+ Dir->setInit(Exprs.Init);
+ Dir->setInc(Exprs.Inc);
+ Dir->setIsLastIterVariable(Exprs.IL);
+ Dir->setLowerBoundVariable(Exprs.LB);
+ Dir->setUpperBoundVariable(Exprs.UB);
+ Dir->setStrideVariable(Exprs.ST);
+ Dir->setEnsureUpperBound(Exprs.EUB);
+ Dir->setNextLowerBound(Exprs.NLB);
+ Dir->setNextUpperBound(Exprs.NUB);
+ Dir->setNumIterations(Exprs.NumIterations);
+ Dir->setCounters(Exprs.Counters);
+ Dir->setPrivateCounters(Exprs.PrivateCounters);
+ Dir->setInits(Exprs.Inits);
+ Dir->setUpdates(Exprs.Updates);
+ Dir->setFinals(Exprs.Finals);
+ Dir->setDependentCounters(Exprs.DependentCounters);
+ Dir->setDependentInits(Exprs.DependentInits);
+ Dir->setFinalsConditions(Exprs.FinalsConditions);
+ Dir->setPreInits(Exprs.PreInits);
+ return Dir;
+}
+
+OMPTargetParallelGenericLoopDirective *
+OMPTargetParallelGenericLoopDirective::CreateEmpty(const ASTContext &C,
+ unsigned NumClauses,
+ unsigned CollapsedNum,
+ EmptyShell) {
+ return createEmptyDirective<OMPTargetParallelGenericLoopDirective>(
+ C, NumClauses, /*HasAssociatedStmt=*/true,
+ numLoopChildren(CollapsedNum, OMPD_target_parallel_loop), CollapsedNum);
+}
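Note: these four factories complete the OpenMP 5.1 `loop` construct's combined
spellings; the matching printer and profiler hooks are added below. The source
forms they model (sketch):

    void work(int n, float *a) {
    #pragma omp teams loop
      for (int i = 0; i < n; ++i) a[i] += 1.0f;

    #pragma omp target parallel loop
      for (int i = 0; i < n; ++i) a[i] *= 2.0f;
    }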
diff --git a/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp b/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp
index adc0720fe000..8d778500d103 100644
--- a/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/StmtPrinter.cpp
@@ -128,6 +128,7 @@ namespace {
void PrintRawSEHFinallyStmt(SEHFinallyStmt *S);
void PrintOMPExecutableDirective(OMPExecutableDirective *S,
bool ForceNoStmt = false);
+ void PrintFPPragmas(CompoundStmt *S);
void PrintExpr(Expr *E) {
if (E)
@@ -174,12 +175,73 @@ namespace {
/// with no newline after the }.
void StmtPrinter::PrintRawCompoundStmt(CompoundStmt *Node) {
OS << "{" << NL;
+ PrintFPPragmas(Node);
for (auto *I : Node->body())
PrintStmt(I);
Indent() << "}";
}
+void StmtPrinter::PrintFPPragmas(CompoundStmt *S) {
+ if (!S->hasStoredFPFeatures())
+ return;
+ FPOptionsOverride FPO = S->getStoredFPFeatures();
+ bool FEnvAccess = false;
+ if (FPO.hasAllowFEnvAccessOverride()) {
+ FEnvAccess = FPO.getAllowFEnvAccessOverride();
+ Indent() << "#pragma STDC FENV_ACCESS " << (FEnvAccess ? "ON" : "OFF")
+ << NL;
+ }
+ if (FPO.hasSpecifiedExceptionModeOverride()) {
+ LangOptions::FPExceptionModeKind EM =
+ FPO.getSpecifiedExceptionModeOverride();
+ if (!FEnvAccess || EM != LangOptions::FPE_Strict) {
+ Indent() << "#pragma clang fp exceptions(";
+ switch (FPO.getSpecifiedExceptionModeOverride()) {
+ default:
+ break;
+ case LangOptions::FPE_Ignore:
+ OS << "ignore";
+ break;
+ case LangOptions::FPE_MayTrap:
+ OS << "maytrap";
+ break;
+ case LangOptions::FPE_Strict:
+ OS << "strict";
+ break;
+ }
+ OS << ")\n";
+ }
+ }
+ if (FPO.hasConstRoundingModeOverride()) {
+ LangOptions::RoundingMode RM = FPO.getConstRoundingModeOverride();
+ Indent() << "#pragma STDC FENV_ROUND ";
+ switch (RM) {
+ case llvm::RoundingMode::TowardZero:
+ OS << "FE_TOWARDZERO";
+ break;
+ case llvm::RoundingMode::NearestTiesToEven:
+ OS << "FE_TONEAREST";
+ break;
+ case llvm::RoundingMode::TowardPositive:
+ OS << "FE_UPWARD";
+ break;
+ case llvm::RoundingMode::TowardNegative:
+ OS << "FE_DOWNWARD";
+ break;
+ case llvm::RoundingMode::NearestTiesToAway:
+ OS << "FE_TONEARESTFROMZERO";
+ break;
+ case llvm::RoundingMode::Dynamic:
+ OS << "FE_DYNAMIC";
+ break;
+ default:
+ llvm_unreachable("Invalid rounding mode");
+ }
+ OS << NL;
+ }
+}
+
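Note: PrintRawCompoundStmt now re-materializes FP pragmas at the top of a
block from the stored overrides, so pretty-printed output round-trips them.
For a block carrying FENV_ACCESS and an upward constant rounding mode, the
printed form looks roughly like (sketch):

    {
    #pragma STDC FENV_ACCESS ON
    #pragma STDC FENV_ROUND FE_UPWARD
      /* statements */
    }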
void StmtPrinter::PrintRawDecl(Decl *D) {
D->print(OS, Policy, IndentLevel);
}
@@ -749,6 +811,12 @@ void StmtPrinter::VisitOMPParallelMasterDirective(
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPParallelMaskedDirective(
+ OMPParallelMaskedDirective *Node) {
+ Indent() << "#pragma omp parallel masked";
+ PrintOMPExecutableDirective(Node);
+}
+
void StmtPrinter::VisitOMPParallelSectionsDirective(
OMPParallelSectionsDirective *Node) {
Indent() << "#pragma omp parallel sections";
@@ -874,24 +942,48 @@ void StmtPrinter::VisitOMPMasterTaskLoopDirective(
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPMaskedTaskLoopDirective(
+ OMPMaskedTaskLoopDirective *Node) {
+ Indent() << "#pragma omp masked taskloop";
+ PrintOMPExecutableDirective(Node);
+}
+
void StmtPrinter::VisitOMPMasterTaskLoopSimdDirective(
OMPMasterTaskLoopSimdDirective *Node) {
Indent() << "#pragma omp master taskloop simd";
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPMaskedTaskLoopSimdDirective(
+ OMPMaskedTaskLoopSimdDirective *Node) {
+ Indent() << "#pragma omp masked taskloop simd";
+ PrintOMPExecutableDirective(Node);
+}
+
void StmtPrinter::VisitOMPParallelMasterTaskLoopDirective(
OMPParallelMasterTaskLoopDirective *Node) {
Indent() << "#pragma omp parallel master taskloop";
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPParallelMaskedTaskLoopDirective(
+ OMPParallelMaskedTaskLoopDirective *Node) {
+ Indent() << "#pragma omp parallel masked taskloop";
+ PrintOMPExecutableDirective(Node);
+}
+
void StmtPrinter::VisitOMPParallelMasterTaskLoopSimdDirective(
OMPParallelMasterTaskLoopSimdDirective *Node) {
Indent() << "#pragma omp parallel master taskloop simd";
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPParallelMaskedTaskLoopSimdDirective(
+ OMPParallelMaskedTaskLoopSimdDirective *Node) {
+ Indent() << "#pragma omp parallel masked taskloop simd";
+ PrintOMPExecutableDirective(Node);
+}
+
void StmtPrinter::VisitOMPDistributeDirective(OMPDistributeDirective *Node) {
Indent() << "#pragma omp distribute";
PrintOMPExecutableDirective(Node);
@@ -1005,6 +1097,30 @@ void StmtPrinter::VisitOMPGenericLoopDirective(OMPGenericLoopDirective *Node) {
PrintOMPExecutableDirective(Node);
}
+void StmtPrinter::VisitOMPTeamsGenericLoopDirective(
+ OMPTeamsGenericLoopDirective *Node) {
+ Indent() << "#pragma omp teams loop";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPTargetTeamsGenericLoopDirective(
+ OMPTargetTeamsGenericLoopDirective *Node) {
+ Indent() << "#pragma omp target teams loop";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPParallelGenericLoopDirective(
+ OMPParallelGenericLoopDirective *Node) {
+ Indent() << "#pragma omp parallel loop";
+ PrintOMPExecutableDirective(Node);
+}
+
+void StmtPrinter::VisitOMPTargetParallelGenericLoopDirective(
+ OMPTargetParallelGenericLoopDirective *Node) {
+ Indent() << "#pragma omp target parallel loop";
+ PrintOMPExecutableDirective(Node);
+}
+
//===----------------------------------------------------------------------===//
// Expr printing methods.
//===----------------------------------------------------------------------===//
@@ -1023,7 +1139,7 @@ void StmtPrinter::VisitDeclRefExpr(DeclRefExpr *Node) {
return;
}
if (const auto *TPOD = dyn_cast<TemplateParamObjectDecl>(Node->getDecl())) {
- TPOD->printAsExpr(OS);
+ TPOD->printAsExpr(OS, Policy);
return;
}
if (NestedNameSpecifier *Qualifier = Node->getQualifier())
@@ -1153,6 +1269,11 @@ void StmtPrinter::VisitIntegerLiteral(IntegerLiteral *Node) {
bool isSigned = Node->getType()->isSignedIntegerType();
OS << toString(Node->getValue(), 10, isSigned);
+ if (isa<BitIntType>(Node->getType())) {
+ OS << (isSigned ? "wb" : "uwb");
+ return;
+ }
+
// Emit suffixes. Integer literals are always a builtin integer type.
switch (Node->getType()->castAs<BuiltinType>()->getKind()) {
default: llvm_unreachable("Unexpected type for integer literal!");
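Note: the early return makes _BitInt literals print with C23's wb/uwb suffixes
instead of falling into the suffix switch, whose castAs<BuiltinType> cannot
succeed on a BitIntType. Round-trip sketch:

    _BitInt(12)          a = 100wb;   // re-printed as 100wb
    unsigned _BitInt(12) b = 100uwb;  // re-printed as 100uwb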
@@ -1736,21 +1857,16 @@ void StmtPrinter::VisitCXXOperatorCallExpr(CXXOperatorCallExpr *Node) {
}
} else if (Kind == OO_Arrow) {
PrintExpr(Node->getArg(0));
- } else if (Kind == OO_Call) {
+ } else if (Kind == OO_Call || Kind == OO_Subscript) {
PrintExpr(Node->getArg(0));
- OS << '(';
+ OS << (Kind == OO_Call ? '(' : '[');
for (unsigned ArgIdx = 1; ArgIdx < Node->getNumArgs(); ++ArgIdx) {
if (ArgIdx > 1)
OS << ", ";
if (!isa<CXXDefaultArgExpr>(Node->getArg(ArgIdx)))
PrintExpr(Node->getArg(ArgIdx));
}
- OS << ')';
- } else if (Kind == OO_Subscript) {
- PrintExpr(Node->getArg(0));
- OS << '[';
- PrintExpr(Node->getArg(1));
- OS << ']';
+ OS << (Kind == OO_Call ? ')' : ']');
} else if (Node->getNumArgs() == 1) {
OS << getOperatorSpelling(Kind) << ' ';
PrintExpr(Node->getArg(0));
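Note: merging OO_Subscript into the OO_Call path, with a loop over all
arguments rather than printing exactly Arg(1), lets the printer handle C++23
multidimensional subscripts. Sketch:

    // The unified loop prints the call site back as "m[i, j]" instead of
    // assuming a single subscript argument.
    struct Matrix {
      int &operator[](int r, int c);  // C++23 multi-arg operator[]
    };
    void f(Matrix &m, int i, int j) { m[i, j] = 0; }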
@@ -1945,14 +2061,23 @@ void StmtPrinter::VisitCXXDefaultInitExpr(CXXDefaultInitExpr *Node) {
}
void StmtPrinter::VisitCXXFunctionalCastExpr(CXXFunctionalCastExpr *Node) {
- Node->getType().print(OS, Policy);
- // If there are no parens, this is list-initialization, and the braces are
- // part of the syntax of the inner construct.
- if (Node->getLParenLoc().isValid())
- OS << "(";
+ auto TargetType = Node->getType();
+ auto *Auto = TargetType->getContainedDeducedType();
+ bool Bare = Auto && Auto->isDeduced();
+
+ // Parenthesize deduced casts.
+ if (Bare)
+ OS << '(';
+ TargetType.print(OS, Policy);
+ if (Bare)
+ OS << ')';
+
+ // For list-initialization, the braces are part of the inner construct's
+ // syntax, so no parentheses are added around it.
+ if (!Node->isListInitialization())
+ OS << '(';
PrintExpr(Node->getSubExpr());
- if (Node->getLParenLoc().isValid())
- OS << ")";
+ if (!Node->isListInitialization())
+ OS << ')';
}
void StmtPrinter::VisitCXXBindTemporaryExpr(CXXBindTemporaryExpr *Node) {
@@ -2137,10 +2262,10 @@ void StmtPrinter::VisitCXXNewExpr(CXXNewExpr *E) {
if (E->isParenTypeId())
OS << "(";
std::string TypeS;
- if (Optional<Expr *> Size = E->getArraySize()) {
+ if (E->isArray()) {
llvm::raw_string_ostream s(TypeS);
s << '[';
- if (*Size)
+ if (Optional<Expr *> Size = E->getArraySize())
(*Size)->printPretty(s, Helper, Policy);
s << ']';
}
@@ -2149,11 +2274,13 @@ void StmtPrinter::VisitCXXNewExpr(CXXNewExpr *E) {
OS << ")";
CXXNewExpr::InitializationStyle InitStyle = E->getInitializationStyle();
- if (InitStyle) {
- if (InitStyle == CXXNewExpr::CallInit)
+ if (InitStyle != CXXNewExpr::NoInit) {
+ bool Bare = InitStyle == CXXNewExpr::CallInit &&
+ !isa<ParenListExpr>(E->getInitializer());
+ if (Bare)
OS << "(";
PrintExpr(E->getInitializer());
- if (InitStyle == CXXNewExpr::CallInit)
+ if (Bare)
OS << ")";
}
}
@@ -2215,19 +2342,19 @@ void StmtPrinter::VisitExprWithCleanups(ExprWithCleanups *E) {
PrintExpr(E->getSubExpr());
}
-void
-StmtPrinter::VisitCXXUnresolvedConstructExpr(
- CXXUnresolvedConstructExpr *Node) {
+void StmtPrinter::VisitCXXUnresolvedConstructExpr(
+ CXXUnresolvedConstructExpr *Node) {
Node->getTypeAsWritten().print(OS, Policy);
- OS << "(";
- for (CXXUnresolvedConstructExpr::arg_iterator Arg = Node->arg_begin(),
- ArgEnd = Node->arg_end();
- Arg != ArgEnd; ++Arg) {
+ if (!Node->isListInitialization())
+ OS << '(';
+ for (auto Arg = Node->arg_begin(), ArgEnd = Node->arg_end(); Arg != ArgEnd;
+ ++Arg) {
if (Arg != Node->arg_begin())
OS << ", ";
PrintExpr(*Arg);
}
- OS << ")";
+ if (!Node->isListInitialization())
+ OS << ')';
}
void StmtPrinter::VisitCXXDependentScopeMemberExpr(
diff --git a/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp b/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp
index 09853e0f0e49..92a8b18cf68a 100644
--- a/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp
+++ b/contrib/llvm-project/clang/lib/AST/StmtProfile.cpp
@@ -38,6 +38,10 @@ namespace {
void VisitStmt(const Stmt *S);
+ void VisitStmtNoChildren(const Stmt *S) {
+ HandleStmtClass(S->getStmtClass());
+ }
+
virtual void HandleStmtClass(Stmt::StmtClass SC) = 0;
#define STMT(Node, Base) void Visit##Node(const Node *S);
@@ -218,7 +222,7 @@ namespace {
void StmtProfiler::VisitStmt(const Stmt *S) {
assert(S && "Requires non-null Stmt pointer");
- HandleStmtClass(S->getStmtClass());
+ VisitStmtNoChildren(S);
for (const Stmt *SubStmt : S->children()) {
if (SubStmt)
@@ -857,6 +861,10 @@ void OMPClauseProfiler::VisitOMPIsDevicePtrClause(
const OMPIsDevicePtrClause *C) {
VisitOMPClauseList(C);
}
+void OMPClauseProfiler::VisitOMPHasDeviceAddrClause(
+ const OMPHasDeviceAddrClause *C) {
+ VisitOMPClauseList(C);
+}
void OMPClauseProfiler::VisitOMPNontemporalClause(
const OMPNontemporalClause *C) {
VisitOMPClauseList(C);
@@ -980,6 +988,11 @@ void StmtProfiler::VisitOMPParallelMasterDirective(
VisitOMPExecutableDirective(S);
}
+void StmtProfiler::VisitOMPParallelMaskedDirective(
+ const OMPParallelMaskedDirective *S) {
+ VisitOMPExecutableDirective(S);
+}
+
void StmtProfiler::VisitOMPParallelSectionsDirective(
const OMPParallelSectionsDirective *S) {
VisitOMPExecutableDirective(S);
@@ -1082,21 +1095,41 @@ void StmtProfiler::VisitOMPMasterTaskLoopDirective(
VisitOMPLoopDirective(S);
}
+void StmtProfiler::VisitOMPMaskedTaskLoopDirective(
+ const OMPMaskedTaskLoopDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
void StmtProfiler::VisitOMPMasterTaskLoopSimdDirective(
const OMPMasterTaskLoopSimdDirective *S) {
VisitOMPLoopDirective(S);
}
+void StmtProfiler::VisitOMPMaskedTaskLoopSimdDirective(
+ const OMPMaskedTaskLoopSimdDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
void StmtProfiler::VisitOMPParallelMasterTaskLoopDirective(
const OMPParallelMasterTaskLoopDirective *S) {
VisitOMPLoopDirective(S);
}
+void StmtProfiler::VisitOMPParallelMaskedTaskLoopDirective(
+ const OMPParallelMaskedTaskLoopDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
void StmtProfiler::VisitOMPParallelMasterTaskLoopSimdDirective(
const OMPParallelMasterTaskLoopSimdDirective *S) {
VisitOMPLoopDirective(S);
}
+void StmtProfiler::VisitOMPParallelMaskedTaskLoopSimdDirective(
+ const OMPParallelMaskedTaskLoopSimdDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
void StmtProfiler::VisitOMPDistributeDirective(
const OMPDistributeDirective *S) {
VisitOMPLoopDirective(S);
@@ -1203,6 +1236,26 @@ void StmtProfiler::VisitOMPGenericLoopDirective(
VisitOMPLoopDirective(S);
}
+void StmtProfiler::VisitOMPTeamsGenericLoopDirective(
+ const OMPTeamsGenericLoopDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
+void StmtProfiler::VisitOMPTargetTeamsGenericLoopDirective(
+ const OMPTargetTeamsGenericLoopDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
+void StmtProfiler::VisitOMPParallelGenericLoopDirective(
+ const OMPParallelGenericLoopDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
+void StmtProfiler::VisitOMPTargetParallelGenericLoopDirective(
+ const OMPTargetParallelGenericLoopDirective *S) {
+ VisitOMPLoopDirective(S);
+}
+
void StmtProfiler::VisitExpr(const Expr *S) {
VisitStmt(S);
}
@@ -1945,7 +1998,11 @@ StmtProfiler::VisitCXXTemporaryObjectExpr(const CXXTemporaryObjectExpr *S) {
void
StmtProfiler::VisitLambdaExpr(const LambdaExpr *S) {
- VisitExpr(S);
+ // Do not recursively visit the children of this expression. Profiling the
+ // body would result in unnecessary work, and is not safe to do during
+ // deserialization.
+ VisitStmtNoChildren(S);
+
// C++20 [temp.over.link]p5:
// Two lambda-expressions are never considered equivalent.
VisitDecl(S->getLambdaClass());
diff --git a/contrib/llvm-project/clang/lib/AST/TemplateBase.cpp b/contrib/llvm-project/clang/lib/AST/TemplateBase.cpp
index 619ce42f9dd1..e0f5916a9a0b 100644
--- a/contrib/llvm-project/clang/lib/AST/TemplateBase.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TemplateBase.cpp
@@ -59,15 +59,17 @@ static void printIntegral(const TemplateArgument &TemplArg, raw_ostream &Out,
const Type *T = TemplArg.getIntegralType().getTypePtr();
const llvm::APSInt &Val = TemplArg.getAsIntegral();
- if (const EnumType *ET = T->getAs<EnumType>()) {
- for (const EnumConstantDecl* ECD : ET->getDecl()->enumerators()) {
- // In Sema::CheckTemplateArugment, enum template arguments value are
- // extended to the size of the integer underlying the enum type. This
- // may create a size difference between the enum value and template
- // argument value, requiring isSameValue here instead of operator==.
- if (llvm::APSInt::isSameValue(ECD->getInitVal(), Val)) {
- ECD->printQualifiedName(Out, Policy);
- return;
+ if (Policy.UseEnumerators) {
+ if (const EnumType *ET = T->getAs<EnumType>()) {
+ for (const EnumConstantDecl *ECD : ET->getDecl()->enumerators()) {
+ // In Sema::CheckTemplateArgument, enum template argument values are
+ // extended to the size of the integer underlying the enum type. This
+ // may create a size difference between the enum value and template
+ // argument value, requiring isSameValue here instead of operator==.
+ if (llvm::APSInt::isSameValue(ECD->getInitVal(), Val)) {
+ ECD->printQualifiedName(Out, Policy);
+ return;
+ }
}
}
}
@@ -434,7 +436,7 @@ void TemplateArgument::print(const PrintingPolicy &Policy, raw_ostream &Out,
NamedDecl *ND = getAsDecl();
if (getParamTypeForDecl()->isRecordType()) {
if (auto *TPO = dyn_cast<TemplateParamObjectDecl>(ND)) {
- TPO->printAsInit(Out);
+ TPO->printAsInit(Out, Policy);
break;
}
}
@@ -615,6 +617,17 @@ ASTTemplateArgumentListInfo::Create(const ASTContext &C,
return new (Mem) ASTTemplateArgumentListInfo(List);
}
+const ASTTemplateArgumentListInfo *
+ASTTemplateArgumentListInfo::Create(const ASTContext &C,
+ const ASTTemplateArgumentListInfo *List) {
+ if (!List)
+ return nullptr;
+ std::size_t size =
+ totalSizeToAlloc<TemplateArgumentLoc>(List->getNumTemplateArgs());
+ void *Mem = C.Allocate(size, alignof(ASTTemplateArgumentListInfo));
+ return new (Mem) ASTTemplateArgumentListInfo(List);
+}
+
ASTTemplateArgumentListInfo::ASTTemplateArgumentListInfo(
const TemplateArgumentListInfo &Info) {
LAngleLoc = Info.getLAngleLoc();
@@ -626,6 +639,17 @@ ASTTemplateArgumentListInfo::ASTTemplateArgumentListInfo(
new (&ArgBuffer[i]) TemplateArgumentLoc(Info[i]);
}
+ASTTemplateArgumentListInfo::ASTTemplateArgumentListInfo(
+ const ASTTemplateArgumentListInfo *Info) {
+ LAngleLoc = Info->getLAngleLoc();
+ RAngleLoc = Info->getRAngleLoc();
+ NumTemplateArgs = Info->getNumTemplateArgs();
+
+ TemplateArgumentLoc *ArgBuffer = getTrailingObjects<TemplateArgumentLoc>();
+ for (unsigned i = 0; i != NumTemplateArgs; ++i)
+ new (&ArgBuffer[i]) TemplateArgumentLoc((*Info)[i]);
+}
+
void ASTTemplateKWAndArgsInfo::initializeFrom(
SourceLocation TemplateKWLoc, const TemplateArgumentListInfo &Info,
TemplateArgumentLoc *OutArgArray) {
diff --git a/contrib/llvm-project/clang/lib/AST/TemplateName.cpp b/contrib/llvm-project/clang/lib/AST/TemplateName.cpp
index 05d7d58b71c4..11dc3d2e1985 100644
--- a/contrib/llvm-project/clang/lib/AST/TemplateName.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TemplateName.cpp
@@ -13,6 +13,7 @@
#include "clang/AST/TemplateName.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclBase.h"
+#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DependenceFlags.h"
#include "clang/AST/NestedNameSpecifier.h"
@@ -76,12 +77,18 @@ TemplateName::TemplateName(SubstTemplateTemplateParmPackStorage *Storage)
: Storage(Storage) {}
TemplateName::TemplateName(QualifiedTemplateName *Qual) : Storage(Qual) {}
TemplateName::TemplateName(DependentTemplateName *Dep) : Storage(Dep) {}
+TemplateName::TemplateName(UsingShadowDecl *Using) : Storage(Using) {}
bool TemplateName::isNull() const { return Storage.isNull(); }
TemplateName::NameKind TemplateName::getKind() const {
- if (Storage.is<TemplateDecl *>())
+ if (auto *ND = Storage.dyn_cast<Decl *>()) {
+ if (isa<UsingShadowDecl>(ND))
+ return UsingTemplate;
+ assert(isa<TemplateDecl>(ND));
return Template;
+ }
+
if (Storage.is<DependentTemplateName *>())
return DependentTemplate;
if (Storage.is<QualifiedTemplateName *>())
@@ -99,15 +106,23 @@ TemplateName::NameKind TemplateName::getKind() const {
}
TemplateDecl *TemplateName::getAsTemplateDecl() const {
- if (TemplateDecl *Template = Storage.dyn_cast<TemplateDecl *>())
- return Template;
+ if (Decl *TemplateOrUsing = Storage.dyn_cast<Decl *>()) {
+ if (UsingShadowDecl *USD = dyn_cast<UsingShadowDecl>(TemplateOrUsing))
+ return cast<TemplateDecl>(USD->getTargetDecl());
+
+ assert(isa<TemplateDecl>(TemplateOrUsing));
+ return cast<TemplateDecl>(TemplateOrUsing);
+ }
if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName())
- return QTN->getTemplateDecl();
+ return QTN->getUnderlyingTemplate().getAsTemplateDecl();
if (SubstTemplateTemplateParmStorage *sub = getAsSubstTemplateTemplateParm())
return sub->getReplacement().getAsTemplateDecl();
+ if (UsingShadowDecl *USD = getAsUsingShadowDecl())
+ return cast<TemplateDecl>(USD->getTargetDecl());
+
return nullptr;
}
@@ -153,6 +168,15 @@ DependentTemplateName *TemplateName::getAsDependentTemplateName() const {
return Storage.dyn_cast<DependentTemplateName *>();
}
+UsingShadowDecl *TemplateName::getAsUsingShadowDecl() const {
+ if (Decl *D = Storage.dyn_cast<Decl *>())
+ if (UsingShadowDecl *USD = dyn_cast<UsingShadowDecl>(D))
+ return USD;
+ if (QualifiedTemplateName *QTN = getAsQualifiedTemplateName())
+ return QTN->getUnderlyingTemplate().getAsUsingShadowDecl();
+ return nullptr;
+}
+
TemplateName TemplateName::getNameToSubstitute() const {
TemplateDecl *Decl = getAsTemplateDecl();
@@ -222,7 +246,22 @@ bool TemplateName::containsUnexpandedParameterPack() const {
void TemplateName::print(raw_ostream &OS, const PrintingPolicy &Policy,
Qualified Qual) const {
- if (TemplateDecl *Template = Storage.dyn_cast<TemplateDecl *>())
+ auto Kind = getKind();
+ TemplateDecl *Template = nullptr;
+ if (Kind == TemplateName::Template || Kind == TemplateName::UsingTemplate) {
+ // After `namespace ns { using std::vector }`, what is the fully-qualified
+ // name of the UsingTemplateName `vector` within ns?
+ //
+ // - ns::vector (the qualified name of the using-shadow decl)
+ // - std::vector (the qualified name of the underlying template decl)
+ //
+ // Similar to the UsingType behavior, using declarations are used to import
+ // names more often than to export them, thus using the original name is
+ // most useful in this case.
+ Template = getAsTemplateDecl();
+ }
+
+ if (Template)
if (Policy.CleanUglifiedParameters &&
isa<TemplateTemplateParmDecl>(Template) && Template->getIdentifier())
OS << Template->getIdentifier()->deuglifiedName();
@@ -236,14 +275,15 @@ void TemplateName::print(raw_ostream &OS, const PrintingPolicy &Policy,
if (Qual == Qualified::Fully &&
getDependence() !=
TemplateNameDependenceScope::DependentInstantiation) {
- QTN->getTemplateDecl()->printQualifiedName(OS, Policy);
+ QTN->getUnderlyingTemplate().getAsTemplateDecl()->printQualifiedName(
+ OS, Policy);
return;
}
if (Qual == Qualified::AsWritten)
QTN->getQualifier()->print(OS, Policy);
if (QTN->hasTemplateKeyword())
OS << "template ";
- OS << *QTN->getDecl();
+ OS << *QTN->getUnderlyingTemplate().getAsTemplateDecl();
} else if (DependentTemplateName *DTN = getAsDependentTemplateName()) {
if (Qual == Qualified::AsWritten && DTN->getQualifier())
DTN->getQualifier()->print(OS, Policy);
@@ -262,6 +302,7 @@ void TemplateName::print(raw_ostream &OS, const PrintingPolicy &Policy,
else if (AssumedTemplateStorage *Assumed = getAsAssumedTemplateName()) {
Assumed->getDeclName().print(OS, Policy);
} else {
+ assert(getKind() == TemplateName::OverloadedTemplate);
OverloadedTemplateStorage *OTS = getAsOverloadedTemplate();
(*OTS->begin())->printName(OS);
}
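Note: with these changes a TemplateName can remember that it was reached
through a using-declaration (kind UsingTemplate, stored as the UsingShadowDecl)
while getAsTemplateDecl() still resolves to the underlying template. Sketch of
the pattern this models:

    #include <vector>
    namespace ns { using std::vector; }
    // TemplateName kind here is UsingTemplate; getAsTemplateDecl() yields
    // std::vector, which fully-qualified printing also uses, per the comment
    // in print() above.
    ns::vector<int> v;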
diff --git a/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp b/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp
index 67c934847c7f..79e9fa6ab86f 100644
--- a/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TextNodeDumper.cpp
@@ -283,6 +283,8 @@ void TextNodeDumper::Visit(const Decl *D) {
OS << " constexpr";
if (FD->isConsteval())
OS << " consteval";
+ if (FD->isMultiVersion())
+ OS << " multiversion";
}
if (!isa<FunctionDecl>(*D)) {
@@ -898,12 +900,17 @@ void TextNodeDumper::VisitIntegralTemplateArgument(const TemplateArgument &TA) {
}
void TextNodeDumper::VisitTemplateTemplateArgument(const TemplateArgument &TA) {
+ if (TA.getAsTemplate().getKind() == TemplateName::UsingTemplate)
+ OS << " using";
OS << " template ";
TA.getAsTemplate().dump(OS);
}
void TextNodeDumper::VisitTemplateExpansionTemplateArgument(
const TemplateArgument &TA) {
+ if (TA.getAsTemplateOrTemplatePattern().getKind() ==
+ TemplateName::UsingTemplate)
+ OS << " using";
OS << " template expansion ";
TA.getAsTemplateOrTemplatePattern().dump(OS);
}
@@ -1573,10 +1580,18 @@ void TextNodeDumper::VisitAutoType(const AutoType *T) {
}
}
+void TextNodeDumper::VisitDeducedTemplateSpecializationType(
+ const DeducedTemplateSpecializationType *T) {
+ if (T->getTemplateName().getKind() == TemplateName::UsingTemplate)
+ OS << " using";
+}
+
void TextNodeDumper::VisitTemplateSpecializationType(
const TemplateSpecializationType *T) {
if (T->isTypeAlias())
OS << " alias";
+ if (T->getTemplateName().getKind() == TemplateName::UsingTemplate)
+ OS << " using";
OS << " ";
T->getTemplateName().dump(OS);
}
@@ -1666,6 +1681,9 @@ void TextNodeDumper::VisitFunctionDecl(const FunctionDecl *D) {
if (D->isTrivial())
OS << " trivial";
+ if (D->isIneligibleOrNotSelected())
+ OS << (isa<CXXDestructorDecl>(D) ? " not_selected" : " ineligible");
+
if (const auto *FPT = D->getType()->getAs<FunctionProtoType>()) {
FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
switch (EPI.ExceptionSpec.Type) {
@@ -2353,3 +2371,9 @@ void TextNodeDumper::VisitBlockDecl(const BlockDecl *D) {
void TextNodeDumper::VisitConceptDecl(const ConceptDecl *D) {
dumpName(D);
}
+
+void TextNodeDumper::VisitCompoundStmt(const CompoundStmt *S) {
+ VisitStmt(S);
+ if (S->hasStoredFPFeatures())
+ printFPOptions(S->getStoredFPFeatures());
+}
diff --git a/contrib/llvm-project/clang/lib/AST/Type.cpp b/contrib/llvm-project/clang/lib/AST/Type.cpp
index adfe8a9462b3..0f168a518707 100644
--- a/contrib/llvm-project/clang/lib/AST/Type.cpp
+++ b/contrib/llvm-project/clang/lib/AST/Type.cpp
@@ -1898,8 +1898,14 @@ bool Type::hasAutoForTrailingReturnType() const {
bool Type::hasIntegerRepresentation() const {
if (const auto *VT = dyn_cast<VectorType>(CanonicalType))
return VT->getElementType()->isIntegerType();
- else
- return isIntegerType();
+ if (CanonicalType->isVLSTBuiltinType()) {
+ const auto *VT = cast<BuiltinType>(CanonicalType);
+ return VT->getKind() == BuiltinType::SveBool ||
+ (VT->getKind() >= BuiltinType::SveInt8 &&
+ VT->getKind() <= BuiltinType::SveUint64);
+ }
+
+ return isIntegerType();
}
/// Determine whether this type is an integral type.
@@ -2104,6 +2110,11 @@ bool Type::hasUnsignedIntegerRepresentation() const {
return VT->getElementType()->isUnsignedIntegerOrEnumerationType();
if (const auto *VT = dyn_cast<MatrixType>(CanonicalType))
return VT->getElementType()->isUnsignedIntegerOrEnumerationType();
+ if (CanonicalType->isVLSTBuiltinType()) {
+ const auto *VT = cast<BuiltinType>(CanonicalType);
+ return VT->getKind() >= BuiltinType::SveUint8 &&
+ VT->getKind() <= BuiltinType::SveUint64;
+ }
return isUnsignedIntegerOrEnumerationType();
}
@@ -2494,6 +2505,25 @@ bool QualType::isTriviallyCopyableType(const ASTContext &Context) const {
return false;
}
+bool QualType::isTriviallyRelocatableType(const ASTContext &Context) const {
+ QualType BaseElementType = Context.getBaseElementType(*this);
+
+ if (BaseElementType->isIncompleteType()) {
+ return false;
+ } else if (const auto *RD = BaseElementType->getAsRecordDecl()) {
+ return RD->canPassInRegisters();
+ } else {
+ switch (isNonTrivialToPrimitiveDestructiveMove()) {
+ case PCK_Trivial:
+ return !isDestructedType();
+ case PCK_ARCStrong:
+ return true;
+ default:
+ return false;
+ }
+ }
+}
+
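Note: this query backs Clang's __is_trivially_relocatable type trait (added
alongside this change): incomplete types report false, records defer to
canPassInRegisters(), and other types fall back to the primitive
destructive-move classification. Sketch:

    struct Trivial { int x; };
    struct Polymorphic { virtual ~Polymorphic(); };
    static_assert(__is_trivially_relocatable(Trivial), "");
    static_assert(!__is_trivially_relocatable(Polymorphic), "");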
bool QualType::isNonWeakInMRRWithObjCWeak(const ASTContext &Context) const {
return !Context.getLangOpts().ObjCAutoRefCount &&
Context.getLangOpts().ObjCWeak &&
@@ -3155,6 +3185,8 @@ StringRef FunctionType::getNameForCallConv(CallingConv CC) {
case CC_AAPCS: return "aapcs";
case CC_AAPCS_VFP: return "aapcs-vfp";
case CC_AArch64VectorCall: return "aarch64_vector_pcs";
+ case CC_AArch64SVEPCS: return "aarch64_sve_pcs";
+ case CC_AMDGPUKernelCall: return "amdgpu_kernel";
case CC_IntelOclBicc: return "intel_ocl_bicc";
case CC_SpirFunction: return "spir_function";
case CC_OpenCLKernel: return "opencl_kernel";
@@ -3181,12 +3213,15 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
FunctionTypeBits.Variadic = epi.Variadic;
FunctionTypeBits.HasTrailingReturn = epi.HasTrailingReturn;
- // Fill in the extra trailing bitfields if present.
- if (hasExtraBitfields(epi.ExceptionSpec.Type)) {
+ if (epi.requiresFunctionProtoTypeExtraBitfields()) {
+ FunctionTypeBits.HasExtraBitfields = true;
auto &ExtraBits = *getTrailingObjects<FunctionTypeExtraBitfields>();
- ExtraBits.NumExceptionType = epi.ExceptionSpec.Exceptions.size();
+ ExtraBits = FunctionTypeExtraBitfields();
+ } else {
+ FunctionTypeBits.HasExtraBitfields = false;
}
+
// Fill in the trailing argument array.
auto *argSlot = getTrailingObjects<QualType>();
for (unsigned i = 0; i != getNumParams(); ++i) {
@@ -3197,6 +3232,9 @@ FunctionProtoType::FunctionProtoType(QualType result, ArrayRef<QualType> params,
// Fill in the exception type array if present.
if (getExceptionSpecType() == EST_Dynamic) {
+ auto &ExtraBits = *getTrailingObjects<FunctionTypeExtraBitfields>();
+ ExtraBits.NumExceptionType = epi.ExceptionSpec.Exceptions.size();
+
assert(hasExtraBitfields() && "missing trailing extra bitfields!");
auto *exnSlot =
reinterpret_cast<QualType *>(getTrailingObjects<ExceptionType>());
@@ -3300,7 +3338,6 @@ CanThrowResult FunctionProtoType::canThrow() const {
switch (getExceptionSpecType()) {
case EST_Unparsed:
case EST_Unevaluated:
- case EST_Uninstantiated:
llvm_unreachable("should not call this with unresolved exception specs");
case EST_DynamicNone:
@@ -3322,6 +3359,7 @@ CanThrowResult FunctionProtoType::canThrow() const {
return CT_Can;
return CT_Dependent;
+ case EST_Uninstantiated:
case EST_DependentNoexcept:
return CT_Dependent;
}
@@ -3590,6 +3628,8 @@ bool AttributedType::isCallingConv() const {
case attr::SwiftAsyncCall:
case attr::VectorCall:
case attr::AArch64VectorPcs:
+ case attr::AArch64SVEPcs:
+ case attr::AMDGPUKernelCall:
case attr::Pascal:
case attr::MSABI:
case attr::SysVABI:
@@ -3675,7 +3715,8 @@ TemplateSpecializationType::TemplateSpecializationType(
"Use DependentTemplateSpecializationType for dependent template-name");
assert((T.getKind() == TemplateName::Template ||
T.getKind() == TemplateName::SubstTemplateTemplateParm ||
- T.getKind() == TemplateName::SubstTemplateTemplateParmPack) &&
+ T.getKind() == TemplateName::SubstTemplateTemplateParmPack ||
+ T.getKind() == TemplateName::UsingTemplate) &&
"Unexpected template name for TemplateSpecializationType");
auto *TemplateArgs = reinterpret_cast<TemplateArgument *>(this + 1);
diff --git a/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp b/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp
index 13aa54c48f66..cf5e2f979230 100644
--- a/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TypeLoc.cpp
@@ -507,6 +507,10 @@ SourceRange AttributedTypeLoc::getLocalSourceRange() const {
return getAttr() ? getAttr()->getRange() : SourceRange();
}
+SourceRange BTFTagAttributedTypeLoc::getLocalSourceRange() const {
+ return getAttr() ? getAttr()->getRange() : SourceRange();
+}
+
void TypeOfTypeLoc::initializeLocal(ASTContext &Context,
SourceLocation Loc) {
TypeofLikeTypeLoc<TypeOfTypeLoc, TypeOfType, TypeOfTypeLocInfo>
@@ -683,6 +687,10 @@ namespace {
return Visit(T.getModifiedLoc());
}
+ TypeLoc VisitBTFTagAttributedTypeLoc(BTFTagAttributedTypeLoc T) {
+ return Visit(T.getWrappedLoc());
+ }
+
TypeLoc VisitMacroQualifiedTypeLoc(MacroQualifiedTypeLoc T) {
return Visit(T.getInnerLoc());
}
diff --git a/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp b/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp
index 6e827530f41b..6b13d3806037 100644
--- a/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp
+++ b/contrib/llvm-project/clang/lib/AST/TypePrinter.cpp
@@ -80,6 +80,21 @@ namespace {
}
};
+ class DefaultTemplateArgsPolicyRAII {
+ PrintingPolicy &Policy;
+ bool Old;
+
+ public:
+ explicit DefaultTemplateArgsPolicyRAII(PrintingPolicy &Policy)
+ : Policy(Policy), Old(Policy.SuppressDefaultTemplateArgs) {
+ Policy.SuppressDefaultTemplateArgs = false;
+ }
+
+ ~DefaultTemplateArgsPolicyRAII() {
+ Policy.SuppressDefaultTemplateArgs = Old;
+ }
+ };
+
class ElaboratedTypePolicyRAII {
PrintingPolicy &Policy;
bool SuppressTagKeyword;
@@ -235,6 +250,7 @@ bool TypePrinter::canPrefixQualifiers(const Type *T,
case Type::Pipe:
case Type::BitInt:
case Type::DependentBitInt:
+ case Type::BTFTagAttributed:
CanPrefixQualifiers = true;
break;
@@ -282,6 +298,7 @@ bool TypePrinter::canPrefixQualifiers(const Type *T,
// address_space attribute.
const auto *AttrTy = cast<AttributedType>(UnderlyingType);
CanPrefixQualifiers = AttrTy->getAttrKind() == attr::AddressSpace;
+ break;
}
}
@@ -959,6 +976,12 @@ void TypePrinter::printFunctionAfter(const FunctionType::ExtInfo &Info,
case CC_AArch64VectorCall:
OS << "__attribute__((aarch64_vector_pcs))";
break;
+ case CC_AArch64SVEPCS:
+ OS << "__attribute__((aarch64_sve_pcs))";
+ break;
+ case CC_AMDGPUKernelCall:
+ OS << "__attribute__((amdgpu_kernel))";
+ break;
case CC_IntelOclBicc:
OS << " __attribute__((intel_ocl_bicc))";
break;
@@ -1462,6 +1485,7 @@ void TypePrinter::printTemplateId(const TemplateSpecializationType *T,
IncludeStrongLifetimeRAII Strong(Policy);
TemplateDecl *TD = T->getTemplateName().getAsTemplateDecl();
+ // FIXME: Null TD is never exercised in the test suite.
if (FullyQualify && TD) {
if (!Policy.SuppressScope)
AppendScope(TD->getDeclContext(), OS, TD->getDeclName());
@@ -1471,7 +1495,9 @@ void TypePrinter::printTemplateId(const TemplateSpecializationType *T,
T->getTemplateName().print(OS, Policy);
}
- printTemplateArgumentList(OS, T->template_arguments(), Policy);
+ DefaultTemplateArgsPolicyRAII TemplateArgs(Policy);
+ const TemplateParameterList *TPL = TD ? TD->getTemplateParameters() : nullptr;
+ printTemplateArgumentList(OS, T->template_arguments(), Policy, TPL);
spaceBeforePlaceHolder(OS);
}
@@ -1680,6 +1706,15 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
if (T->getAttrKind() == attr::AddressSpace)
return;
+ if (T->getAttrKind() == attr::AnnotateType) {
+ // FIXME: Print the attribute arguments once we have a way to retrieve these
+ // here. In the meantime, we just print `[[clang::annotate_type(...)]]`
+ // without the arguments so that we know at least that we had _some_
+ // annotation on the type.
+ OS << " [[clang::annotate_type(...)]]";
+ return;
+ }
+
OS << " __attribute__((";
switch (T->getAttrKind()) {
#define TYPE_ATTR(NAME)
@@ -1688,6 +1723,9 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
#include "clang/Basic/AttrList.inc"
llvm_unreachable("non-type attribute attached to type");
+ case attr::BTFTypeTag:
+ llvm_unreachable("BTFTypeTag attribute handled separately");
+
case attr::OpenCLPrivateAddressSpace:
case attr::OpenCLGlobalAddressSpace:
case attr::OpenCLGlobalDeviceAddressSpace:
@@ -1714,6 +1752,7 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
case attr::UPtr:
case attr::AddressSpace:
case attr::CmseNSCall:
+ case attr::AnnotateType:
llvm_unreachable("This attribute should have been handled already");
case attr::NSReturnsRetained:
@@ -1745,6 +1784,8 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
break;
}
case attr::AArch64VectorPcs: OS << "aarch64_vector_pcs"; break;
+ case attr::AArch64SVEPcs: OS << "aarch64_sve_pcs"; break;
+ case attr::AMDGPUKernelCall: OS << "amdgpu_kernel"; break;
case attr::IntelOclBicc: OS << "inteloclbicc"; break;
case attr::PreserveMost:
OS << "preserve_most";
@@ -1762,13 +1803,21 @@ void TypePrinter::printAttributedAfter(const AttributedType *T,
case attr::ArmMveStrictPolymorphism:
OS << "__clang_arm_mve_strict_polymorphism";
break;
- case attr::BTFTypeTag:
- OS << "btf_type_tag";
- break;
}
OS << "))";
}
+void TypePrinter::printBTFTagAttributedBefore(const BTFTagAttributedType *T,
+ raw_ostream &OS) {
+ printBefore(T->getWrappedType(), OS);
+ OS << " btf_type_tag(" << T->getAttr()->getBTFTypeTag() << ")";
+}
+
+void TypePrinter::printBTFTagAttributedAfter(const BTFTagAttributedType *T,
+ raw_ostream &OS) {
+ printAfter(T->getWrappedType(), OS);
+}
+
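Note: the dedicated Before printer keeps the tag adjacent to the wrapped type,
matching the removal of the BTFTypeTag case from printAttributedAfter above.
Round-trip sketch:

    #define __user __attribute__((btf_type_tag("user")))
    int __user *p;   // printed back roughly as: int  btf_type_tag(user) *p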
void TypePrinter::printObjCInterfaceBefore(const ObjCInterfaceType *T,
raw_ostream &OS) {
OS << T->getDecl()->getName();
@@ -2294,3 +2343,9 @@ void QualType::getAsStringInternal(const Type *ty, Qualifiers qs,
std::string str = std::string(StrOS.str());
buffer.swap(str);
}
+
+raw_ostream &clang::operator<<(raw_ostream &OS, QualType QT) {
+ SplitQualType S = QT.split();
+ TypePrinter(LangOptions()).print(S.Ty, S.Quals, OS, /*PlaceHolder=*/"");
+ return OS;
+}
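Note: this streaming operator is what permits the .getAsString() removals in
the RecordLayoutBuilder and VTableBuilder hunks earlier in the patch; printing
goes straight to the stream without materializing a temporary std::string. It
prints under a default-constructed LangOptions, so language-dependent
spellings can differ from a policy-aware print. Usage sketch:

    void dumpType(llvm::raw_ostream &OS, clang::QualType QT) {
      OS << "Type: " << QT << "\n";  // was: OS << QT.getAsString();
    }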
diff --git a/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp b/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp
index 24586d6b70d4..3d64cb17fa9c 100644
--- a/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/AST/VTableBuilder.cpp
@@ -17,6 +17,7 @@
#include "clang/AST/RecordLayout.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
@@ -3113,8 +3114,7 @@ static void dumpMicrosoftThunkAdjustment(const ThunkInfo &TI, raw_ostream &Out,
if (!ContinueFirstLine)
Out << LinePrefix;
Out << "[return adjustment (to type '"
- << TI.Method->getReturnType().getCanonicalType().getAsString()
- << "'): ";
+ << TI.Method->getReturnType().getCanonicalType() << "'): ";
if (R.Virtual.Microsoft.VBPtrOffset)
Out << "vbptr at offset " << R.Virtual.Microsoft.VBPtrOffset << ", ";
if (R.Virtual.Microsoft.VBIndex)
@@ -3384,10 +3384,8 @@ static bool rebucketPaths(VPtrInfoVector &Paths) {
// sorted vector to implement a multiset to form the buckets. Note that the
// ordering is based on pointers, but it doesn't change our output order. The
// current algorithm is designed to match MSVC 2012's names.
- llvm::SmallVector<std::reference_wrapper<VPtrInfo>, 2> PathsSorted;
- PathsSorted.reserve(Paths.size());
- for (auto& P : Paths)
- PathsSorted.push_back(*P);
+ llvm::SmallVector<std::reference_wrapper<VPtrInfo>, 2> PathsSorted(
+ llvm::make_pointee_range(Paths));
llvm::sort(PathsSorted, [](const VPtrInfo &LHS, const VPtrInfo &RHS) {
return LHS.MangledPath < RHS.MangledPath;
});
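
llvm::make_pointee_range adapts a range of pointer-like values into a range of references, which is what lets the reserve-and-push_back loop above collapse into a single constructor call. A self-contained sketch of the idiom, using an illustrative Widget type:

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include <functional>
#include <memory>

struct Widget { int Id = 0; };

// Builds reference_wrappers to the pointees without an explicit loop.
llvm::SmallVector<std::reference_wrapper<Widget>, 4>
collectRefs(llvm::SmallVectorImpl<std::unique_ptr<Widget>> &Owned) {
  return llvm::SmallVector<std::reference_wrapper<Widget>, 4>(
      llvm::make_pointee_range(Owned));
}
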
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchFinder.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchFinder.cpp
index b19a7fe3be04..ac8e4eccad8e 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchFinder.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchFinder.cpp
@@ -21,6 +21,7 @@
#include "clang/AST/RecursiveASTVisitor.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringMap.h"
+#include "llvm/Support/PrettyStackTrace.h"
#include "llvm/Support/Timer.h"
#include <deque>
#include <memory>
@@ -428,7 +429,7 @@ public:
}
void onStartOfTranslationUnit() {
- const bool EnableCheckProfiling = Options.CheckProfiling.hasValue();
+ const bool EnableCheckProfiling = Options.CheckProfiling.has_value();
TimeBucketRegion Timer;
for (MatchCallback *MC : Matchers->AllCallbacks) {
if (EnableCheckProfiling)
@@ -438,7 +439,7 @@ public:
}
void onEndOfTranslationUnit() {
- const bool EnableCheckProfiling = Options.CheckProfiling.hasValue();
+ const bool EnableCheckProfiling = Options.CheckProfiling.has_value();
TimeBucketRegion Timer;
for (MatchCallback *MC : Matchers->AllCallbacks) {
if (EnableCheckProfiling)
@@ -765,6 +766,191 @@ private:
bool TraversingASTNodeNotAsIs = false;
bool TraversingASTChildrenNotSpelledInSource = false;
+ class CurMatchData {
+// We don't have enough free low bits in 32-bit builds to discriminate 8 pointer
+// types in PointerUnion, so split the union in two using a free bit from the
+// callback pointer.
+#define CMD_TYPES_0 \
+ const QualType *, const TypeLoc *, const NestedNameSpecifier *, \
+ const NestedNameSpecifierLoc *
+#define CMD_TYPES_1 \
+ const CXXCtorInitializer *, const TemplateArgumentLoc *, const Attr *, \
+ const DynTypedNode *
+
+#define IMPL(Index) \
+ template <typename NodeType> \
+ typename std::enable_if_t< \
+ llvm::is_one_of<const NodeType *, CMD_TYPES_##Index>::value> \
+ SetCallbackAndRawNode(const MatchCallback *CB, const NodeType &N) { \
+ assertEmpty(); \
+ Callback.setPointerAndInt(CB, Index); \
+ Node##Index = &N; \
+ } \
+ \
+ template <typename T> \
+ typename std::enable_if_t< \
+ llvm::is_one_of<const T *, CMD_TYPES_##Index>::value, const T *> \
+ getNode() const { \
+ assertHoldsState(); \
+ return Callback.getInt() == (Index) ? Node##Index.dyn_cast<const T *>() \
+ : nullptr; \
+ }
+
+ public:
+ CurMatchData() : Node0(nullptr) {}
+
+ IMPL(0)
+ IMPL(1)
+
+ const MatchCallback *getCallback() const { return Callback.getPointer(); }
+
+ void SetBoundNodes(const BoundNodes &BN) {
+ assertHoldsState();
+ BNodes = &BN;
+ }
+
+ void clearBoundNodes() {
+ assertHoldsState();
+ BNodes = nullptr;
+ }
+
+ const BoundNodes *getBoundNodes() const {
+ assertHoldsState();
+ return BNodes;
+ }
+
+ void reset() {
+ assertHoldsState();
+ Callback.setPointerAndInt(nullptr, 0);
+ Node0 = nullptr;
+ }
+
+ private:
+ void assertHoldsState() const {
+ assert(Callback.getPointer() != nullptr && !Node0.isNull());
+ }
+
+ void assertEmpty() const {
+ assert(Callback.getPointer() == nullptr && Node0.isNull() &&
+ BNodes == nullptr);
+ }
+
+ llvm::PointerIntPair<const MatchCallback *, 1> Callback;
+ union {
+ llvm::PointerUnion<CMD_TYPES_0> Node0;
+ llvm::PointerUnion<CMD_TYPES_1> Node1;
+ };
+ const BoundNodes *BNodes = nullptr;
+
+#undef CMD_TYPES_0
+#undef CMD_TYPES_1
+#undef IMPL
+ } CurMatchState;
+
+ struct CurMatchRAII {
+ template <typename NodeType>
+ CurMatchRAII(MatchASTVisitor &MV, const MatchCallback *CB,
+ const NodeType &NT)
+ : MV(MV) {
+ MV.CurMatchState.SetCallbackAndRawNode(CB, NT);
+ }
+
+ ~CurMatchRAII() { MV.CurMatchState.reset(); }
+
+ private:
+ MatchASTVisitor &MV;
+ };
+
+public:
+ class TraceReporter : llvm::PrettyStackTraceEntry {
+ static void dumpNode(const ASTContext &Ctx, const DynTypedNode &Node,
+ raw_ostream &OS) {
+ if (const auto *D = Node.get<Decl>()) {
+ OS << D->getDeclKindName() << "Decl ";
+ if (const auto *ND = dyn_cast<NamedDecl>(D)) {
+ ND->printQualifiedName(OS);
+ OS << " : ";
+ } else
+ OS << ": ";
+ D->getSourceRange().print(OS, Ctx.getSourceManager());
+ } else if (const auto *S = Node.get<Stmt>()) {
+ OS << S->getStmtClassName() << " : ";
+ S->getSourceRange().print(OS, Ctx.getSourceManager());
+ } else if (const auto *T = Node.get<Type>()) {
+ OS << T->getTypeClassName() << "Type : ";
+ QualType(T, 0).print(OS, Ctx.getPrintingPolicy());
+ } else if (const auto *QT = Node.get<QualType>()) {
+ OS << "QualType : ";
+ QT->print(OS, Ctx.getPrintingPolicy());
+ } else {
+ OS << Node.getNodeKind().asStringRef() << " : ";
+ Node.getSourceRange().print(OS, Ctx.getSourceManager());
+ }
+ }
+
+ static void dumpNodeFromState(const ASTContext &Ctx,
+ const CurMatchData &State, raw_ostream &OS) {
+ if (const DynTypedNode *MatchNode = State.getNode<DynTypedNode>()) {
+ dumpNode(Ctx, *MatchNode, OS);
+ } else if (const auto *QT = State.getNode<QualType>()) {
+ dumpNode(Ctx, DynTypedNode::create(*QT), OS);
+ } else if (const auto *TL = State.getNode<TypeLoc>()) {
+ dumpNode(Ctx, DynTypedNode::create(*TL), OS);
+ } else if (const auto *NNS = State.getNode<NestedNameSpecifier>()) {
+ dumpNode(Ctx, DynTypedNode::create(*NNS), OS);
+ } else if (const auto *NNSL = State.getNode<NestedNameSpecifierLoc>()) {
+ dumpNode(Ctx, DynTypedNode::create(*NNSL), OS);
+ } else if (const auto *CtorInit = State.getNode<CXXCtorInitializer>()) {
+ dumpNode(Ctx, DynTypedNode::create(*CtorInit), OS);
+ } else if (const auto *TAL = State.getNode<TemplateArgumentLoc>()) {
+ dumpNode(Ctx, DynTypedNode::create(*TAL), OS);
+ } else if (const auto *At = State.getNode<Attr>()) {
+ dumpNode(Ctx, DynTypedNode::create(*At), OS);
+ }
+ }
+
+ public:
+ TraceReporter(const MatchASTVisitor &MV) : MV(MV) {}
+ void print(raw_ostream &OS) const override {
+ const CurMatchData &State = MV.CurMatchState;
+ const MatchCallback *CB = State.getCallback();
+ if (!CB) {
+ OS << "ASTMatcher: Not currently matching\n";
+ return;
+ }
+
+ assert(MV.ActiveASTContext &&
+ "ActiveASTContext should be set if there is a matched callback");
+
+ ASTContext &Ctx = MV.getASTContext();
+
+ if (const BoundNodes *Nodes = State.getBoundNodes()) {
+ OS << "ASTMatcher: Processing '" << CB->getID() << "' against:\n\t";
+ dumpNodeFromState(Ctx, State, OS);
+ const BoundNodes::IDToNodeMap &Map = Nodes->getMap();
+ if (Map.empty()) {
+ OS << "\nNo bound nodes\n";
+ return;
+ }
+ OS << "\n--- Bound Nodes Begin ---\n";
+ for (const auto &Item : Map) {
+ OS << " " << Item.first << " - { ";
+ dumpNode(Ctx, Item.second, OS);
+ OS << " }\n";
+ }
+ OS << "--- Bound Nodes End ---\n";
+ } else {
+ OS << "ASTMatcher: Matching '" << CB->getID() << "' against:\n\t";
+ dumpNodeFromState(Ctx, State, OS);
+ OS << '\n';
+ }
+ }
+
+ private:
+ const MatchASTVisitor &MV;
+ };
+
+private:
struct ASTNodeNotSpelledInSourceScope {
ASTNodeNotSpelledInSourceScope(MatchASTVisitor *V, bool B)
: MV(V), MB(V->TraversingASTNodeNotSpelledInSource) {
@@ -824,14 +1010,15 @@ private:
/// Used by \c matchDispatch() below.
template <typename T, typename MC>
void matchWithoutFilter(const T &Node, const MC &Matchers) {
- const bool EnableCheckProfiling = Options.CheckProfiling.hasValue();
+ const bool EnableCheckProfiling = Options.CheckProfiling.has_value();
TimeBucketRegion Timer;
for (const auto &MP : Matchers) {
if (EnableCheckProfiling)
Timer.setBucket(&TimeByBucket[MP.second->getID()]);
BoundNodesTreeBuilder Builder;
+ CurMatchRAII RAII(*this, MP.second, Node);
if (MP.first.matches(Node, this, &Builder)) {
- MatchVisitor Visitor(ActiveASTContext, MP.second);
+ MatchVisitor Visitor(*this, ActiveASTContext, MP.second);
Builder.visitMatches(&Visitor);
}
}
@@ -846,7 +1033,7 @@ private:
if (Filter.empty())
return;
- const bool EnableCheckProfiling = Options.CheckProfiling.hasValue();
+ const bool EnableCheckProfiling = Options.CheckProfiling.has_value();
TimeBucketRegion Timer;
auto &Matchers = this->Matchers->DeclOrStmt;
for (unsigned short I : Filter) {
@@ -862,8 +1049,9 @@ private:
continue;
}
+ CurMatchRAII RAII(*this, MP.second, DynNode);
if (MP.first.matches(DynNode, this, &Builder)) {
- MatchVisitor Visitor(ActiveASTContext, MP.second);
+ MatchVisitor Visitor(*this, ActiveASTContext, MP.second);
Builder.visitMatches(&Visitor);
}
}
@@ -1049,18 +1237,31 @@ private:
// Implements a BoundNodesTree::Visitor that calls a MatchCallback with
// the aggregated bound nodes for each match.
class MatchVisitor : public BoundNodesTreeBuilder::Visitor {
+ struct CurBoundScope {
+ CurBoundScope(MatchASTVisitor::CurMatchData &State, const BoundNodes &BN)
+ : State(State) {
+ State.SetBoundNodes(BN);
+ }
+
+ ~CurBoundScope() { State.clearBoundNodes(); }
+
+ private:
+ MatchASTVisitor::CurMatchData &State;
+ };
+
public:
- MatchVisitor(ASTContext* Context,
- MatchFinder::MatchCallback* Callback)
- : Context(Context),
- Callback(Callback) {}
+ MatchVisitor(MatchASTVisitor &MV, ASTContext *Context,
+ MatchFinder::MatchCallback *Callback)
+ : State(MV.CurMatchState), Context(Context), Callback(Callback) {}
void visitMatch(const BoundNodes& BoundNodesView) override {
TraversalKindScope RAII(*Context, Callback->getCheckTraversalKind());
+ CurBoundScope RAII2(State, BoundNodesView);
Callback->run(MatchFinder::MatchResult(BoundNodesView, Context));
}
private:
+ MatchASTVisitor::CurMatchData &State;
ASTContext* Context;
MatchFinder::MatchCallback* Callback;
};
@@ -1470,6 +1671,7 @@ void MatchFinder::match(const clang::DynTypedNode &Node, ASTContext &Context) {
void MatchFinder::matchAST(ASTContext &Context) {
internal::MatchASTVisitor Visitor(&Matchers, Options);
+ internal::MatchASTVisitor::TraceReporter StackTrace(Visitor);
Visitor.set_active_ast_context(&Context);
Visitor.onStartOfTranslationUnit();
Visitor.TraverseAST(Context);
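
Two techniques in this file are worth calling out. The TraceReporter hooks into LLVM's pretty stack trace machinery so a crash mid-match prints the active matcher and node. And CurMatchData works around PointerUnion's tag-bit limit on 32-bit hosts: eight node types would need three tag bits, so the union is split into two four-way PointerUnions, with the selector bit stolen from the callback pointer via PointerIntPair. A stripped-down sketch of that layout trick, with placeholder node types:

#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/PointerUnion.h"

struct alignas(8) NodeA {};
struct alignas(8) NodeB {};
struct alignas(8) NodeC {};
struct alignas(8) NodeD {};
struct alignas(8) Callback {};

class SplitUnion {
  // One spare bit from the callback pointer selects the active union member.
  llvm::PointerIntPair<const Callback *, 1> CB;
  union {
    llvm::PointerUnion<const NodeA *, const NodeB *> Node0; // bit == 0
    llvm::PointerUnion<const NodeC *, const NodeD *> Node1; // bit == 1
  };

public:
  SplitUnion() : Node0(nullptr) {}

  void set(const Callback *C, const NodeA *N) {
    CB.setPointerAndInt(C, 0);
    Node0 = N;
  }
  void set(const Callback *C, const NodeC *N) {
    CB.setPointerAndInt(C, 1);
    Node1 = N;
  }

  // Check the discriminator before touching either union member.
  const NodeA *getA() const {
    return CB.getInt() == 0 ? Node0.dyn_cast<const NodeA *>() : nullptr;
  }
  const NodeC *getC() const {
    return CB.getInt() == 1 ? Node1.dyn_cast<const NodeC *>() : nullptr;
  }
};
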
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchersInternal.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
index 815058f32de4..4c438f9e4879 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/ASTMatchersInternal.cpp
@@ -917,6 +917,7 @@ const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt;
const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr>
cxxBoolLiteral;
const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral> stringLiteral;
+const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCStringLiteral> objcStringLiteral;
const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral>
characterLiteral;
const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral>
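
The new objcStringLiteral matcher matches Objective-C @"..." literal expressions. A hedged usage sketch with the C++ MatchFinder API; the callback class and binding name here are illustrative:

#include "clang/AST/ExprObjC.h"
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/ASTMatchers/ASTMatchers.h"

using namespace clang;
using namespace clang::ast_matchers;

// Illustrative callback: dumps every @"..." literal that matches.
class ObjCStringReporter : public MatchFinder::MatchCallback {
public:
  void run(const MatchFinder::MatchResult &Result) override {
    if (const auto *Lit = Result.Nodes.getNodeAs<ObjCStringLiteral>("str"))
      Lit->dump();
  }
};

void registerObjCStringMatcher(MatchFinder &Finder, ObjCStringReporter &CB) {
  Finder.addMatcher(objcStringLiteral().bind("str"), &CB);
}
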
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp
index 40db70e6f4a5..70b5953fe969 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.cpp
@@ -148,7 +148,7 @@ clang::ast_matchers::dynamic::internal::ArgTypeTraits<
for (StringRef OrFlag : Split) {
if (llvm::Optional<llvm::Regex::RegexFlags> NextFlag =
getRegexFlag(OrFlag.trim()))
- Flag = Flag.getValueOr(llvm::Regex::NoFlags) | *NextFlag;
+ Flag = Flag.value_or(llvm::Regex::NoFlags) | *NextFlag;
else
return None;
}
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h
index 3e9c4f31b84d..bab36e5db47e 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Marshallers.h
@@ -174,7 +174,7 @@ public:
return Value.isString();
}
static bool hasCorrectValue(const VariantValue& Value) {
- return getAttrKind(Value.getString()).hasValue();
+ return getAttrKind(Value.getString()).has_value();
}
static attr::Kind get(const VariantValue &Value) {
@@ -204,7 +204,7 @@ public:
return Value.isString();
}
static bool hasCorrectValue(const VariantValue& Value) {
- return getCastKind(Value.getString()).hasValue();
+ return getCastKind(Value.getString()).has_value();
}
static CastKind get(const VariantValue &Value) {
@@ -227,7 +227,7 @@ public:
return Value.isString();
}
static bool hasCorrectValue(const VariantValue& Value) {
- return getFlags(Value.getString()).hasValue();
+ return getFlags(Value.getString()).has_value();
}
static llvm::Regex::RegexFlags get(const VariantValue &Value) {
@@ -254,7 +254,7 @@ public:
return Value.isString();
}
static bool hasCorrectValue(const VariantValue& Value) {
- return getClauseKind(Value.getString()).hasValue();
+ return getClauseKind(Value.getString()).has_value();
}
static OpenMPClauseKind get(const VariantValue &Value) {
@@ -285,7 +285,7 @@ public:
return Value.isString();
}
static bool hasCorrectValue(const VariantValue& Value) {
- return getUnaryOrTypeTraitKind(Value.getString()).hasValue();
+ return getUnaryOrTypeTraitKind(Value.getString()).has_value();
}
static UnaryExprOrTypeTrait get(const VariantValue &Value) {
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Parser.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Parser.cpp
index cab1476acf94..6470df27e6e2 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Parser.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Parser.cpp
@@ -397,9 +397,9 @@ bool Parser::parseIdentifierPrefixImpl(VariantValue *Value) {
assert(NamedValue.isMatcher());
llvm::Optional<DynTypedMatcher> Result =
NamedValue.getMatcher().getSingleMatcher();
- if (Result.hasValue()) {
+ if (Result) {
llvm::Optional<DynTypedMatcher> Bound = Result->tryBind(BindID);
- if (Bound.hasValue()) {
+ if (Bound) {
*Value = VariantMatcher::SingleMatcher(*Bound);
return true;
}
@@ -645,7 +645,7 @@ bool Parser::parseMatcherExpressionImpl(const TokenInfo &NameToken,
Tokenizer->SkipNewlines();
{
- ScopedContextEntry SCE(this, Ctor.getValueOr(nullptr));
+ ScopedContextEntry SCE(this, Ctor.value_or(nullptr));
while (Tokenizer->nextTokenKind() != TokenInfo::TK_Eof) {
if (Tokenizer->nextTokenKind() == TokenInfo::TK_CloseParen) {
@@ -917,7 +917,7 @@ Parser::parseMatcherExpression(StringRef &Code, Sema *S,
}
llvm::Optional<DynTypedMatcher> Result =
Value.getMatcher().getSingleMatcher();
- if (!Result.hasValue()) {
+ if (!Result) {
Error->addError(SourceRange(), Error->ET_ParserOverloadedType)
<< Value.getTypeAsString();
}
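
The hasValue/getValueOr rewrites here (and in the Marshallers files above) are part of the tree-wide migration of llvm::Optional toward the std::optional spelling. A minimal sketch of the old and new forms side by side; Opt is a placeholder value:

#include "llvm/ADT/Optional.h"

int unwrapOrZero(llvm::Optional<int> Opt) {
  // Old spellings: Opt.hasValue(), Opt.getValue(), Opt.getValueOr(0).
  if (Opt)                // contextual bool, equivalent to Opt.has_value()
    return *Opt;          // dereference replaces getValue()
  return Opt.value_or(0); // value_or replaces getValueOr
}
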
diff --git a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp
index 47db6b51966a..0a58652884ee 100644
--- a/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp
+++ b/contrib/llvm-project/clang/lib/ASTMatchers/Dynamic/Registry.cpp
@@ -250,6 +250,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(forEachLambdaCapture);
REGISTER_MATCHER(forEachOverridden);
REGISTER_MATCHER(forEachSwitchCase);
+ REGISTER_MATCHER(forEachTemplateArgument);
REGISTER_MATCHER(forField);
REGISTER_MATCHER(forFunction);
REGISTER_MATCHER(forStmt);
@@ -424,6 +425,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(isExpr);
REGISTER_MATCHER(isExternC);
REGISTER_MATCHER(isFinal);
+ REGISTER_MATCHER(isPrivateKind);
REGISTER_MATCHER(isFirstPrivateKind);
REGISTER_MATCHER(isImplicit);
REGISTER_MATCHER(isInStdNamespace);
@@ -503,6 +505,7 @@ RegistryMaps::RegistryMaps() {
REGISTER_MATCHER(objcObjectPointerType);
REGISTER_MATCHER(objcPropertyDecl);
REGISTER_MATCHER(objcProtocolDecl);
+ REGISTER_MATCHER(objcStringLiteral);
REGISTER_MATCHER(objcThrowStmt);
REGISTER_MATCHER(objcTryStmt);
REGISTER_MATCHER(ofClass);
@@ -795,9 +798,9 @@ VariantMatcher Registry::constructBoundMatcher(MatcherCtor Ctor,
if (Out.isNull()) return Out;
llvm::Optional<DynTypedMatcher> Result = Out.getSingleMatcher();
- if (Result.hasValue()) {
+ if (Result) {
llvm::Optional<DynTypedMatcher> Bound = Result->tryBind(BindID);
- if (Bound.hasValue()) {
+ if (Bound) {
return VariantMatcher::SingleMatcher(*Bound);
}
}
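
Among the newly registered names, forEachTemplateArgument runs an inner matcher over each template argument of a specialization or call. A hedged sketch of using it from C++, following the documented refersToType pattern; the binding name is arbitrary:

#include "clang/ASTMatchers/ASTMatchers.h"

using namespace clang::ast_matchers;

// Matches class template specializations where any template argument
// refers to the type 'int', binding the specialization as "spec".
static const auto IntSpecialization =
    classTemplateSpecializationDecl(
        forEachTemplateArgument(refersToType(asString("int"))))
        .bind("spec");
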
diff --git a/contrib/llvm-project/clang/lib/Analysis/AnalysisDeclContext.cpp b/contrib/llvm-project/clang/lib/Analysis/AnalysisDeclContext.cpp
index 06f1f813aeed..f20924604f64 100644
--- a/contrib/llvm-project/clang/lib/Analysis/AnalysisDeclContext.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/AnalysisDeclContext.cpp
@@ -352,7 +352,7 @@ std::string AnalysisDeclContext::getFunctionName(const Decl *D) {
for (const auto &P : FD->parameters()) {
if (P != *FD->param_begin())
OS << ", ";
- OS << P->getType().getAsString();
+ OS << P->getType();
}
OS << ')';
}
diff --git a/contrib/llvm-project/clang/lib/Analysis/BodyFarm.cpp b/contrib/llvm-project/clang/lib/Analysis/BodyFarm.cpp
index 92c236ed9080..23d37b881069 100644
--- a/contrib/llvm-project/clang/lib/Analysis/BodyFarm.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/BodyFarm.cpp
@@ -20,6 +20,7 @@
#include "clang/AST/ExprObjC.h"
#include "clang/AST/NestedNameSpecifier.h"
#include "clang/Analysis/CodeInjector.h"
+#include "clang/Basic/Builtins.h"
#include "clang/Basic/OperatorKinds.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Debug.h"
@@ -86,6 +87,9 @@ public:
ImplicitCastExpr *makeImplicitCast(const Expr *Arg, QualType Ty,
CastKind CK = CK_LValueToRValue);
+ /// Create a cast to reference type.
+ CastExpr *makeReferenceCast(const Expr *Arg, QualType Ty);
+
/// Create an Objective-C bool literal.
ObjCBoolLiteralExpr *makeObjCBool(bool Val);
@@ -130,7 +134,8 @@ BinaryOperator *ASTMaker::makeComparison(const Expr *LHS, const Expr *RHS,
}
CompoundStmt *ASTMaker::makeCompound(ArrayRef<Stmt *> Stmts) {
- return CompoundStmt::Create(C, Stmts, SourceLocation(), SourceLocation());
+ return CompoundStmt::Create(C, Stmts, FPOptionsOverride(), SourceLocation(),
+ SourceLocation());
}
DeclRefExpr *ASTMaker::makeDeclRefExpr(
@@ -173,6 +178,16 @@ ImplicitCastExpr *ASTMaker::makeImplicitCast(const Expr *Arg, QualType Ty,
/* FPFeatures */ FPOptionsOverride());
}
+CastExpr *ASTMaker::makeReferenceCast(const Expr *Arg, QualType Ty) {
+ assert(Ty->isReferenceType());
+ return CXXStaticCastExpr::Create(
+ C, Ty.getNonReferenceType(),
+ Ty->isLValueReferenceType() ? VK_LValue : VK_XValue, CK_NoOp,
+ const_cast<Expr *>(Arg), /*CXXCastPath=*/nullptr,
+ /*Written=*/C.getTrivialTypeSourceInfo(Ty), FPOptionsOverride(),
+ SourceLocation(), SourceLocation(), SourceRange());
+}
+
Expr *ASTMaker::makeIntegralCast(const Expr *Arg, QualType Ty) {
if (Arg->getType() == Ty)
return const_cast<Expr*>(Arg);
@@ -296,6 +311,22 @@ static CallExpr *create_call_once_lambda_call(ASTContext &C, ASTMaker M,
/*FPFeatures=*/FPOptionsOverride());
}
+/// Create a fake body for 'std::move' or 'std::forward'. This is just:
+///
+/// \code
+/// return static_cast<return_type>(param);
+/// \endcode
+static Stmt *create_std_move_forward(ASTContext &C, const FunctionDecl *D) {
+ LLVM_DEBUG(llvm::dbgs() << "Generating body for std::move / std::forward\n");
+
+ ASTMaker M(C);
+
+ QualType ReturnType = D->getType()->castAs<FunctionType>()->getReturnType();
+ Expr *Param = M.makeDeclRefExpr(D->getParamDecl(0));
+ Expr *Cast = M.makeReferenceCast(Param, ReturnType);
+ return M.makeReturn(Cast);
+}
+
/// Create a fake body for std::call_once.
/// Emulates the following function body:
///
@@ -667,7 +698,7 @@ static Stmt *create_OSAtomicCompareAndSwap(ASTContext &C, const FunctionDecl *D)
Stmt *BodyFarm::getBody(const FunctionDecl *D) {
Optional<Stmt *> &Val = Bodies[D];
- if (Val.hasValue())
+ if (Val)
return Val.getValue();
Val = nullptr;
@@ -681,8 +712,20 @@ Stmt *BodyFarm::getBody(const FunctionDecl *D) {
FunctionFarmer FF;
- if (Name.startswith("OSAtomicCompareAndSwap") ||
- Name.startswith("objc_atomicCompareAndSwap")) {
+ if (unsigned BuiltinID = D->getBuiltinID()) {
+ switch (BuiltinID) {
+ case Builtin::BIas_const:
+ case Builtin::BIforward:
+ case Builtin::BImove:
+ case Builtin::BImove_if_noexcept:
+ FF = create_std_move_forward;
+ break;
+ default:
+ FF = nullptr;
+ break;
+ }
+ } else if (Name.startswith("OSAtomicCompareAndSwap") ||
+ Name.startswith("objc_atomicCompareAndSwap")) {
FF = create_OSAtomicCompareAndSwap;
} else if (Name == "call_once" && D->getDeclContext()->isStdNamespace()) {
FF = create_call_once;
@@ -695,7 +738,7 @@ Stmt *BodyFarm::getBody(const FunctionDecl *D) {
if (FF) { Val = FF(C, D); }
else if (Injector) { Val = Injector->getBody(D); }
- return Val.getValue();
+ return *Val;
}
static const ObjCIvarDecl *findBackingIvar(const ObjCPropertyDecl *Prop) {
@@ -830,7 +873,7 @@ Stmt *BodyFarm::getBody(const ObjCMethodDecl *D) {
return nullptr;
Optional<Stmt *> &Val = Bodies[D];
- if (Val.hasValue())
+ if (Val)
return Val.getValue();
Val = nullptr;
@@ -858,5 +901,5 @@ Stmt *BodyFarm::getBody(const ObjCMethodDecl *D) {
Val = createObjCPropertyGetter(C, D);
- return Val.getValue();
+ return *Val;
}
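
With the BuiltinID switch above, std::move, std::forward, std::move_if_noexcept, and std::as_const now get farmed bodies that are just a reference cast of the parameter, so the analyzer can see through them. Roughly, the synthesized body models semantics like the following sketch (myMove/myForward are illustrative stand-ins; the real implementation builds the equivalent AST directly via ASTMaker):

#include <type_traits>

// What the farmed body models for std::move: a cast to xvalue.
template <class T>
typename std::remove_reference<T>::type &&myMove(T &&Param) {
  return static_cast<typename std::remove_reference<T>::type &&>(Param);
}

// And for std::forward: a cast back to the deduced reference type.
template <class T>
T &&myForward(typename std::remove_reference<T>::type &Param) {
  return static_cast<T &&>(Param);
}
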
diff --git a/contrib/llvm-project/clang/lib/Analysis/CFG.cpp b/contrib/llvm-project/clang/lib/Analysis/CFG.cpp
index 8246854dc1b5..614d94ae31a6 100644
--- a/contrib/llvm-project/clang/lib/Analysis/CFG.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/CFG.cpp
@@ -441,8 +441,7 @@ reverse_children::reverse_children(Stmt *S) {
}
// Default case for all other statements.
- for (Stmt *SubStmt : S->children())
- childrenBuf.push_back(SubStmt);
+ llvm::append_range(childrenBuf, S->children());
// This needs to be done *after* childrenBuf has been populated.
children = childrenBuf;
@@ -565,6 +564,7 @@ private:
AddStmtChoice asc);
CFGBlock *VisitCXXThrowExpr(CXXThrowExpr *T);
CFGBlock *VisitCXXTryStmt(CXXTryStmt *S);
+ CFGBlock *VisitCXXTypeidExpr(CXXTypeidExpr *S, AddStmtChoice asc);
CFGBlock *VisitDeclStmt(DeclStmt *DS);
CFGBlock *VisitDeclSubExpr(DeclStmt *DS);
CFGBlock *VisitDefaultStmt(DefaultStmt *D);
@@ -598,6 +598,8 @@ private:
CFGBlock *VisitObjCMessageExpr(ObjCMessageExpr *E, AddStmtChoice asc);
CFGBlock *VisitPseudoObjectExpr(PseudoObjectExpr *E);
CFGBlock *VisitReturnStmt(Stmt *S);
+ CFGBlock *VisitCoroutineSuspendExpr(CoroutineSuspendExpr *S,
+ AddStmtChoice asc);
CFGBlock *VisitSEHExceptStmt(SEHExceptStmt *S);
CFGBlock *VisitSEHFinallyStmt(SEHFinallyStmt *S);
CFGBlock *VisitSEHLeaveStmt(SEHLeaveStmt *S);
@@ -608,6 +610,7 @@ private:
AddStmtChoice asc);
CFGBlock *VisitUnaryOperator(UnaryOperator *U, AddStmtChoice asc);
CFGBlock *VisitWhileStmt(WhileStmt *W);
+ CFGBlock *VisitArrayInitLoopExpr(ArrayInitLoopExpr *A, AddStmtChoice asc);
CFGBlock *Visit(Stmt *S, AddStmtChoice asc = AddStmtChoice::NotAlwaysAdd,
bool ExternallyDestructed = false);
@@ -2020,13 +2023,8 @@ LocalScope* CFGBuilder::addLocalScopeForVarDecl(VarDecl *VD,
return Scope;
// Check if variable is local.
- switch (VD->getStorageClass()) {
- case SC_None:
- case SC_Auto:
- case SC_Register:
- break;
- default: return Scope;
- }
+ if (!VD->hasLocalStorage())
+ return Scope;
if (BuildOpts.AddImplicitDtors) {
if (!hasTrivialDestructor(VD) || BuildOpts.AddScopes) {
@@ -2223,6 +2221,9 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc,
case Stmt::CXXTryStmtClass:
return VisitCXXTryStmt(cast<CXXTryStmt>(S));
+ case Stmt::CXXTypeidExprClass:
+ return VisitCXXTypeidExpr(cast<CXXTypeidExpr>(S), asc);
+
case Stmt::CXXForRangeStmtClass:
return VisitCXXForRangeStmt(cast<CXXForRangeStmt>(S));
@@ -2303,6 +2304,10 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc,
case Stmt::CoreturnStmtClass:
return VisitReturnStmt(S);
+ case Stmt::CoyieldExprClass:
+ case Stmt::CoawaitExprClass:
+ return VisitCoroutineSuspendExpr(cast<CoroutineSuspendExpr>(S), asc);
+
case Stmt::SEHExceptStmtClass:
return VisitSEHExceptStmt(cast<SEHExceptStmt>(S));
@@ -2330,6 +2335,9 @@ CFGBlock *CFGBuilder::Visit(Stmt * S, AddStmtChoice asc,
case Stmt::WhileStmtClass:
return VisitWhileStmt(cast<WhileStmt>(S));
+
+ case Stmt::ArrayInitLoopExprClass:
+ return VisitArrayInitLoopExpr(cast<ArrayInitLoopExpr>(S), asc);
}
}
@@ -3143,8 +3151,40 @@ CFGBlock *CFGBuilder::VisitReturnStmt(Stmt *S) {
return Visit(O, AddStmtChoice::AlwaysAdd, /*ExternallyDestructed=*/true);
return Block;
}
- // co_return
- return VisitChildren(S);
+
+ CoreturnStmt *CRS = cast<CoreturnStmt>(S);
+ auto *B = Block;
+ if (CFGBlock *R = Visit(CRS->getPromiseCall()))
+ B = R;
+
+ if (Expr *RV = CRS->getOperand())
+ if (RV->getType()->isVoidType() && !isa<InitListExpr>(RV))
+ // A non-initlist void expression.
+ if (CFGBlock *R = Visit(RV))
+ B = R;
+
+ return B;
+}
+
+CFGBlock *CFGBuilder::VisitCoroutineSuspendExpr(CoroutineSuspendExpr *E,
+ AddStmtChoice asc) {
+  // We're modelling the pre-coro-xform CFG. Thus just evaluate the various
+ // active components of the co_await or co_yield. Note we do not model the
+ // edge from the builtin_suspend to the exit node.
+ if (asc.alwaysAdd(*this, E)) {
+ autoCreateBlock();
+ appendStmt(Block, E);
+ }
+ CFGBlock *B = Block;
+ if (auto *R = Visit(E->getResumeExpr()))
+ B = R;
+ if (auto *R = Visit(E->getSuspendExpr()))
+ B = R;
+ if (auto *R = Visit(E->getReadyExpr()))
+ B = R;
+ if (auto *R = Visit(E->getCommonExpr()))
+ B = R;
+ return B;
}
CFGBlock *CFGBuilder::VisitSEHExceptStmt(SEHExceptStmt *ES) {
@@ -3849,6 +3889,27 @@ CFGBlock *CFGBuilder::VisitWhileStmt(WhileStmt *W) {
return EntryConditionBlock;
}
+CFGBlock *CFGBuilder::VisitArrayInitLoopExpr(ArrayInitLoopExpr *A,
+ AddStmtChoice asc) {
+ if (asc.alwaysAdd(*this, A)) {
+ autoCreateBlock();
+ appendStmt(Block, A);
+ }
+
+ CFGBlock *B = Block;
+
+ if (CFGBlock *R = Visit(A->getSubExpr()))
+ B = R;
+
+ auto *OVE = dyn_cast<OpaqueValueExpr>(A->getCommonExpr());
+ assert(OVE && "ArrayInitLoopExpr->getCommonExpr() should be wrapped in an "
+ "OpaqueValueExpr!");
+ if (CFGBlock *R = Visit(OVE->getSourceExpr()))
+ B = R;
+
+ return B;
+}
+
CFGBlock *CFGBuilder::VisitObjCAtCatchStmt(ObjCAtCatchStmt *CS) {
// ObjCAtCatchStmt are treated like labels, so they are the first statement
// in a block.
@@ -3988,6 +4049,25 @@ CFGBlock *CFGBuilder::VisitCXXThrowExpr(CXXThrowExpr *T) {
return VisitStmt(T, AddStmtChoice::AlwaysAdd);
}
+CFGBlock *CFGBuilder::VisitCXXTypeidExpr(CXXTypeidExpr *S, AddStmtChoice asc) {
+ if (asc.alwaysAdd(*this, S)) {
+ autoCreateBlock();
+ appendStmt(Block, S);
+ }
+
+ // C++ [expr.typeid]p3:
+  //   When typeid is applied to an expression other than a glvalue of a
+ // polymorphic class type [...] [the] expression is an unevaluated
+ // operand. [...]
+ // We add only potentially evaluated statements to the block to avoid
+ // CFG generation for unevaluated operands.
+ if (S && !S->isTypeDependent() && S->isPotentiallyEvaluated())
+ return VisitChildren(S);
+
+ // Return block without CFG for unevaluated operands.
+ return Block;
+}
+
CFGBlock *CFGBuilder::VisitDoStmt(DoStmt *D) {
CFGBlock *LoopSuccessor = nullptr;
@@ -5611,12 +5691,10 @@ static void print_elem(raw_ostream &OS, StmtPrinterHelper &Helper,
if (Optional<CFGConstructor> CE = E.getAs<CFGConstructor>()) {
print_construction_context(OS, Helper, CE->getConstructionContext());
}
- OS << ", " << CCE->getType().getAsString() << ")";
+ OS << ", " << CCE->getType() << ")";
} else if (const CastExpr *CE = dyn_cast<CastExpr>(S)) {
- OS << " (" << CE->getStmtClassName() << ", "
- << CE->getCastKindName()
- << ", " << CE->getType().getAsString()
- << ")";
+ OS << " (" << CE->getStmtClassName() << ", " << CE->getCastKindName()
+ << ", " << CE->getType() << ")";
}
// Expressions need a newline.
@@ -6130,17 +6208,13 @@ Stmt *CFGBlock::getTerminatorCondition(bool StripParens) {
// CFG Graphviz Visualization
//===----------------------------------------------------------------------===//
-#ifndef NDEBUG
-static StmtPrinterHelper* GraphHelper;
-#endif
+static StmtPrinterHelper *GraphHelper;
void CFG::viewCFG(const LangOptions &LO) const {
-#ifndef NDEBUG
StmtPrinterHelper H(this, LO);
GraphHelper = &H;
llvm::ViewGraph(this,"CFG");
GraphHelper = nullptr;
-#endif
}
namespace llvm {
@@ -6149,8 +6223,7 @@ template<>
struct DOTGraphTraits<const CFG*> : public DefaultDOTGraphTraits {
DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
- static std::string getNodeLabel(const CFGBlock *Node, const CFG* Graph) {
-#ifndef NDEBUG
+ static std::string getNodeLabel(const CFGBlock *Node, const CFG *Graph) {
std::string OutSStr;
llvm::raw_string_ostream Out(OutSStr);
print_block(Out,Graph, *Node, *GraphHelper, false, false);
@@ -6166,9 +6239,6 @@ struct DOTGraphTraits<const CFG*> : public DefaultDOTGraphTraits {
}
return OutStr;
-#else
- return {};
-#endif
}
};
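
The VisitCXXTypeidExpr addition keeps unevaluated typeid operands out of the CFG, since per C++ [expr.typeid]p3 the operand is evaluated only for a glvalue of polymorphic class type. A short illustrative example of the two cases:

#include <typeinfo>

struct Plain { int X; };
struct Poly { virtual ~Poly(); };

int sideEffect();

void demo(Poly &P) {
  // Unevaluated operand: sideEffect() is never called, so the CFG builder
  // must not emit blocks for it.
  (void)typeid(sideEffect());

  // Potentially evaluated: P is a glvalue of polymorphic class type, so the
  // operand is visited (and the expression can throw for a null lvalue).
  (void)typeid(P);
}
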
diff --git a/contrib/llvm-project/clang/lib/Analysis/CalledOnceCheck.cpp b/contrib/llvm-project/clang/lib/Analysis/CalledOnceCheck.cpp
index 661f7b999f2b..c1d1d7b89ec7 100644
--- a/contrib/llvm-project/clang/lib/Analysis/CalledOnceCheck.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/CalledOnceCheck.cpp
@@ -1065,7 +1065,7 @@ private:
// 'swift_async' goes first and overrides anything else.
if (auto ConventionalAsync =
isConventionalSwiftAsync(Function, ParamIndex)) {
- return ConventionalAsync.getValue();
+ return *ConventionalAsync;
}
return shouldBeCalledOnce(Function->getParamDecl(ParamIndex)) ||
@@ -1082,7 +1082,7 @@ private:
// 'swift_async' goes first and overrides anything else.
if (auto ConventionalAsync = isConventionalSwiftAsync(Method, ParamIndex)) {
- return ConventionalAsync.getValue();
+ return *ConventionalAsync;
}
const ParmVarDecl *Parameter = Method->getParamDecl(ParamIndex);
diff --git a/contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp b/contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp
index e9ff5e5e8765..e3bb902b1fe9 100644
--- a/contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/ExprMutationAnalyzer.cpp
@@ -39,8 +39,13 @@ AST_MATCHER_P(Expr, maybeEvalCommaExpr, ast_matchers::internal::Matcher<Expr>,
return InnerMatcher.matches(*Result, Finder, Builder);
}
-AST_MATCHER_P(Expr, canResolveToExpr, ast_matchers::internal::Matcher<Expr>,
+AST_MATCHER_P(Stmt, canResolveToExpr, ast_matchers::internal::Matcher<Stmt>,
InnerMatcher) {
+ auto *Exp = dyn_cast<Expr>(&Node);
+ if (!Exp) {
+ return stmt().matches(Node, Finder, Builder);
+ }
+
auto DerivedToBase = [](const ast_matchers::internal::Matcher<Expr> &Inner) {
return implicitCastExpr(anyOf(hasCastKind(CK_DerivedToBase),
hasCastKind(CK_UncheckedDerivedToBase)),
@@ -71,7 +76,7 @@ AST_MATCHER_P(Expr, canResolveToExpr, ast_matchers::internal::Matcher<Expr>,
IgnoreDerivedToBase(ConditionalOperator),
IgnoreDerivedToBase(ElvisOperator))));
- return ComplexMatcher.matches(Node, Finder, Builder);
+ return ComplexMatcher.matches(*Exp, Finder, Builder);
}
// Similar to 'hasAnyArgument', but does not work because 'InitListExpr' does
@@ -194,12 +199,13 @@ const Stmt *ExprMutationAnalyzer::tryEachDeclRef(const Decl *Dec,
return nullptr;
}
-bool ExprMutationAnalyzer::isUnevaluated(const Expr *Exp) {
- return selectFirst<Expr>(
+bool ExprMutationAnalyzer::isUnevaluated(const Stmt *Exp, const Stmt &Stm,
+ ASTContext &Context) {
+ return selectFirst<Stmt>(
NodeID<Expr>::value,
match(
findAll(
- expr(canResolveToExpr(equalsNode(Exp)),
+ stmt(canResolveToExpr(equalsNode(Exp)),
anyOf(
// `Exp` is part of the underlying expression of
// decltype/typeof if it has an ancestor of
@@ -225,6 +231,10 @@ bool ExprMutationAnalyzer::isUnevaluated(const Expr *Exp) {
Stm, Context)) != nullptr;
}
+bool ExprMutationAnalyzer::isUnevaluated(const Expr *Exp) {
+ return isUnevaluated(Exp, Stm, Context);
+}
+
const Stmt *
ExprMutationAnalyzer::findExprMutation(ArrayRef<BoundNodes> Matches) {
return tryEachMatch<Expr>(Matches, this, &ExprMutationAnalyzer::findMutation);
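
isUnevaluated now has an overload taking the enclosing statement and AST context explicitly, so callers can query without wiring up an ExprMutationAnalyzer instance. A hedged sketch of the new entry point; FuncBody and Ctx are assumed to come from the caller:

#include "clang/Analysis/Analyses/ExprMutationAnalyzer.h"

using namespace clang;

// True if E only appears in unevaluated contexts (decltype, typeof, ...)
// within FuncBody.
bool inUnevaluatedContext(const Expr *E, const Stmt &FuncBody,
                          ASTContext &Ctx) {
  return ExprMutationAnalyzer::isUnevaluated(E, FuncBody, Ctx);
}
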
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp
index 3ad2ed640cc1..fe9907a7c99b 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/ControlFlowContext.cpp
@@ -33,11 +33,13 @@ buildStmtToBasicBlockMap(const CFG &Cfg) {
for (const CFGElement &Element : *Block) {
auto Stmt = Element.getAs<CFGStmt>();
- if (!Stmt.hasValue())
+ if (!Stmt)
continue;
StmtToBlock[Stmt.getValue().getStmt()] = Block;
}
+ if (const Stmt *TerminatorStmt = Block->getTerminatorStmt())
+ StmtToBlock[TerminatorStmt] = Block;
}
return StmtToBlock;
}
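
Recording the terminator statement matters because terminators (the IfStmt or WhileStmt owning a branch, for instance) are not CFGStmt elements of their block, so lookups for them previously came back empty. A condensed sketch of the whole mapping loop, with CFG construction elided:

#include "clang/Analysis/CFG.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"

using namespace clang;

llvm::DenseMap<const Stmt *, const CFGBlock *>
mapStmtsToBlocks(const CFG &Cfg) {
  llvm::DenseMap<const Stmt *, const CFGBlock *> StmtToBlock;
  for (const CFGBlock *Block : Cfg) {
    if (!Block)
      continue;
    for (const CFGElement &Element : *Block)
      if (llvm::Optional<CFGStmt> CS = Element.getAs<CFGStmt>())
        StmtToBlock[CS->getStmt()] = Block;
    // Terminators are not CFGStmt elements; record them explicitly.
    if (const Stmt *Terminator = Block->getTerminatorStmt())
      StmtToBlock[Terminator] = Block;
  }
  return StmtToBlock;
}
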
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp
new file mode 100644
index 000000000000..e08fc71c51dc
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowAnalysisContext.cpp
@@ -0,0 +1,340 @@
+//===-- DataflowAnalysisContext.cpp -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a DataflowAnalysisContext class that owns objects that
+// encompass the state of a program and stores context that is used during
+// dataflow analysis.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/DataflowAnalysisContext.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include <cassert>
+#include <memory>
+#include <utility>
+
+namespace clang {
+namespace dataflow {
+
+StorageLocation &
+DataflowAnalysisContext::getStableStorageLocation(QualType Type) {
+ assert(!Type.isNull());
+ if (Type->isStructureOrClassType() || Type->isUnionType()) {
+ // FIXME: Explore options to avoid eager initialization of fields as some of
+ // them might not be needed for a particular analysis.
+ llvm::DenseMap<const ValueDecl *, StorageLocation *> FieldLocs;
+ for (const FieldDecl *Field : getObjectFields(Type))
+ FieldLocs.insert({Field, &getStableStorageLocation(Field->getType())});
+ return takeOwnership(
+ std::make_unique<AggregateStorageLocation>(Type, std::move(FieldLocs)));
+ }
+ return takeOwnership(std::make_unique<ScalarStorageLocation>(Type));
+}
+
+StorageLocation &
+DataflowAnalysisContext::getStableStorageLocation(const VarDecl &D) {
+ if (auto *Loc = getStorageLocation(D))
+ return *Loc;
+ auto &Loc = getStableStorageLocation(D.getType());
+ setStorageLocation(D, Loc);
+ return Loc;
+}
+
+StorageLocation &
+DataflowAnalysisContext::getStableStorageLocation(const Expr &E) {
+ if (auto *Loc = getStorageLocation(E))
+ return *Loc;
+ auto &Loc = getStableStorageLocation(E.getType());
+ setStorageLocation(E, Loc);
+ return Loc;
+}
+
+PointerValue &
+DataflowAnalysisContext::getOrCreateNullPointerValue(QualType PointeeType) {
+ assert(!PointeeType.isNull());
+ auto CanonicalPointeeType = PointeeType.getCanonicalType();
+ auto Res = NullPointerVals.try_emplace(CanonicalPointeeType, nullptr);
+ if (Res.second) {
+ auto &PointeeLoc = getStableStorageLocation(CanonicalPointeeType);
+ Res.first->second =
+ &takeOwnership(std::make_unique<PointerValue>(PointeeLoc));
+ }
+ return *Res.first->second;
+}
+
+static std::pair<BoolValue *, BoolValue *>
+makeCanonicalBoolValuePair(BoolValue &LHS, BoolValue &RHS) {
+ auto Res = std::make_pair(&LHS, &RHS);
+ if (&RHS < &LHS)
+ std::swap(Res.first, Res.second);
+ return Res;
+}
+
+BoolValue &DataflowAnalysisContext::getOrCreateConjunction(BoolValue &LHS,
+ BoolValue &RHS) {
+ if (&LHS == &RHS)
+ return LHS;
+
+ auto Res = ConjunctionVals.try_emplace(makeCanonicalBoolValuePair(LHS, RHS),
+ nullptr);
+ if (Res.second)
+ Res.first->second =
+ &takeOwnership(std::make_unique<ConjunctionValue>(LHS, RHS));
+ return *Res.first->second;
+}
+
+BoolValue &DataflowAnalysisContext::getOrCreateDisjunction(BoolValue &LHS,
+ BoolValue &RHS) {
+ if (&LHS == &RHS)
+ return LHS;
+
+ auto Res = DisjunctionVals.try_emplace(makeCanonicalBoolValuePair(LHS, RHS),
+ nullptr);
+ if (Res.second)
+ Res.first->second =
+ &takeOwnership(std::make_unique<DisjunctionValue>(LHS, RHS));
+ return *Res.first->second;
+}
+
+BoolValue &DataflowAnalysisContext::getOrCreateNegation(BoolValue &Val) {
+ auto Res = NegationVals.try_emplace(&Val, nullptr);
+ if (Res.second)
+ Res.first->second = &takeOwnership(std::make_unique<NegationValue>(Val));
+ return *Res.first->second;
+}
+
+BoolValue &DataflowAnalysisContext::getOrCreateImplication(BoolValue &LHS,
+ BoolValue &RHS) {
+ return &LHS == &RHS ? getBoolLiteralValue(true)
+ : getOrCreateDisjunction(getOrCreateNegation(LHS), RHS);
+}
+
+BoolValue &DataflowAnalysisContext::getOrCreateIff(BoolValue &LHS,
+ BoolValue &RHS) {
+ return &LHS == &RHS
+ ? getBoolLiteralValue(true)
+ : getOrCreateConjunction(getOrCreateImplication(LHS, RHS),
+ getOrCreateImplication(RHS, LHS));
+}
+
+AtomicBoolValue &DataflowAnalysisContext::makeFlowConditionToken() {
+ return createAtomicBoolValue();
+}
+
+void DataflowAnalysisContext::addFlowConditionConstraint(
+ AtomicBoolValue &Token, BoolValue &Constraint) {
+ auto Res = FlowConditionConstraints.try_emplace(&Token, &Constraint);
+ if (!Res.second) {
+ Res.first->second = &getOrCreateConjunction(*Res.first->second, Constraint);
+ }
+}
+
+AtomicBoolValue &
+DataflowAnalysisContext::forkFlowCondition(AtomicBoolValue &Token) {
+ auto &ForkToken = makeFlowConditionToken();
+ FlowConditionDeps[&ForkToken].insert(&Token);
+ addFlowConditionConstraint(ForkToken, Token);
+ return ForkToken;
+}
+
+AtomicBoolValue &
+DataflowAnalysisContext::joinFlowConditions(AtomicBoolValue &FirstToken,
+ AtomicBoolValue &SecondToken) {
+ auto &Token = makeFlowConditionToken();
+ FlowConditionDeps[&Token].insert(&FirstToken);
+ FlowConditionDeps[&Token].insert(&SecondToken);
+ addFlowConditionConstraint(Token,
+ getOrCreateDisjunction(FirstToken, SecondToken));
+ return Token;
+}
+
+Solver::Result
+DataflowAnalysisContext::querySolver(llvm::DenseSet<BoolValue *> Constraints) {
+ Constraints.insert(&getBoolLiteralValue(true));
+ Constraints.insert(&getOrCreateNegation(getBoolLiteralValue(false)));
+ return S->solve(std::move(Constraints));
+}
+
+bool DataflowAnalysisContext::flowConditionImplies(AtomicBoolValue &Token,
+ BoolValue &Val) {
+  // Returns true if and only if the truth assignment of the flow condition implies
+ // that `Val` is also true. We prove whether or not this property holds by
+ // reducing the problem to satisfiability checking. In other words, we attempt
+ // to show that assuming `Val` is false makes the constraints induced by the
+ // flow condition unsatisfiable.
+ llvm::DenseSet<BoolValue *> Constraints = {&Token, &getOrCreateNegation(Val)};
+ llvm::DenseSet<AtomicBoolValue *> VisitedTokens;
+ addTransitiveFlowConditionConstraints(Token, Constraints, VisitedTokens);
+ return isUnsatisfiable(std::move(Constraints));
+}
+
+bool DataflowAnalysisContext::flowConditionIsTautology(AtomicBoolValue &Token) {
+ // Returns true if and only if we cannot prove that the flow condition can
+ // ever be false.
+ llvm::DenseSet<BoolValue *> Constraints = {&getOrCreateNegation(Token)};
+ llvm::DenseSet<AtomicBoolValue *> VisitedTokens;
+ addTransitiveFlowConditionConstraints(Token, Constraints, VisitedTokens);
+ return isUnsatisfiable(std::move(Constraints));
+}
+
+bool DataflowAnalysisContext::equivalentBoolValues(BoolValue &Val1,
+ BoolValue &Val2) {
+ llvm::DenseSet<BoolValue *> Constraints = {
+ &getOrCreateNegation(getOrCreateIff(Val1, Val2))};
+ return isUnsatisfiable(Constraints);
+}
+
+void DataflowAnalysisContext::addTransitiveFlowConditionConstraints(
+ AtomicBoolValue &Token, llvm::DenseSet<BoolValue *> &Constraints,
+ llvm::DenseSet<AtomicBoolValue *> &VisitedTokens) {
+ auto Res = VisitedTokens.insert(&Token);
+ if (!Res.second)
+ return;
+
+ auto ConstraintsIT = FlowConditionConstraints.find(&Token);
+ if (ConstraintsIT == FlowConditionConstraints.end()) {
+ Constraints.insert(&Token);
+ } else {
+ // Bind flow condition token via `iff` to its set of constraints:
+ // FC <=> (C1 ^ C2 ^ ...), where Ci are constraints
+ Constraints.insert(&getOrCreateIff(Token, *ConstraintsIT->second));
+ }
+
+ auto DepsIT = FlowConditionDeps.find(&Token);
+ if (DepsIT != FlowConditionDeps.end()) {
+ for (AtomicBoolValue *DepToken : DepsIT->second) {
+ addTransitiveFlowConditionConstraints(*DepToken, Constraints,
+ VisitedTokens);
+ }
+ }
+}
+
+BoolValue &DataflowAnalysisContext::substituteBoolValue(
+ BoolValue &Val,
+ llvm::DenseMap<BoolValue *, BoolValue *> &SubstitutionsCache) {
+ auto IT = SubstitutionsCache.find(&Val);
+ if (IT != SubstitutionsCache.end()) {
+ // Return memoized result of substituting this boolean value.
+ return *IT->second;
+ }
+
+ // Handle substitution on the boolean value (and its subvalues), saving the
+ // result into `SubstitutionsCache`.
+ BoolValue *Result;
+ switch (Val.getKind()) {
+ case Value::Kind::AtomicBool: {
+ Result = &Val;
+ break;
+ }
+ case Value::Kind::Negation: {
+ auto &Negation = *cast<NegationValue>(&Val);
+ auto &Sub = substituteBoolValue(Negation.getSubVal(), SubstitutionsCache);
+ Result = &getOrCreateNegation(Sub);
+ break;
+ }
+ case Value::Kind::Disjunction: {
+ auto &Disjunct = *cast<DisjunctionValue>(&Val);
+ auto &LeftSub =
+ substituteBoolValue(Disjunct.getLeftSubValue(), SubstitutionsCache);
+ auto &RightSub =
+ substituteBoolValue(Disjunct.getRightSubValue(), SubstitutionsCache);
+ Result = &getOrCreateDisjunction(LeftSub, RightSub);
+ break;
+ }
+ case Value::Kind::Conjunction: {
+ auto &Conjunct = *cast<ConjunctionValue>(&Val);
+ auto &LeftSub =
+ substituteBoolValue(Conjunct.getLeftSubValue(), SubstitutionsCache);
+ auto &RightSub =
+ substituteBoolValue(Conjunct.getRightSubValue(), SubstitutionsCache);
+ Result = &getOrCreateConjunction(LeftSub, RightSub);
+ break;
+ }
+ default:
+ llvm_unreachable("Unhandled Value Kind");
+ }
+ SubstitutionsCache[&Val] = Result;
+ return *Result;
+}
+
+BoolValue &DataflowAnalysisContext::buildAndSubstituteFlowCondition(
+ AtomicBoolValue &Token,
+ llvm::DenseMap<AtomicBoolValue *, BoolValue *> Substitutions) {
+ assert(
+ Substitutions.find(&getBoolLiteralValue(true)) == Substitutions.end() &&
+ Substitutions.find(&getBoolLiteralValue(false)) == Substitutions.end() &&
+ "Do not substitute true/false boolean literals");
+ llvm::DenseMap<BoolValue *, BoolValue *> SubstitutionsCache(
+ Substitutions.begin(), Substitutions.end());
+ return buildAndSubstituteFlowConditionWithCache(Token, SubstitutionsCache);
+}
+
+BoolValue &DataflowAnalysisContext::buildAndSubstituteFlowConditionWithCache(
+ AtomicBoolValue &Token,
+ llvm::DenseMap<BoolValue *, BoolValue *> &SubstitutionsCache) {
+ auto ConstraintsIT = FlowConditionConstraints.find(&Token);
+ if (ConstraintsIT == FlowConditionConstraints.end()) {
+ return getBoolLiteralValue(true);
+ }
+ auto DepsIT = FlowConditionDeps.find(&Token);
+ if (DepsIT != FlowConditionDeps.end()) {
+ for (AtomicBoolValue *DepToken : DepsIT->second) {
+ auto &NewDep = buildAndSubstituteFlowConditionWithCache(
+ *DepToken, SubstitutionsCache);
+ SubstitutionsCache[DepToken] = &NewDep;
+ }
+ }
+ return substituteBoolValue(*ConstraintsIT->second, SubstitutionsCache);
+}
+
+} // namespace dataflow
+} // namespace clang
+
+using namespace clang;
+
+const Expr &clang::dataflow::ignoreCFGOmittedNodes(const Expr &E) {
+ const Expr *Current = &E;
+ if (auto *EWC = dyn_cast<ExprWithCleanups>(Current)) {
+ Current = EWC->getSubExpr();
+ assert(Current != nullptr);
+ }
+ Current = Current->IgnoreParens();
+ assert(Current != nullptr);
+ return *Current;
+}
+
+const Stmt &clang::dataflow::ignoreCFGOmittedNodes(const Stmt &S) {
+ if (auto *E = dyn_cast<Expr>(&S))
+ return ignoreCFGOmittedNodes(*E);
+ return S;
+}
+
+// FIXME: Does not precisely handle non-virtual diamond inheritance. A single
+// field decl will be modeled for all instances of the inherited field.
+static void
+getFieldsFromClassHierarchy(QualType Type,
+ llvm::DenseSet<const FieldDecl *> &Fields) {
+ if (Type->isIncompleteType() || Type->isDependentType() ||
+ !Type->isRecordType())
+ return;
+
+ for (const FieldDecl *Field : Type->getAsRecordDecl()->fields())
+ Fields.insert(Field);
+ if (auto *CXXRecord = Type->getAsCXXRecordDecl())
+ for (const CXXBaseSpecifier &Base : CXXRecord->bases())
+ getFieldsFromClassHierarchy(Base.getType(), Fields);
+}
+
+/// Gets the set of all fields in the type.
+llvm::DenseSet<const FieldDecl *>
+clang::dataflow::getObjectFields(QualType Type) {
+ llvm::DenseSet<const FieldDecl *> Fields;
+ getFieldsFromClassHierarchy(Type, Fields);
+ return Fields;
+}
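
flowConditionImplies proves "FC implies Val" by refutation: the implication holds exactly when FC together with the negation of Val is unsatisfiable, which is what the solver is asked. A hedged usage sketch of this API; constructing the DataflowAnalysisContext with a concrete Solver is assumed to happen elsewhere:

#include "clang/Analysis/FlowSensitive/DataflowAnalysisContext.h"

using namespace clang::dataflow;

// Sketch: assert a fact on a path's flow condition, then query it back.
bool knownOnPath(DataflowAnalysisContext &DACtx) {
  AtomicBoolValue &FC = DACtx.makeFlowConditionToken();
  AtomicBoolValue &Fact = DACtx.createAtomicBoolValue();

  // Record "Fact holds whenever this path is taken".
  DACtx.addFlowConditionConstraint(FC, Fact);

  // Internally reduces to: is (FC and not Fact) unsatisfiable?
  return DACtx.flowConditionImplies(FC, Fact);
}
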
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
index eca58b313761..3aea670f20aa 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/DataflowEnvironment.cpp
@@ -15,19 +15,28 @@
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprCXX.h"
#include "clang/AST/Type.h"
#include "clang/Analysis/FlowSensitive/DataflowLattice.h"
#include "clang/Analysis/FlowSensitive/StorageLocation.h"
#include "clang/Analysis/FlowSensitive/Value.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
+#include <cassert>
#include <memory>
#include <utility>
namespace clang {
namespace dataflow {
+// FIXME: convert these to parameters of the analysis or environment. Current
+// settings have been experimentally validated, but only for a particular
+// analysis.
+static constexpr int MaxCompositeValueDepth = 3;
+static constexpr int MaxCompositeValueSize = 1000;
+
/// Returns a map consisting of key-value entries that are present in both maps.
template <typename K, typename V>
llvm::DenseMap<K, V> intersectDenseMaps(const llvm::DenseMap<K, V> &Map1,
@@ -41,25 +50,130 @@ llvm::DenseMap<K, V> intersectDenseMaps(const llvm::DenseMap<K, V> &Map1,
return Result;
}
+static bool areEquivalentIndirectionValues(Value *Val1, Value *Val2) {
+ if (auto *IndVal1 = dyn_cast<ReferenceValue>(Val1)) {
+ auto *IndVal2 = cast<ReferenceValue>(Val2);
+ return &IndVal1->getReferentLoc() == &IndVal2->getReferentLoc();
+ }
+ if (auto *IndVal1 = dyn_cast<PointerValue>(Val1)) {
+ auto *IndVal2 = cast<PointerValue>(Val2);
+ return &IndVal1->getPointeeLoc() == &IndVal2->getPointeeLoc();
+ }
+ return false;
+}
+
/// Returns true if and only if `Val1` is equivalent to `Val2`.
-static bool equivalentValues(QualType Type, Value *Val1, Value *Val2,
+static bool equivalentValues(QualType Type, Value *Val1,
+ const Environment &Env1, Value *Val2,
+ const Environment &Env2,
Environment::ValueModel &Model) {
- if (Val1 == Val2)
- return true;
+ return Val1 == Val2 || areEquivalentIndirectionValues(Val1, Val2) ||
+ Model.compareEquivalent(Type, *Val1, Env1, *Val2, Env2);
+}
- if (auto *IndVal1 = dyn_cast<IndirectionValue>(Val1)) {
- auto *IndVal2 = cast<IndirectionValue>(Val2);
- assert(IndVal1->getKind() == IndVal2->getKind());
- return &IndVal1->getPointeeLoc() == &IndVal2->getPointeeLoc();
+/// Attempts to merge distinct values `Val1` and `Val2` in `Env1` and `Env2`,
+/// respectively, of the same type `Type`. Merging generally produces a single
+/// value that (soundly) approximates the two inputs, although the actual
+/// meaning depends on `Model`.
+static Value *mergeDistinctValues(QualType Type, Value *Val1,
+ const Environment &Env1, Value *Val2,
+ const Environment &Env2,
+ Environment &MergedEnv,
+ Environment::ValueModel &Model) {
+ // Join distinct boolean values preserving information about the constraints
+ // in the respective path conditions.
+ //
+ // FIXME: Does not work for backedges, since the two (or more) paths will not
+ // have mutually exclusive conditions.
+ if (auto *Expr1 = dyn_cast<BoolValue>(Val1)) {
+ auto *Expr2 = cast<BoolValue>(Val2);
+ auto &MergedVal = MergedEnv.makeAtomicBoolValue();
+ MergedEnv.addToFlowCondition(MergedEnv.makeOr(
+ MergedEnv.makeAnd(Env1.getFlowConditionToken(),
+ MergedEnv.makeIff(MergedVal, *Expr1)),
+ MergedEnv.makeAnd(Env2.getFlowConditionToken(),
+ MergedEnv.makeIff(MergedVal, *Expr2))));
+ return &MergedVal;
+ }
+
+ // FIXME: add unit tests that cover this statement.
+ if (areEquivalentIndirectionValues(Val1, Val2)) {
+ return Val1;
+ }
+
+ // FIXME: Consider destroying `MergedValue` immediately if `ValueModel::merge`
+ // returns false to avoid storing unneeded values in `DACtx`.
+ if (Value *MergedVal = MergedEnv.createValue(Type))
+ if (Model.merge(Type, *Val1, Env1, *Val2, Env2, *MergedVal, MergedEnv))
+ return MergedVal;
+
+ return nullptr;
+}
+
+/// Initializes a global storage value.
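
The boolean branch of mergeDistinctValues above encodes the join as a guarded equivalence: the fresh atom equals Val1 under Env1's flow condition and Val2 under Env2's. A condensed sketch of just that encoding, assuming valid environments from the two predecessor paths:

#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"

using namespace clang::dataflow;

// Join two boolean values from sibling environments into MergedEnv.
BoolValue &joinBools(BoolValue &V1, const Environment &Env1, BoolValue &V2,
                     const Environment &Env2, Environment &MergedEnv) {
  BoolValue &Merged = MergedEnv.makeAtomicBoolValue();
  // (FC1 and (Merged iff V1)) or (FC2 and (Merged iff V2))
  MergedEnv.addToFlowCondition(MergedEnv.makeOr(
      MergedEnv.makeAnd(Env1.getFlowConditionToken(),
                        MergedEnv.makeIff(Merged, V1)),
      MergedEnv.makeAnd(Env2.getFlowConditionToken(),
                        MergedEnv.makeIff(Merged, V2))));
  return Merged;
}
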
+static void initGlobalVar(const VarDecl &D, Environment &Env) {
+ if (!D.hasGlobalStorage() ||
+ Env.getStorageLocation(D, SkipPast::None) != nullptr)
+ return;
+
+ auto &Loc = Env.createStorageLocation(D);
+ Env.setStorageLocation(D, Loc);
+ if (auto *Val = Env.createValue(D.getType()))
+ Env.setValue(Loc, *Val);
+}
+
+/// Initializes a global storage value.
+static void initGlobalVar(const Decl &D, Environment &Env) {
+ if (auto *V = dyn_cast<VarDecl>(&D))
+ initGlobalVar(*V, Env);
+}
+
+/// Initializes global storage values that are declared or referenced from
+/// sub-statements of `S`.
+// FIXME: Add support for resetting globals after function calls to enable
+// the implementation of sound analyses.
+static void initGlobalVars(const Stmt &S, Environment &Env) {
+ for (auto *Child : S.children()) {
+ if (Child != nullptr)
+ initGlobalVars(*Child, Env);
}
- return Model.compareEquivalent(Type, *Val1, *Val2);
+ if (auto *DS = dyn_cast<DeclStmt>(&S)) {
+ if (DS->isSingleDecl()) {
+ initGlobalVar(*DS->getSingleDecl(), Env);
+ } else {
+ for (auto *D : DS->getDeclGroup())
+ initGlobalVar(*D, Env);
+ }
+ } else if (auto *E = dyn_cast<DeclRefExpr>(&S)) {
+ initGlobalVar(*E->getDecl(), Env);
+ } else if (auto *E = dyn_cast<MemberExpr>(&S)) {
+ initGlobalVar(*E->getMemberDecl(), Env);
+ }
+}
+
+Environment::Environment(DataflowAnalysisContext &DACtx)
+ : DACtx(&DACtx), FlowConditionToken(&DACtx.makeFlowConditionToken()) {}
+
+Environment::Environment(const Environment &Other)
+ : DACtx(Other.DACtx), DeclToLoc(Other.DeclToLoc),
+ ExprToLoc(Other.ExprToLoc), LocToVal(Other.LocToVal),
+ MemberLocToStruct(Other.MemberLocToStruct),
+ FlowConditionToken(&DACtx->forkFlowCondition(*Other.FlowConditionToken)) {
+}
+
+Environment &Environment::operator=(const Environment &Other) {
+ Environment Copy(Other);
+ *this = std::move(Copy);
+ return *this;
}
Environment::Environment(DataflowAnalysisContext &DACtx,
const DeclContext &DeclCtx)
: Environment(DACtx) {
if (const auto *FuncDecl = dyn_cast<FunctionDecl>(&DeclCtx)) {
+ assert(FuncDecl->getBody() != nullptr);
+ initGlobalVars(*FuncDecl->getBody(), *this);
for (const auto *ParamDecl : FuncDecl->parameters()) {
assert(ParamDecl != nullptr);
auto &ParamLoc = createStorageLocation(*ParamDecl);
@@ -70,7 +184,12 @@ Environment::Environment(DataflowAnalysisContext &DACtx,
}
if (const auto *MethodDecl = dyn_cast<CXXMethodDecl>(&DeclCtx)) {
- if (!MethodDecl->isStatic()) {
+ auto *Parent = MethodDecl->getParent();
+ assert(Parent != nullptr);
+ if (Parent->isLambda())
+ MethodDecl = dyn_cast<CXXMethodDecl>(Parent->getDeclContext());
+
+ if (MethodDecl && !MethodDecl->isStatic()) {
QualType ThisPointeeType = MethodDecl->getThisObjectType();
// FIXME: Add support for union types.
if (!ThisPointeeType->isUnionType()) {
@@ -93,9 +212,7 @@ bool Environment::equivalentTo(const Environment &Other,
if (ExprToLoc != Other.ExprToLoc)
return false;
- if (LocToVal.size() != Other.LocToVal.size())
- return false;
-
+ // Compare the contents for the intersection of their domains.
for (auto &Entry : LocToVal) {
const StorageLocation *Loc = Entry.first;
assert(Loc != nullptr);
@@ -105,10 +222,10 @@ bool Environment::equivalentTo(const Environment &Other,
auto It = Other.LocToVal.find(Loc);
if (It == Other.LocToVal.end())
- return false;
+ continue;
assert(It->second != nullptr);
- if (!equivalentValues(Loc->getType(), Val, It->second, Model))
+ if (!equivalentValues(Loc->getType(), Val, *this, It->second, Other, Model))
return false;
}
@@ -121,22 +238,26 @@ LatticeJoinEffect Environment::join(const Environment &Other,
auto Effect = LatticeJoinEffect::Unchanged;
- const unsigned DeclToLocSizeBefore = DeclToLoc.size();
- DeclToLoc = intersectDenseMaps(DeclToLoc, Other.DeclToLoc);
- if (DeclToLocSizeBefore != DeclToLoc.size())
+ Environment JoinedEnv(*DACtx);
+
+ JoinedEnv.DeclToLoc = intersectDenseMaps(DeclToLoc, Other.DeclToLoc);
+ if (DeclToLoc.size() != JoinedEnv.DeclToLoc.size())
Effect = LatticeJoinEffect::Changed;
- const unsigned ExprToLocSizeBefore = ExprToLoc.size();
- ExprToLoc = intersectDenseMaps(ExprToLoc, Other.ExprToLoc);
- if (ExprToLocSizeBefore != ExprToLoc.size())
+ JoinedEnv.ExprToLoc = intersectDenseMaps(ExprToLoc, Other.ExprToLoc);
+ if (ExprToLoc.size() != JoinedEnv.ExprToLoc.size())
Effect = LatticeJoinEffect::Changed;
- // Move `LocToVal` so that `Environment::ValueModel::merge` can safely assign
- // values to storage locations while this code iterates over the current
- // assignments.
- llvm::DenseMap<const StorageLocation *, Value *> OldLocToVal =
- std::move(LocToVal);
- for (auto &Entry : OldLocToVal) {
+ JoinedEnv.MemberLocToStruct =
+ intersectDenseMaps(MemberLocToStruct, Other.MemberLocToStruct);
+ if (MemberLocToStruct.size() != JoinedEnv.MemberLocToStruct.size())
+ Effect = LatticeJoinEffect::Changed;
+
+ // FIXME: set `Effect` as needed.
+ JoinedEnv.FlowConditionToken = &DACtx->joinFlowConditions(
+ *FlowConditionToken, *Other.FlowConditionToken);
+
+ for (auto &Entry : LocToVal) {
const StorageLocation *Loc = Entry.first;
assert(Loc != nullptr);
@@ -148,59 +269,39 @@ LatticeJoinEffect Environment::join(const Environment &Other,
continue;
assert(It->second != nullptr);
- if (equivalentValues(Loc->getType(), Val, It->second, Model)) {
- LocToVal.insert({Loc, Val});
+ if (Val == It->second) {
+ JoinedEnv.LocToVal.insert({Loc, Val});
continue;
}
- // FIXME: Consider destroying `MergedValue` immediately if
- // `ValueModel::merge` returns false to avoid storing unneeded values in
- // `DACtx`.
- if (Value *MergedVal = createValue(Loc->getType()))
- if (Model.merge(Loc->getType(), *Val, *It->second, *MergedVal, *this))
- LocToVal.insert({Loc, MergedVal});
+ if (Value *MergedVal = mergeDistinctValues(
+ Loc->getType(), Val, *this, It->second, Other, JoinedEnv, Model))
+ JoinedEnv.LocToVal.insert({Loc, MergedVal});
}
- if (OldLocToVal.size() != LocToVal.size())
+ if (LocToVal.size() != JoinedEnv.LocToVal.size())
Effect = LatticeJoinEffect::Changed;
+ *this = std::move(JoinedEnv);
+
return Effect;
}
StorageLocation &Environment::createStorageLocation(QualType Type) {
- assert(!Type.isNull());
- if (Type->isStructureOrClassType() || Type->isUnionType()) {
- // FIXME: Explore options to avoid eager initialization of fields as some of
- // them might not be needed for a particular analysis.
- llvm::DenseMap<const ValueDecl *, StorageLocation *> FieldLocs;
- for (const FieldDecl *Field : Type->getAsRecordDecl()->fields()) {
- FieldLocs.insert({Field, &createStorageLocation(Field->getType())});
- }
- return takeOwnership(
- std::make_unique<AggregateStorageLocation>(Type, std::move(FieldLocs)));
- }
- return takeOwnership(std::make_unique<ScalarStorageLocation>(Type));
+ return DACtx->getStableStorageLocation(Type);
}
StorageLocation &Environment::createStorageLocation(const VarDecl &D) {
// Evaluated declarations are always assigned the same storage locations to
// ensure that the environment stabilizes across loop iterations. Storage
// locations for evaluated declarations are stored in the analysis context.
- if (auto *Loc = DACtx->getStorageLocation(D))
- return *Loc;
- auto &Loc = createStorageLocation(D.getType());
- DACtx->setStorageLocation(D, Loc);
- return Loc;
+ return DACtx->getStableStorageLocation(D);
}
StorageLocation &Environment::createStorageLocation(const Expr &E) {
// Evaluated expressions are always assigned the same storage locations to
// ensure that the environment stabilizes across loop iterations. Storage
// locations for evaluated expressions are stored in the analysis context.
- if (auto *Loc = DACtx->getStorageLocation(E))
- return *Loc;
- auto &Loc = createStorageLocation(E.getType());
- DACtx->setStorageLocation(E, Loc);
- return Loc;
+ return DACtx->getStableStorageLocation(E);
}
void Environment::setStorageLocation(const ValueDecl &D, StorageLocation &Loc) {
@@ -215,13 +316,15 @@ StorageLocation *Environment::getStorageLocation(const ValueDecl &D,
}
void Environment::setStorageLocation(const Expr &E, StorageLocation &Loc) {
- assert(ExprToLoc.find(&E) == ExprToLoc.end());
- ExprToLoc[&E] = &Loc;
+ const Expr &CanonE = ignoreCFGOmittedNodes(E);
+ assert(ExprToLoc.find(&CanonE) == ExprToLoc.end());
+ ExprToLoc[&CanonE] = &Loc;
}
StorageLocation *Environment::getStorageLocation(const Expr &E,
SkipPast SP) const {
- auto It = ExprToLoc.find(&E);
+ // FIXME: Add a test with parens.
+ auto It = ExprToLoc.find(&ignoreCFGOmittedNodes(E));
return It == ExprToLoc.end() ? nullptr : &skip(*It->second, SP);
}
@@ -229,6 +332,10 @@ StorageLocation *Environment::getThisPointeeStorageLocation() const {
return DACtx->getThisPointeeStorageLocation();
}
+PointerValue &Environment::getOrCreateNullPointerValue(QualType PointeeType) {
+ return DACtx->getOrCreateNullPointerValue(PointeeType);
+}
+
void Environment::setValue(const StorageLocation &Loc, Value &Val) {
LocToVal[&Loc] = &Val;
@@ -238,11 +345,28 @@ void Environment::setValue(const StorageLocation &Loc, Value &Val) {
const QualType Type = AggregateLoc.getType();
assert(Type->isStructureOrClassType());
- for (const FieldDecl *Field : Type->getAsRecordDecl()->fields()) {
+ for (const FieldDecl *Field : getObjectFields(Type)) {
assert(Field != nullptr);
- setValue(AggregateLoc.getChild(*Field), StructVal->getChild(*Field));
+ StorageLocation &FieldLoc = AggregateLoc.getChild(*Field);
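+      // Record the struct/member pair for `FieldLoc` so that a later write to
+      // the member's location (handled below) also updates `StructVal`.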
+ MemberLocToStruct[&FieldLoc] = std::make_pair(StructVal, Field);
+ if (auto *FieldVal = StructVal->getChild(*Field))
+ setValue(FieldLoc, *FieldVal);
}
}
+
+ auto IT = MemberLocToStruct.find(&Loc);
+ if (IT != MemberLocToStruct.end()) {
+ // `Loc` is the location of a struct member so we need to also update the
+ // value of the member in the corresponding `StructValue`.
+
+ assert(IT->second.first != nullptr);
+ StructValue &StructVal = *IT->second.first;
+
+ assert(IT->second.second != nullptr);
+ const ValueDecl &Member = *IT->second.second;
+
+ StructVal.setChild(Member, Val);
+ }
}
Value *Environment::getValue(const StorageLocation &Loc) const {
@@ -266,25 +390,44 @@ Value *Environment::getValue(const Expr &E, SkipPast SP) const {
Value *Environment::createValue(QualType Type) {
llvm::DenseSet<QualType> Visited;
- return createValueUnlessSelfReferential(Type, Visited);
+ int CreatedValuesCount = 0;
+ Value *Val = createValueUnlessSelfReferential(Type, Visited, /*Depth=*/0,
+ CreatedValuesCount);
+ if (CreatedValuesCount > MaxCompositeValueSize) {
+ llvm::errs() << "Attempting to initialize a huge value of type: " << Type
+ << '\n';
+ }
+ return Val;
}
Value *Environment::createValueUnlessSelfReferential(
- QualType Type, llvm::DenseSet<QualType> &Visited) {
+ QualType Type, llvm::DenseSet<QualType> &Visited, int Depth,
+ int &CreatedValuesCount) {
assert(!Type.isNull());
+ // Allow unlimited fields at depth 1; only cap at deeper nesting levels.
+ if ((Depth > 1 && CreatedValuesCount > MaxCompositeValueSize) ||
+ Depth > MaxCompositeValueDepth)
+ return nullptr;
+
+ if (Type->isBooleanType()) {
+ CreatedValuesCount++;
+ return &makeAtomicBoolValue();
+ }
+
if (Type->isIntegerType()) {
+ CreatedValuesCount++;
return &takeOwnership(std::make_unique<IntegerValue>());
}
if (Type->isReferenceType()) {
- QualType PointeeType = Type->getAs<ReferenceType>()->getPointeeType();
+ CreatedValuesCount++;
+ QualType PointeeType = Type->castAs<ReferenceType>()->getPointeeType();
auto &PointeeLoc = createStorageLocation(PointeeType);
- if (!Visited.contains(PointeeType.getCanonicalType())) {
- Visited.insert(PointeeType.getCanonicalType());
- Value *PointeeVal =
- createValueUnlessSelfReferential(PointeeType, Visited);
+ if (Visited.insert(PointeeType.getCanonicalType()).second) {
+ Value *PointeeVal = createValueUnlessSelfReferential(
+ PointeeType, Visited, Depth, CreatedValuesCount);
Visited.erase(PointeeType.getCanonicalType());
if (PointeeVal != nullptr)
@@ -295,13 +438,13 @@ Value *Environment::createValueUnlessSelfReferential(
}
if (Type->isPointerType()) {
- QualType PointeeType = Type->getAs<PointerType>()->getPointeeType();
+ CreatedValuesCount++;
+ QualType PointeeType = Type->castAs<PointerType>()->getPointeeType();
auto &PointeeLoc = createStorageLocation(PointeeType);
- if (!Visited.contains(PointeeType.getCanonicalType())) {
- Visited.insert(PointeeType.getCanonicalType());
- Value *PointeeVal =
- createValueUnlessSelfReferential(PointeeType, Visited);
+ if (Visited.insert(PointeeType.getCanonicalType()).second) {
+ Value *PointeeVal = createValueUnlessSelfReferential(
+ PointeeType, Visited, Depth, CreatedValuesCount);
Visited.erase(PointeeType.getCanonicalType());
if (PointeeVal != nullptr)
@@ -312,10 +455,11 @@ Value *Environment::createValueUnlessSelfReferential(
}
if (Type->isStructureOrClassType()) {
+ CreatedValuesCount++;
// FIXME: Initialize only fields that are accessed in the context that is
// being analyzed.
llvm::DenseMap<const ValueDecl *, Value *> FieldValues;
- for (const FieldDecl *Field : Type->getAsRecordDecl()->fields()) {
+ for (const FieldDecl *Field : getObjectFields(Type)) {
assert(Field != nullptr);
QualType FieldType = Field->getType();
@@ -323,8 +467,9 @@ Value *Environment::createValueUnlessSelfReferential(
continue;
Visited.insert(FieldType.getCanonicalType());
- FieldValues.insert(
- {Field, createValueUnlessSelfReferential(FieldType, Visited)});
+ if (auto *FieldValue = createValueUnlessSelfReferential(
+ FieldType, Visited, Depth + 1, CreatedValuesCount))
+ FieldValues.insert({Field, FieldValue});
Visited.erase(FieldType.getCanonicalType());
}
@@ -343,7 +488,7 @@ StorageLocation &Environment::skip(StorageLocation &Loc, SkipPast SP) const {
// References cannot be chained so we only need to skip past one level of
// indirection.
if (auto *Val = dyn_cast_or_null<ReferenceValue>(getValue(Loc)))
- return Val->getPointeeLoc();
+ return Val->getReferentLoc();
return Loc;
case SkipPast::ReferenceThenPointer:
StorageLocation &LocPastRef = skip(Loc, SkipPast::Reference);
@@ -359,5 +504,13 @@ const StorageLocation &Environment::skip(const StorageLocation &Loc,
return skip(*const_cast<StorageLocation *>(&Loc), SP);
}
+void Environment::addToFlowCondition(BoolValue &Val) {
+ DACtx->addFlowConditionConstraint(*FlowConditionToken, Val);
+}
+
+bool Environment::flowConditionImplies(BoolValue &Val) const {
+ return DACtx->flowConditionImplies(*FlowConditionToken, Val);
+}
+
} // namespace dataflow
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/ChromiumCheckModel.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/ChromiumCheckModel.cpp
new file mode 100644
index 000000000000..3910847316a5
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/ChromiumCheckModel.cpp
@@ -0,0 +1,67 @@
+//===-- ChromiumCheckModel.cpp ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/Models/ChromiumCheckModel.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "llvm/ADT/DenseSet.h"
+
+namespace clang {
+namespace dataflow {
+
+/// Determines whether `D` is one of the methods used to implement Chromium's
+/// `CHECK` macros. Populates `CheckDecls`, if empty.
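+///
+/// For example (illustrative): a failing `CHECK(cond)` in Chromium ends up
+/// invoking a static method such as `::logging::CheckError::Check(...)`;
+/// methods of `CheckError` whose names end in "Check" are treated as such.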
+bool isCheckLikeMethod(llvm::SmallDenseSet<const CXXMethodDecl *> &CheckDecls,
+ const CXXMethodDecl &D) {
+ // All of the methods of interest are static, so avoid any lookup for
+ // non-static methods (the common case).
+ if (!D.isStatic())
+ return false;
+
+ if (CheckDecls.empty()) {
+ // Attempt to initialize `CheckDecls` with the methods in class
+ // `CheckError`.
+ const CXXRecordDecl *ParentClass = D.getParent();
+ if (ParentClass == nullptr || !ParentClass->getDeclName().isIdentifier() ||
+ ParentClass->getName() != "CheckError")
+ return false;
+
+    // Check whether the namespace is "logging".
+ const auto *N =
+ dyn_cast_or_null<NamespaceDecl>(ParentClass->getDeclContext());
+ if (N == nullptr || !N->getDeclName().isIdentifier() ||
+ N->getName() != "logging")
+ return false;
+
+ // Check whether "logging" is a top-level namespace.
+ if (N->getParent() == nullptr || !N->getParent()->isTranslationUnit())
+ return false;
+
+ for (const CXXMethodDecl *M : ParentClass->methods())
+ if (M->getDeclName().isIdentifier() && M->getName().endswith("Check"))
+ CheckDecls.insert(M);
+ }
+
+ return CheckDecls.contains(&D);
+}
+
+bool ChromiumCheckModel::transfer(const Stmt *Stmt, Environment &Env) {
+ if (const auto *Call = dyn_cast<CallExpr>(Stmt)) {
+    // `getDirectCallee` may be null for indirect calls.
+    if (const auto *M =
+            dyn_cast_or_null<CXXMethodDecl>(Call->getDirectCallee())) {
+ if (isCheckLikeMethod(CheckDecls, *M)) {
+ // Mark this branch as unreachable.
+ Env.addToFlowCondition(Env.getBoolLiteralValue(false));
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+} // namespace dataflow
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp
new file mode 100644
index 000000000000..eef3cc813a4a
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.cpp
@@ -0,0 +1,753 @@
+//===-- UncheckedOptionalAccessModel.cpp ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a dataflow analysis that detects unsafe uses of optional
+// values.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/Models/UncheckedOptionalAccessModel.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/Expr.h"
+#include "clang/AST/ExprCXX.h"
+#include "clang/AST/Stmt.h"
+#include "clang/ASTMatchers/ASTMatchers.h"
+#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "clang/Analysis/FlowSensitive/MatchSwitch.h"
+#include "clang/Analysis/FlowSensitive/NoopLattice.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include "clang/Basic/SourceLocation.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
+#include <cassert>
+#include <memory>
+#include <utility>
+#include <vector>
+
+namespace clang {
+namespace dataflow {
+namespace {
+
+using namespace ::clang::ast_matchers;
+using LatticeTransferState = TransferState<NoopLattice>;
+
+DeclarationMatcher optionalClass() {
+ return classTemplateSpecializationDecl(
+ anyOf(hasName("std::optional"), hasName("std::__optional_storage_base"),
+ hasName("__optional_destruct_base"), hasName("absl::optional"),
+ hasName("base::Optional")),
+ hasTemplateArgument(0, refersToType(type().bind("T"))));
+}
+
+auto optionalOrAliasType() {
+ return hasUnqualifiedDesugaredType(
+ recordType(hasDeclaration(optionalClass())));
+}
+
+/// Matches any of the spellings of the optional types and sugar, aliases, etc.
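+/// For example (illustrative): an expression of type `absl::optional<int>`,
+/// or of an alias such as `using MaybeInt = absl::optional<int>;`.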
+auto hasOptionalType() { return hasType(optionalOrAliasType()); }
+
+auto isOptionalMemberCallWithName(
+ llvm::StringRef MemberName,
+ llvm::Optional<StatementMatcher> Ignorable = llvm::None) {
+ auto Exception = unless(Ignorable ? expr(anyOf(*Ignorable, cxxThisExpr()))
+ : cxxThisExpr());
+ return cxxMemberCallExpr(
+ on(expr(Exception)),
+ callee(cxxMethodDecl(hasName(MemberName), ofClass(optionalClass()))));
+}
+
+auto isOptionalOperatorCallWithName(
+ llvm::StringRef operator_name,
+ llvm::Optional<StatementMatcher> Ignorable = llvm::None) {
+ return cxxOperatorCallExpr(
+ hasOverloadedOperatorName(operator_name),
+ callee(cxxMethodDecl(ofClass(optionalClass()))),
+ Ignorable ? callExpr(unless(hasArgument(0, *Ignorable))) : callExpr());
+}
+
+auto isMakeOptionalCall() {
+ return callExpr(
+ callee(functionDecl(hasAnyName(
+ "std::make_optional", "base::make_optional", "absl::make_optional"))),
+ hasOptionalType());
+}
+
+auto hasNulloptType() {
+ return hasType(namedDecl(
+ hasAnyName("std::nullopt_t", "absl::nullopt_t", "base::nullopt_t")));
+}
+
+auto inPlaceClass() {
+ return recordDecl(
+ hasAnyName("std::in_place_t", "absl::in_place_t", "base::in_place_t"));
+}
+
+auto isOptionalNulloptConstructor() {
+ return cxxConstructExpr(hasOptionalType(), argumentCountIs(1),
+ hasArgument(0, hasNulloptType()));
+}
+
+auto isOptionalInPlaceConstructor() {
+ return cxxConstructExpr(hasOptionalType(),
+ hasArgument(0, hasType(inPlaceClass())));
+}
+
+auto isOptionalValueOrConversionConstructor() {
+ return cxxConstructExpr(
+ hasOptionalType(),
+ unless(hasDeclaration(
+ cxxConstructorDecl(anyOf(isCopyConstructor(), isMoveConstructor())))),
+ argumentCountIs(1), hasArgument(0, unless(hasNulloptType())));
+}
+
+auto isOptionalValueOrConversionAssignment() {
+ return cxxOperatorCallExpr(
+ hasOverloadedOperatorName("="),
+ callee(cxxMethodDecl(ofClass(optionalClass()))),
+ unless(hasDeclaration(cxxMethodDecl(
+ anyOf(isCopyAssignmentOperator(), isMoveAssignmentOperator())))),
+ argumentCountIs(2), hasArgument(1, unless(hasNulloptType())));
+}
+
+auto isOptionalNulloptAssignment() {
+ return cxxOperatorCallExpr(hasOverloadedOperatorName("="),
+ callee(cxxMethodDecl(ofClass(optionalClass()))),
+ argumentCountIs(2),
+ hasArgument(1, hasNulloptType()));
+}
+
+auto isStdSwapCall() {
+ return callExpr(callee(functionDecl(hasName("std::swap"))),
+ argumentCountIs(2), hasArgument(0, hasOptionalType()),
+ hasArgument(1, hasOptionalType()));
+}
+
+constexpr llvm::StringLiteral ValueOrCallID = "ValueOrCall";
+
+auto isValueOrStringEmptyCall() {
+ // `opt.value_or("").empty()`
+ return cxxMemberCallExpr(
+ callee(cxxMethodDecl(hasName("empty"))),
+ onImplicitObjectArgument(ignoringImplicit(
+ cxxMemberCallExpr(on(expr(unless(cxxThisExpr()))),
+ callee(cxxMethodDecl(hasName("value_or"),
+ ofClass(optionalClass()))),
+ hasArgument(0, stringLiteral(hasSize(0))))
+ .bind(ValueOrCallID))));
+}
+
+auto isValueOrNotEqX() {
+ auto ComparesToSame = [](ast_matchers::internal::Matcher<Stmt> Arg) {
+ return hasOperands(
+ ignoringImplicit(
+ cxxMemberCallExpr(on(expr(unless(cxxThisExpr()))),
+ callee(cxxMethodDecl(hasName("value_or"),
+ ofClass(optionalClass()))),
+ hasArgument(0, Arg))
+ .bind(ValueOrCallID)),
+ ignoringImplicit(Arg));
+ };
+
+  // `opt.value_or(X) != X`, where `X` is `nullptr`, `""`, or `0`. Ideally,
+  // we'd support this pattern for any expression, but the AST does not have a
+  // generic expression comparison facility, so we specialize to common cases
+  // seen in practice. FIXME: define a matcher that compares values across
+  // nodes, which would let us generalize this to any `X`.
+  //
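+  // Matched examples (illustrative):
+  //   `opt.value_or(nullptr) != nullptr`
+  //   `opt.value_or("") != ""`
+  //   `opt.value_or(0) != 0`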
+ return binaryOperation(hasOperatorName("!="),
+ anyOf(ComparesToSame(cxxNullPtrLiteralExpr()),
+ ComparesToSame(stringLiteral(hasSize(0))),
+ ComparesToSame(integerLiteral(equals(0)))));
+}
+
+auto isCallReturningOptional() {
+ return callExpr(hasType(qualType(anyOf(
+ optionalOrAliasType(), referenceType(pointee(optionalOrAliasType()))))));
+}
+
+/// Sets `HasValueVal` as the symbolic value that represents the "has_value"
+/// property of the optional value `OptionalVal`.
+void setHasValue(Value &OptionalVal, BoolValue &HasValueVal) {
+ OptionalVal.setProperty("has_value", HasValueVal);
+}
+
+/// Creates a symbolic value for an `optional` value using `HasValueVal` as the
+/// symbolic value of its "has_value" property.
+StructValue &createOptionalValue(Environment &Env, BoolValue &HasValueVal) {
+ auto OptionalVal = std::make_unique<StructValue>();
+ setHasValue(*OptionalVal, HasValueVal);
+ return Env.takeOwnership(std::move(OptionalVal));
+}
+
+/// Returns the symbolic value that represents the "has_value" property of the
+/// optional value `OptionalVal`. Returns null if `OptionalVal` is null.
+BoolValue *getHasValue(Environment &Env, Value *OptionalVal) {
+ if (OptionalVal != nullptr) {
+ auto *HasValueVal =
+ cast_or_null<BoolValue>(OptionalVal->getProperty("has_value"));
+ if (HasValueVal == nullptr) {
+ HasValueVal = &Env.makeAtomicBoolValue();
+ OptionalVal->setProperty("has_value", *HasValueVal);
+ }
+ return HasValueVal;
+ }
+ return nullptr;
+}
+
+/// If `Type` is a reference type, returns the type of its pointee. Otherwise,
+/// returns `Type` itself.
+QualType stripReference(QualType Type) {
+ return Type->isReferenceType() ? Type->getPointeeType() : Type;
+}
+
+/// Returns true if and only if `Type` is an optional type.
+bool IsOptionalType(QualType Type) {
+ if (!Type->isRecordType())
+ return false;
+ // FIXME: Optimize this by avoiding the `getQualifiedNameAsString` call.
+ auto TypeName = Type->getAsCXXRecordDecl()->getQualifiedNameAsString();
+ return TypeName == "std::optional" || TypeName == "absl::optional" ||
+ TypeName == "base::Optional";
+}
+
+/// Returns the number of optional wrappers in `Type`.
+///
+/// For example, if `Type` is `optional<optional<int>>`, the result of this
+/// function will be 2.
+int countOptionalWrappers(const ASTContext &ASTCtx, QualType Type) {
+ if (!IsOptionalType(Type))
+ return 0;
+ return 1 + countOptionalWrappers(
+ ASTCtx,
+ cast<ClassTemplateSpecializationDecl>(Type->getAsRecordDecl())
+ ->getTemplateArgs()
+ .get(0)
+ .getAsType()
+ .getDesugaredType(ASTCtx));
+}
+
+/// Tries to initialize the `optional`'s value (that is, contents), and return
+/// its location. Returns nullptr if the value can't be represented.
+StorageLocation *maybeInitializeOptionalValueMember(QualType Q,
+ Value &OptionalVal,
+ Environment &Env) {
+ // The "value" property represents a synthetic field. As such, it needs
+ // `StorageLocation`, like normal fields (and other variables). So, we model
+ // it with a `ReferenceValue`, since that includes a storage location. Once
+ // the property is set, it will be shared by all environments that access the
+ // `Value` representing the optional (here, `OptionalVal`).
+ if (auto *ValueProp = OptionalVal.getProperty("value")) {
+ auto *ValueRef = clang::cast<ReferenceValue>(ValueProp);
+ auto &ValueLoc = ValueRef->getReferentLoc();
+ if (Env.getValue(ValueLoc) == nullptr) {
+ // The property was previously set, but the value has been lost. This can
+ // happen, for example, because of an environment merge (where the two
+ // environments mapped the property to different values, which resulted in
+ // them both being discarded), or when two blocks in the CFG, with neither
+ // a dominator of the other, visit the same optional value, or even when a
+ // block is revisited during testing to collect per-statement state.
+ // FIXME: This situation means that the optional contents are not shared
+ // between branches and the like. Practically, this lack of sharing
+ // reduces the precision of the model when the contents are relevant to
+ // the check, like another optional or a boolean that influences control
+ // flow.
+ auto *ValueVal = Env.createValue(ValueLoc.getType());
+ if (ValueVal == nullptr)
+ return nullptr;
+ Env.setValue(ValueLoc, *ValueVal);
+ }
+ return &ValueLoc;
+ }
+
+ auto Ty = stripReference(Q);
+ auto *ValueVal = Env.createValue(Ty);
+ if (ValueVal == nullptr)
+ return nullptr;
+ auto &ValueLoc = Env.createStorageLocation(Ty);
+ Env.setValue(ValueLoc, *ValueVal);
+ auto ValueRef = std::make_unique<ReferenceValue>(ValueLoc);
+ OptionalVal.setProperty("value", Env.takeOwnership(std::move(ValueRef)));
+ return &ValueLoc;
+}
+
+void initializeOptionalReference(const Expr *OptionalExpr,
+ const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ if (auto *OptionalVal =
+ State.Env.getValue(*OptionalExpr, SkipPast::Reference)) {
+ if (OptionalVal->getProperty("has_value") == nullptr) {
+ setHasValue(*OptionalVal, State.Env.makeAtomicBoolValue());
+ }
+ }
+}
+
+/// Returns true if and only if `OptionalVal` is initialized and known to be
+/// empty in `Env`.
+bool isEmptyOptional(const Value &OptionalVal, const Environment &Env) {
+ auto *HasValueVal =
+ cast_or_null<BoolValue>(OptionalVal.getProperty("has_value"));
+ return HasValueVal != nullptr &&
+ Env.flowConditionImplies(Env.makeNot(*HasValueVal));
+}
+
+/// Returns true if and only if `OptionalVal` is initialized and known to be
+/// non-empty in `Env`.
+bool isNonEmptyOptional(const Value &OptionalVal, const Environment &Env) {
+ auto *HasValueVal =
+ cast_or_null<BoolValue>(OptionalVal.getProperty("has_value"));
+ return HasValueVal != nullptr && Env.flowConditionImplies(*HasValueVal);
+}
+
+void transferUnwrapCall(const Expr *UnwrapExpr, const Expr *ObjectExpr,
+ LatticeTransferState &State) {
+ if (auto *OptionalVal =
+ State.Env.getValue(*ObjectExpr, SkipPast::ReferenceThenPointer)) {
+ if (State.Env.getStorageLocation(*UnwrapExpr, SkipPast::None) == nullptr)
+ if (auto *Loc = maybeInitializeOptionalValueMember(
+ UnwrapExpr->getType(), *OptionalVal, State.Env))
+ State.Env.setStorageLocation(*UnwrapExpr, *Loc);
+ }
+}
+
+void transferMakeOptionalCall(const CallExpr *E,
+ const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ auto &Loc = State.Env.createStorageLocation(*E);
+ State.Env.setStorageLocation(*E, Loc);
+ State.Env.setValue(
+ Loc, createOptionalValue(State.Env, State.Env.getBoolLiteralValue(true)));
+}
+
+void transferOptionalHasValueCall(const CXXMemberCallExpr *CallExpr,
+ const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ if (auto *HasValueVal = getHasValue(
+ State.Env, State.Env.getValue(*CallExpr->getImplicitObjectArgument(),
+ SkipPast::ReferenceThenPointer))) {
+ auto &CallExprLoc = State.Env.createStorageLocation(*CallExpr);
+ State.Env.setValue(CallExprLoc, *HasValueVal);
+ State.Env.setStorageLocation(*CallExpr, CallExprLoc);
+ }
+}
+
+/// `ModelPred` builds a logical formula relating the predicate in
+/// `ValueOrPredExpr` to the optional's `has_value` property.
+void transferValueOrImpl(const clang::Expr *ValueOrPredExpr,
+ const MatchFinder::MatchResult &Result,
+ LatticeTransferState &State,
+ BoolValue &(*ModelPred)(Environment &Env,
+ BoolValue &ExprVal,
+ BoolValue &HasValueVal)) {
+ auto &Env = State.Env;
+
+ const auto *ObjectArgumentExpr =
+ Result.Nodes.getNodeAs<clang::CXXMemberCallExpr>(ValueOrCallID)
+ ->getImplicitObjectArgument();
+
+ auto *HasValueVal = getHasValue(
+ State.Env,
+ State.Env.getValue(*ObjectArgumentExpr, SkipPast::ReferenceThenPointer));
+ if (HasValueVal == nullptr)
+ return;
+
+ auto *ExprValue = cast_or_null<BoolValue>(
+ State.Env.getValue(*ValueOrPredExpr, SkipPast::None));
+ if (ExprValue == nullptr) {
+ auto &ExprLoc = State.Env.createStorageLocation(*ValueOrPredExpr);
+ ExprValue = &State.Env.makeAtomicBoolValue();
+ State.Env.setValue(ExprLoc, *ExprValue);
+ State.Env.setStorageLocation(*ValueOrPredExpr, ExprLoc);
+ }
+
+ Env.addToFlowCondition(ModelPred(Env, *ExprValue, *HasValueVal));
+}
+
+void transferValueOrStringEmptyCall(const clang::Expr *ComparisonExpr,
+ const MatchFinder::MatchResult &Result,
+ LatticeTransferState &State) {
+ return transferValueOrImpl(ComparisonExpr, Result, State,
+ [](Environment &Env, BoolValue &ExprVal,
+ BoolValue &HasValueVal) -> BoolValue & {
+ // If the result is *not* empty, then we know the
+ // optional must have been holding a value. If
+ // `ExprVal` is true, though, we don't learn
+ // anything definite about `has_value`, so we
+ // don't add any corresponding implications to
+ // the flow condition.
+ return Env.makeImplication(Env.makeNot(ExprVal),
+ HasValueVal);
+ });
+}
+
+void transferValueOrNotEqX(const Expr *ComparisonExpr,
+ const MatchFinder::MatchResult &Result,
+ LatticeTransferState &State) {
+ transferValueOrImpl(ComparisonExpr, Result, State,
+ [](Environment &Env, BoolValue &ExprVal,
+ BoolValue &HasValueVal) -> BoolValue & {
+ // We know that if `(opt.value_or(X) != X)` then
+ // `opt.hasValue()`, even without knowing further
+ // details about the contents of `opt`.
+ return Env.makeImplication(ExprVal, HasValueVal);
+ });
+}
+
+void transferCallReturningOptional(const CallExpr *E,
+ const MatchFinder::MatchResult &Result,
+ LatticeTransferState &State) {
+ if (State.Env.getStorageLocation(*E, SkipPast::None) != nullptr)
+ return;
+
+ auto &Loc = State.Env.createStorageLocation(*E);
+ State.Env.setStorageLocation(*E, Loc);
+ State.Env.setValue(
+ Loc, createOptionalValue(State.Env, State.Env.makeAtomicBoolValue()));
+}
+
+void assignOptionalValue(const Expr &E, LatticeTransferState &State,
+ BoolValue &HasValueVal) {
+ if (auto *OptionalLoc =
+ State.Env.getStorageLocation(E, SkipPast::ReferenceThenPointer)) {
+ State.Env.setValue(*OptionalLoc,
+ createOptionalValue(State.Env, HasValueVal));
+ }
+}
+
+/// Returns a symbolic value for the "has_value" property of an `optional<T>`
+/// value that is constructed/assigned from a value of type `U` or `optional<U>`
+/// where `T` is constructible from `U`.
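+///
+/// For example (illustrative): `optional<std::string> a("hello");` converts
+/// from a plain value, so `has_value` is known to be true, while
+/// `optional<std::string> b(optCStr);`, with `optCStr` an
+/// `optional<const char *>`, inherits `has_value` from `optCStr`.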
+BoolValue &value_orConversionHasValue(const FunctionDecl &F, const Expr &E,
+ const MatchFinder::MatchResult &MatchRes,
+ LatticeTransferState &State) {
+ assert(F.getTemplateSpecializationArgs()->size() > 0);
+
+ const int TemplateParamOptionalWrappersCount = countOptionalWrappers(
+ *MatchRes.Context,
+ stripReference(F.getTemplateSpecializationArgs()->get(0).getAsType()));
+ const int ArgTypeOptionalWrappersCount =
+ countOptionalWrappers(*MatchRes.Context, stripReference(E.getType()));
+
+ // Check if this is a constructor/assignment call for `optional<T>` with
+ // argument of type `U` such that `T` is constructible from `U`.
+ if (TemplateParamOptionalWrappersCount == ArgTypeOptionalWrappersCount)
+ return State.Env.getBoolLiteralValue(true);
+
+ // This is a constructor/assignment call for `optional<T>` with argument of
+ // type `optional<U>` such that `T` is constructible from `U`.
+ if (auto *HasValueVal =
+ getHasValue(State.Env, State.Env.getValue(E, SkipPast::Reference)))
+ return *HasValueVal;
+ return State.Env.makeAtomicBoolValue();
+}
+
+void transferValueOrConversionConstructor(
+ const CXXConstructExpr *E, const MatchFinder::MatchResult &MatchRes,
+ LatticeTransferState &State) {
+ assert(E->getNumArgs() > 0);
+
+ assignOptionalValue(*E, State,
+ value_orConversionHasValue(*E->getConstructor(),
+ *E->getArg(0), MatchRes,
+ State));
+}
+
+void transferAssignment(const CXXOperatorCallExpr *E, BoolValue &HasValueVal,
+ LatticeTransferState &State) {
+ assert(E->getNumArgs() > 0);
+
+ auto *OptionalLoc =
+ State.Env.getStorageLocation(*E->getArg(0), SkipPast::Reference);
+ if (OptionalLoc == nullptr)
+ return;
+
+ State.Env.setValue(*OptionalLoc, createOptionalValue(State.Env, HasValueVal));
+
+ // Assign a storage location for the whole expression.
+ State.Env.setStorageLocation(*E, *OptionalLoc);
+}
+
+void transferValueOrConversionAssignment(
+ const CXXOperatorCallExpr *E, const MatchFinder::MatchResult &MatchRes,
+ LatticeTransferState &State) {
+ assert(E->getNumArgs() > 1);
+ transferAssignment(E,
+ value_orConversionHasValue(*E->getDirectCallee(),
+ *E->getArg(1), MatchRes, State),
+ State);
+}
+
+void transferNulloptAssignment(const CXXOperatorCallExpr *E,
+ const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ transferAssignment(E, State.Env.getBoolLiteralValue(false), State);
+}
+
+void transferSwap(const StorageLocation &OptionalLoc1,
+ const StorageLocation &OptionalLoc2,
+ LatticeTransferState &State) {
+ auto *OptionalVal1 = State.Env.getValue(OptionalLoc1);
+ assert(OptionalVal1 != nullptr);
+
+ auto *OptionalVal2 = State.Env.getValue(OptionalLoc2);
+ assert(OptionalVal2 != nullptr);
+
+ State.Env.setValue(OptionalLoc1, *OptionalVal2);
+ State.Env.setValue(OptionalLoc2, *OptionalVal1);
+}
+
+void transferSwapCall(const CXXMemberCallExpr *E,
+ const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ assert(E->getNumArgs() == 1);
+
+ auto *OptionalLoc1 = State.Env.getStorageLocation(
+ *E->getImplicitObjectArgument(), SkipPast::ReferenceThenPointer);
+ assert(OptionalLoc1 != nullptr);
+
+ auto *OptionalLoc2 =
+ State.Env.getStorageLocation(*E->getArg(0), SkipPast::Reference);
+ assert(OptionalLoc2 != nullptr);
+
+ transferSwap(*OptionalLoc1, *OptionalLoc2, State);
+}
+
+void transferStdSwapCall(const CallExpr *E, const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ assert(E->getNumArgs() == 2);
+
+ auto *OptionalLoc1 =
+ State.Env.getStorageLocation(*E->getArg(0), SkipPast::Reference);
+ assert(OptionalLoc1 != nullptr);
+
+ auto *OptionalLoc2 =
+ State.Env.getStorageLocation(*E->getArg(1), SkipPast::Reference);
+ assert(OptionalLoc2 != nullptr);
+
+ transferSwap(*OptionalLoc1, *OptionalLoc2, State);
+}
+
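+/// Returns a matcher for optional accesses to ignore, or `llvm::None`. With
+/// `IgnoreSmartPointerDereference` set, an access such as `ptr->opt.value()`
+/// (illustrative), where `ptr` is a smart pointer rather than an optional, is
+/// treated as ignorable.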
+llvm::Optional<StatementMatcher>
+ignorableOptional(const UncheckedOptionalAccessModelOptions &Options) {
+ if (Options.IgnoreSmartPointerDereference)
+ return memberExpr(hasObjectExpression(ignoringParenImpCasts(
+ cxxOperatorCallExpr(anyOf(hasOverloadedOperatorName("->"),
+ hasOverloadedOperatorName("*")),
+ unless(hasArgument(0, expr(hasOptionalType())))))));
+ return llvm::None;
+}
+
+StatementMatcher
+valueCall(llvm::Optional<StatementMatcher> &IgnorableOptional) {
+ return isOptionalMemberCallWithName("value", IgnorableOptional);
+}
+
+StatementMatcher
+valueOperatorCall(llvm::Optional<StatementMatcher> &IgnorableOptional) {
+ return expr(anyOf(isOptionalOperatorCallWithName("*", IgnorableOptional),
+ isOptionalOperatorCallWithName("->", IgnorableOptional)));
+}
+
+auto buildTransferMatchSwitch(
+ const UncheckedOptionalAccessModelOptions &Options) {
+ // FIXME: Evaluate the efficiency of matchers. If using matchers results in a
+ // lot of duplicated work (e.g. string comparisons), consider providing APIs
+ // that avoid it through memoization.
+ auto IgnorableOptional = ignorableOptional(Options);
+ return MatchSwitchBuilder<LatticeTransferState>()
+ // Attach a symbolic "has_value" state to optional values that we see for
+ // the first time.
+ .CaseOf<Expr>(
+ expr(anyOf(declRefExpr(), memberExpr()), hasOptionalType()),
+ initializeOptionalReference)
+
+ // make_optional
+ .CaseOf<CallExpr>(isMakeOptionalCall(), transferMakeOptionalCall)
+
+ // optional::optional
+ .CaseOf<CXXConstructExpr>(
+ isOptionalInPlaceConstructor(),
+ [](const CXXConstructExpr *E, const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ assignOptionalValue(*E, State, State.Env.getBoolLiteralValue(true));
+ })
+ .CaseOf<CXXConstructExpr>(
+ isOptionalNulloptConstructor(),
+ [](const CXXConstructExpr *E, const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ assignOptionalValue(*E, State,
+ State.Env.getBoolLiteralValue(false));
+ })
+ .CaseOf<CXXConstructExpr>(isOptionalValueOrConversionConstructor(),
+ transferValueOrConversionConstructor)
+
+ // optional::operator=
+ .CaseOf<CXXOperatorCallExpr>(isOptionalValueOrConversionAssignment(),
+ transferValueOrConversionAssignment)
+ .CaseOf<CXXOperatorCallExpr>(isOptionalNulloptAssignment(),
+ transferNulloptAssignment)
+
+ // optional::value
+ .CaseOf<CXXMemberCallExpr>(
+ valueCall(IgnorableOptional),
+ [](const CXXMemberCallExpr *E, const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ transferUnwrapCall(E, E->getImplicitObjectArgument(), State);
+ })
+
+ // optional::operator*, optional::operator->
+ .CaseOf<CallExpr>(valueOperatorCall(IgnorableOptional),
+ [](const CallExpr *E, const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ transferUnwrapCall(E, E->getArg(0), State);
+ })
+
+ // optional::has_value
+ .CaseOf<CXXMemberCallExpr>(isOptionalMemberCallWithName("has_value"),
+ transferOptionalHasValueCall)
+
+ // optional::operator bool
+ .CaseOf<CXXMemberCallExpr>(isOptionalMemberCallWithName("operator bool"),
+ transferOptionalHasValueCall)
+
+ // optional::emplace
+ .CaseOf<CXXMemberCallExpr>(
+ isOptionalMemberCallWithName("emplace"),
+ [](const CXXMemberCallExpr *E, const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ assignOptionalValue(*E->getImplicitObjectArgument(), State,
+ State.Env.getBoolLiteralValue(true));
+ })
+
+ // optional::reset
+ .CaseOf<CXXMemberCallExpr>(
+ isOptionalMemberCallWithName("reset"),
+ [](const CXXMemberCallExpr *E, const MatchFinder::MatchResult &,
+ LatticeTransferState &State) {
+ assignOptionalValue(*E->getImplicitObjectArgument(), State,
+ State.Env.getBoolLiteralValue(false));
+ })
+
+ // optional::swap
+ .CaseOf<CXXMemberCallExpr>(isOptionalMemberCallWithName("swap"),
+ transferSwapCall)
+
+ // std::swap
+ .CaseOf<CallExpr>(isStdSwapCall(), transferStdSwapCall)
+
+ // opt.value_or("").empty()
+ .CaseOf<Expr>(isValueOrStringEmptyCall(), transferValueOrStringEmptyCall)
+
+ // opt.value_or(X) != X
+ .CaseOf<Expr>(isValueOrNotEqX(), transferValueOrNotEqX)
+
+ // returns optional
+ .CaseOf<CallExpr>(isCallReturningOptional(),
+ transferCallReturningOptional)
+
+ .Build();
+}
+
+std::vector<SourceLocation> diagnoseUnwrapCall(const Expr *UnwrapExpr,
+ const Expr *ObjectExpr,
+ const Environment &Env) {
+ if (auto *OptionalVal =
+ Env.getValue(*ObjectExpr, SkipPast::ReferenceThenPointer)) {
+ auto *Prop = OptionalVal->getProperty("has_value");
+ if (auto *HasValueVal = cast_or_null<BoolValue>(Prop)) {
+ if (Env.flowConditionImplies(*HasValueVal))
+ return {};
+ }
+ }
+
+ // Record that this unwrap is *not* provably safe.
+ // FIXME: include either the name of the optional (if applicable) or a source
+ // range of the access for easier interpretation of the result.
+ return {ObjectExpr->getBeginLoc()};
+}
+
+auto buildDiagnoseMatchSwitch(
+ const UncheckedOptionalAccessModelOptions &Options) {
+ // FIXME: Evaluate the efficiency of matchers. If using matchers results in a
+ // lot of duplicated work (e.g. string comparisons), consider providing APIs
+ // that avoid it through memoization.
+ auto IgnorableOptional = ignorableOptional(Options);
+ return MatchSwitchBuilder<const Environment, std::vector<SourceLocation>>()
+ // optional::value
+ .CaseOf<CXXMemberCallExpr>(
+ valueCall(IgnorableOptional),
+ [](const CXXMemberCallExpr *E, const MatchFinder::MatchResult &,
+ const Environment &Env) {
+ return diagnoseUnwrapCall(E, E->getImplicitObjectArgument(), Env);
+ })
+
+ // optional::operator*, optional::operator->
+ .CaseOf<CallExpr>(
+ valueOperatorCall(IgnorableOptional),
+ [](const CallExpr *E, const MatchFinder::MatchResult &,
+ const Environment &Env) {
+ return diagnoseUnwrapCall(E, E->getArg(0), Env);
+ })
+ .Build();
+}
+
+} // namespace
+
+ast_matchers::DeclarationMatcher
+UncheckedOptionalAccessModel::optionalClassDecl() {
+ return optionalClass();
+}
+
+UncheckedOptionalAccessModel::UncheckedOptionalAccessModel(
+ ASTContext &Ctx, UncheckedOptionalAccessModelOptions Options)
+ : DataflowAnalysis<UncheckedOptionalAccessModel, NoopLattice>(Ctx),
+ TransferMatchSwitch(buildTransferMatchSwitch(Options)) {}
+
+void UncheckedOptionalAccessModel::transfer(const Stmt *S, NoopLattice &L,
+ Environment &Env) {
+ LatticeTransferState State(L, Env);
+ TransferMatchSwitch(*S, getASTContext(), State);
+}
+
+bool UncheckedOptionalAccessModel::compareEquivalent(QualType Type,
+ const Value &Val1,
+ const Environment &Env1,
+ const Value &Val2,
+ const Environment &Env2) {
+ return isNonEmptyOptional(Val1, Env1) == isNonEmptyOptional(Val2, Env2);
+}
+
+bool UncheckedOptionalAccessModel::merge(QualType Type, const Value &Val1,
+ const Environment &Env1,
+ const Value &Val2,
+ const Environment &Env2,
+ Value &MergedVal,
+ Environment &MergedEnv) {
+ if (!IsOptionalType(Type))
+ return true;
+
+ auto &HasValueVal = MergedEnv.makeAtomicBoolValue();
+ if (isNonEmptyOptional(Val1, Env1) && isNonEmptyOptional(Val2, Env2))
+ MergedEnv.addToFlowCondition(HasValueVal);
+ else if (isEmptyOptional(Val1, Env1) && isEmptyOptional(Val2, Env2))
+ MergedEnv.addToFlowCondition(MergedEnv.makeNot(HasValueVal));
+ setHasValue(MergedVal, HasValueVal);
+ return true;
+}
+
+UncheckedOptionalAccessDiagnoser::UncheckedOptionalAccessDiagnoser(
+ UncheckedOptionalAccessModelOptions Options)
+ : DiagnoseMatchSwitch(buildDiagnoseMatchSwitch(Options)) {}
+
+std::vector<SourceLocation> UncheckedOptionalAccessDiagnoser::diagnose(
+ ASTContext &Context, const Stmt *Stmt, const Environment &Env) {
+ return DiagnoseMatchSwitch(*Stmt, Context, Env);
+}
+
+} // namespace dataflow
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp
index 51a86b727e33..500e1a7a9390 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/Transfer.cpp
@@ -21,6 +21,8 @@
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include "clang/Basic/Builtins.h"
#include "clang/Basic/OperatorKinds.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Casting.h"
@@ -31,45 +33,76 @@
namespace clang {
namespace dataflow {
-static const Expr *skipExprWithCleanups(const Expr *E) {
- if (auto *C = dyn_cast_or_null<ExprWithCleanups>(E))
- return C->getSubExpr();
- return E;
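+// Returns a symbolic value for `LHS == RHS`: an "iff" of the operands' boolean
+// values when both are modeled, and a fresh atomic boolean otherwise.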
+static BoolValue &evaluateBooleanEquality(const Expr &LHS, const Expr &RHS,
+ Environment &Env) {
+ if (auto *LHSValue =
+ dyn_cast_or_null<BoolValue>(Env.getValue(LHS, SkipPast::Reference)))
+ if (auto *RHSValue =
+ dyn_cast_or_null<BoolValue>(Env.getValue(RHS, SkipPast::Reference)))
+ return Env.makeIff(*LHSValue, *RHSValue);
+
+ return Env.makeAtomicBoolValue();
}
class TransferVisitor : public ConstStmtVisitor<TransferVisitor> {
public:
- TransferVisitor(Environment &Env) : Env(Env) {}
+ TransferVisitor(const StmtToEnvMap &StmtToEnv, Environment &Env)
+ : StmtToEnv(StmtToEnv), Env(Env) {}
void VisitBinaryOperator(const BinaryOperator *S) {
- if (S->getOpcode() == BO_Assign) {
- // The CFG does not contain `ParenExpr` as top-level statements in basic
- // blocks, however sub-expressions can still be of that type.
- assert(S->getLHS() != nullptr);
- const Expr *LHS = S->getLHS()->IgnoreParens();
+ const Expr *LHS = S->getLHS();
+ assert(LHS != nullptr);
+
+ const Expr *RHS = S->getRHS();
+ assert(RHS != nullptr);
- assert(LHS != nullptr);
+ switch (S->getOpcode()) {
+ case BO_Assign: {
auto *LHSLoc = Env.getStorageLocation(*LHS, SkipPast::Reference);
if (LHSLoc == nullptr)
- return;
-
- // The CFG does not contain `ParenExpr` as top-level statements in basic
- // blocks, however sub-expressions can still be of that type.
- assert(S->getRHS() != nullptr);
- const Expr *RHS = S->getRHS()->IgnoreParens();
+ break;
- assert(RHS != nullptr);
- Value *RHSVal = Env.getValue(*RHS, SkipPast::Reference);
+ auto *RHSVal = Env.getValue(*RHS, SkipPast::Reference);
if (RHSVal == nullptr)
- return;
+ break;
// Assign a value to the storage location of the left-hand side.
Env.setValue(*LHSLoc, *RHSVal);
// Assign a storage location for the whole expression.
Env.setStorageLocation(*S, *LHSLoc);
+ break;
+ }
+ case BO_LAnd:
+ case BO_LOr: {
+ BoolValue &LHSVal = getLogicOperatorSubExprValue(*LHS);
+ BoolValue &RHSVal = getLogicOperatorSubExprValue(*RHS);
+
+ auto &Loc = Env.createStorageLocation(*S);
+ Env.setStorageLocation(*S, Loc);
+ if (S->getOpcode() == BO_LAnd)
+ Env.setValue(Loc, Env.makeAnd(LHSVal, RHSVal));
+ else
+ Env.setValue(Loc, Env.makeOr(LHSVal, RHSVal));
+ break;
+ }
+ case BO_NE:
+ case BO_EQ: {
+ auto &LHSEqRHSValue = evaluateBooleanEquality(*LHS, *RHS, Env);
+ auto &Loc = Env.createStorageLocation(*S);
+ Env.setStorageLocation(*S, Loc);
+ Env.setValue(Loc, S->getOpcode() == BO_EQ ? LHSEqRHSValue
+ : Env.makeNot(LHSEqRHSValue));
+ break;
+ }
+ case BO_Comma: {
+ if (auto *Loc = Env.getStorageLocation(*RHS, SkipPast::None))
+ Env.setStorageLocation(*S, *Loc);
+ break;
+ }
+ default:
+ break;
}
- // FIXME: Add support for BO_EQ, BO_NE.
}
void VisitDeclRefExpr(const DeclRefExpr *S) {
@@ -92,6 +125,11 @@ public:
// Group decls are converted into single decls in the CFG so the cast below
// is safe.
const auto &D = *cast<VarDecl>(S->getSingleDecl());
+
+ // Static local vars are already initialized in `Environment`.
+ if (D.hasGlobalStorage())
+ return;
+
auto &Loc = Env.createStorageLocation(D);
Env.setStorageLocation(D, Loc);
@@ -103,11 +141,6 @@ public:
return;
}
- // The CFG does not contain `ParenExpr` as top-level statements in basic
- // blocks, however sub-expressions can still be of that type.
- InitExpr = skipExprWithCleanups(D.getInit()->IgnoreParens());
- assert(InitExpr != nullptr);
-
if (D.getType()->isReferenceType()) {
// Initializing a reference variable - do not create a reference to
// reference.
@@ -116,37 +149,75 @@ public:
auto &Val =
Env.takeOwnership(std::make_unique<ReferenceValue>(*InitExprLoc));
Env.setValue(Loc, Val);
- } else {
- // FIXME: The initializer expression must always be assigned a value.
- // Replace this with an assert when we have sufficient coverage of
- // language features.
- if (Value *Val = Env.createValue(D.getType()))
- Env.setValue(Loc, *Val);
}
- return;
+ } else if (auto *InitExprVal = Env.getValue(*InitExpr, SkipPast::None)) {
+ Env.setValue(Loc, *InitExprVal);
}
- if (auto *InitExprVal = Env.getValue(*InitExpr, SkipPast::None)) {
- Env.setValue(Loc, *InitExprVal);
- } else if (!D.getType()->isStructureOrClassType()) {
- // FIXME: The initializer expression must always be assigned a value.
- // Replace this with an assert when we have sufficient coverage of
- // language features.
+ if (Env.getValue(Loc) == nullptr) {
+ // We arrive here in (the few) cases where an expression is intentionally
+ // "uninterpreted". There are two ways to handle this situation: propagate
+ // the status, so that uninterpreted initializers result in uninterpreted
+ // variables, or provide a default value. We choose the latter so that
+ // later refinements of the variable can be used for reasoning about the
+ // surrounding code.
+ //
+      // FIXME: If and when we interpret all language cases, change this to
+ // assert that `InitExpr` is interpreted, rather than supplying a default
+ // value (assuming we don't update the environment API to return
+ // references).
if (Value *Val = Env.createValue(D.getType()))
Env.setValue(Loc, *Val);
- } else {
- llvm_unreachable("structs and classes must always be assigned values");
+ }
+
+ if (const auto *Decomp = dyn_cast<DecompositionDecl>(&D)) {
+ // If VarDecl is a DecompositionDecl, evaluate each of its bindings. This
+ // needs to be evaluated after initializing the values in the storage for
+ // VarDecl, as the bindings refer to them.
+ // FIXME: Add support for ArraySubscriptExpr.
+ // FIXME: Consider adding AST nodes that are used for structured bindings
+ // to the CFG.
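+      // For example (illustrative): in `auto &[x, y] = point;`, each binding
+      // is a `MemberExpr` whose base is a `DeclRefExpr` naming the implicit
+      // decomposition variable.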
+ for (const auto *B : Decomp->bindings()) {
+ auto *ME = dyn_cast_or_null<MemberExpr>(B->getBinding());
+ if (ME == nullptr)
+ continue;
+
+ auto *DE = dyn_cast_or_null<DeclRefExpr>(ME->getBase());
+ if (DE == nullptr)
+ continue;
+
+ // ME and its base haven't been visited because they aren't included in
+ // the statements of the CFG basic block.
+ VisitDeclRefExpr(DE);
+ VisitMemberExpr(ME);
+
+ if (auto *Loc = Env.getStorageLocation(*ME, SkipPast::Reference))
+ Env.setStorageLocation(*B, *Loc);
+ }
}
}
void VisitImplicitCastExpr(const ImplicitCastExpr *S) {
- // The CFG does not contain `ParenExpr` as top-level statements in basic
- // blocks, however sub-expressions can still be of that type.
- assert(S->getSubExpr() != nullptr);
- const Expr *SubExpr = S->getSubExpr()->IgnoreParens();
+ const Expr *SubExpr = S->getSubExpr();
assert(SubExpr != nullptr);
switch (S->getCastKind()) {
+ case CK_IntegralToBoolean: {
+ // This cast creates a new, boolean value from the integral value. We
+ // model that with a fresh value in the environment, unless it's already a
+ // boolean.
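+      //
+      // For example (illustrative): `bool b = n;` with `n` an `int` yields a
+      // fresh atomic boolean here, since integers aren't modeled; if the
+      // sub-expression already has a boolean value, that value is reused.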
+ auto &Loc = Env.createStorageLocation(*S);
+ Env.setStorageLocation(*S, Loc);
+ if (auto *SubExprVal = dyn_cast_or_null<BoolValue>(
+ Env.getValue(*SubExpr, SkipPast::Reference)))
+ Env.setValue(Loc, *SubExprVal);
+ else
+ // FIXME: If integer modeling is added, then update this code to create
+ // the boolean based on the integer model.
+ Env.setValue(Loc, Env.makeAtomicBoolValue());
+ break;
+ }
+
case CK_LValueToRValue: {
auto *SubExprVal = Env.getValue(*SubExpr, SkipPast::Reference);
if (SubExprVal == nullptr)
@@ -157,6 +228,18 @@ public:
Env.setValue(ExprLoc, *SubExprVal);
break;
}
+
+ case CK_IntegralCast:
+ // FIXME: This cast creates a new integral value from the
+ // subexpression. But, because we don't model integers, we don't
+ // distinguish between this new value and the underlying one. If integer
+ // modeling is added, then update this code to create a fresh location and
+ // value.
+ case CK_UncheckedDerivedToBase:
+ case CK_ConstructorConversion:
+ case CK_UserDefinedConversion:
+      // FIXME: Add tests that exercise CK_UncheckedDerivedToBase,
+ // CK_ConstructorConversion, and CK_UserDefinedConversion.
case CK_NoOp: {
// FIXME: Consider making `Environment::getStorageLocation` skip noop
// expressions (this and other similar expressions in the file) instead of
@@ -168,18 +251,23 @@ public:
Env.setStorageLocation(*S, *SubExprLoc);
break;
}
+ case CK_NullToPointer:
+ case CK_NullToMemberPointer: {
+ auto &Loc = Env.createStorageLocation(S->getType());
+ Env.setStorageLocation(*S, Loc);
+
+ auto &NullPointerVal =
+ Env.getOrCreateNullPointerValue(S->getType()->getPointeeType());
+ Env.setValue(Loc, NullPointerVal);
+ break;
+ }
default:
- // FIXME: Add support for CK_UserDefinedConversion,
- // CK_ConstructorConversion, CK_UncheckedDerivedToBase.
break;
}
}
void VisitUnaryOperator(const UnaryOperator *S) {
- // The CFG does not contain `ParenExpr` as top-level statements in basic
- // blocks, however sub-expressions can still be of that type.
- assert(S->getSubExpr() != nullptr);
- const Expr *SubExpr = S->getSubExpr()->IgnoreParens();
+ const Expr *SubExpr = S->getSubExpr();
assert(SubExpr != nullptr);
switch (S->getOpcode()) {
@@ -212,15 +300,28 @@ public:
Env.setValue(PointerLoc, PointerVal);
break;
}
+ case UO_LNot: {
+ auto *SubExprVal =
+ dyn_cast_or_null<BoolValue>(Env.getValue(*SubExpr, SkipPast::None));
+ if (SubExprVal == nullptr)
+ break;
+
+ auto &ExprLoc = Env.createStorageLocation(*S);
+ Env.setStorageLocation(*S, ExprLoc);
+ Env.setValue(ExprLoc, Env.makeNot(*SubExprVal));
+ break;
+ }
default:
- // FIXME: Add support for UO_LNot.
break;
}
}
void VisitCXXThisExpr(const CXXThisExpr *S) {
auto *ThisPointeeLoc = Env.getThisPointeeStorageLocation();
- assert(ThisPointeeLoc != nullptr);
+ if (ThisPointeeLoc == nullptr)
+ // Unions are not supported yet, and will not have a location for the
+ // `this` expression's pointee.
+ return;
auto &Loc = Env.createStorageLocation(*S);
Env.setStorageLocation(*S, Loc);
@@ -236,6 +337,24 @@ public:
if (Member->isFunctionOrFunctionTemplate())
return;
+ if (auto *D = dyn_cast<VarDecl>(Member)) {
+ if (D->hasGlobalStorage()) {
+ auto *VarDeclLoc = Env.getStorageLocation(*D, SkipPast::None);
+ if (VarDeclLoc == nullptr)
+ return;
+
+ if (VarDeclLoc->getType()->isReferenceType()) {
+ Env.setStorageLocation(*S, *VarDeclLoc);
+ } else {
+ auto &Loc = Env.createStorageLocation(*S);
+ Env.setStorageLocation(*S, Loc);
+ Env.setValue(Loc, Env.takeOwnership(
+ std::make_unique<ReferenceValue>(*VarDeclLoc)));
+ }
+ return;
+ }
+ }
+
// The receiver can be either a value or a pointer to a value. Skip past the
// indirection to handle both cases.
auto *BaseLoc = cast_or_null<AggregateStorageLocation>(
@@ -329,15 +448,17 @@ public:
if (Val == nullptr)
return;
+ // Assign a value to the storage location of the object.
Env.setValue(*ObjectLoc, *Val);
+
+ // FIXME: Add a test for the value of the whole expression.
+ // Assign a storage location for the whole expression.
+ Env.setStorageLocation(*S, *ObjectLoc);
}
}
void VisitCXXFunctionalCastExpr(const CXXFunctionalCastExpr *S) {
if (S->getCastKind() == CK_ConstructorConversion) {
- // The CFG does not contain `ParenExpr` as top-level statements in basic
- // blocks, however sub-expressions can still be of that type.
- assert(S->getSubExpr() != nullptr);
const Expr *SubExpr = S->getSubExpr();
assert(SubExpr != nullptr);
@@ -357,6 +478,9 @@ public:
}
void VisitCallExpr(const CallExpr *S) {
+ // Of clang's builtins, only `__builtin_expect` is handled explicitly, since
+ // others (like trap, debugtrap, and unreachable) are handled by CFG
+ // construction.
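+    // For example (illustrative): in `if (__builtin_expect(b, 1))`, the call
+    // shares `b`'s storage location, so `b`'s value flows through unchanged.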
if (S->isCallToStdMove()) {
assert(S->getNumArgs() == 1);
@@ -368,6 +492,17 @@ public:
return;
Env.setStorageLocation(*S, *ArgLoc);
+ } else if (S->getDirectCallee() != nullptr &&
+ S->getDirectCallee()->getBuiltinID() ==
+ Builtin::BI__builtin_expect) {
+ assert(S->getNumArgs() > 0);
+ assert(S->getArg(0) != nullptr);
+ // `__builtin_expect` returns by-value, so strip away any potential
+ // references in the argument.
+ auto *ArgLoc = Env.getStorageLocation(*S->getArg(0), SkipPast::Reference);
+ if (ArgLoc == nullptr)
+ return;
+ Env.setStorageLocation(*S, *ArgLoc);
}
}
@@ -449,13 +584,59 @@ public:
Env.setValue(Loc, Env.getBoolLiteralValue(S->getValue()));
}
+ void VisitParenExpr(const ParenExpr *S) {
+ // The CFG does not contain `ParenExpr` as top-level statements in basic
+    // blocks; however, manual traversal to sub-expressions may encounter them.
+ // Redirect to the sub-expression.
+ auto *SubExpr = S->getSubExpr();
+ assert(SubExpr != nullptr);
+ Visit(SubExpr);
+ }
+
+ void VisitExprWithCleanups(const ExprWithCleanups *S) {
+ // The CFG does not contain `ExprWithCleanups` as top-level statements in
+    // basic blocks; however, manual traversal to sub-expressions may encounter
+ // them. Redirect to the sub-expression.
+ auto *SubExpr = S->getSubExpr();
+ assert(SubExpr != nullptr);
+ Visit(SubExpr);
+ }
+
private:
+ BoolValue &getLogicOperatorSubExprValue(const Expr &SubExpr) {
+ // `SubExpr` and its parent logic operator might be part of different basic
+ // blocks. We try to access the value that is assigned to `SubExpr` in the
+ // corresponding environment.
+ if (const Environment *SubExprEnv = StmtToEnv.getEnvironment(SubExpr)) {
+ if (auto *Val = dyn_cast_or_null<BoolValue>(
+ SubExprEnv->getValue(SubExpr, SkipPast::Reference)))
+ return *Val;
+ }
+
+ if (Env.getStorageLocation(SubExpr, SkipPast::None) == nullptr) {
+ // Sub-expressions that are logic operators are not added in basic blocks
+ // (e.g. see CFG for `bool d = a && (b || c);`). If `SubExpr` is a logic
+ // operator, it may not have been evaluated and assigned a value yet. In
+ // that case, we need to first visit `SubExpr` and then try to get the
+ // value that gets assigned to it.
+ Visit(&SubExpr);
+ }
+
+ if (auto *Val = dyn_cast_or_null<BoolValue>(
+ Env.getValue(SubExpr, SkipPast::Reference)))
+ return *Val;
+
+ // If the value of `SubExpr` is still unknown, we create a fresh symbolic
+ // boolean value for it.
+ return Env.makeAtomicBoolValue();
+ }
+
+ const StmtToEnvMap &StmtToEnv;
Environment &Env;
};
-void transfer(const Stmt &S, Environment &Env) {
- assert(!isa<ParenExpr>(&S));
- TransferVisitor(Env).Visit(&S);
+void transfer(const StmtToEnvMap &StmtToEnv, const Stmt &S, Environment &Env) {
+ TransferVisitor(StmtToEnv, Env).Visit(&S);
}
} // namespace dataflow
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp
index 6b14b5ceaf69..6443fc1b6422 100644
--- a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.cpp
@@ -11,12 +11,15 @@
//
//===----------------------------------------------------------------------===//
+#include <algorithm>
#include <memory>
#include <system_error>
#include <utility>
#include <vector>
#include "clang/AST/DeclCXX.h"
+#include "clang/AST/OperationKinds.h"
+#include "clang/AST/StmtVisitor.h"
#include "clang/Analysis/Analyses/PostOrderCFGView.h"
#include "clang/Analysis/CFG.h"
#include "clang/Analysis/FlowSensitive/DataflowEnvironment.h"
@@ -24,14 +27,131 @@
#include "clang/Analysis/FlowSensitive/Transfer.h"
#include "clang/Analysis/FlowSensitive/TypeErasedDataflowAnalysis.h"
#include "clang/Analysis/FlowSensitive/Value.h"
+#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Error.h"
+#include "llvm/Support/ErrorHandling.h"
namespace clang {
namespace dataflow {
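+
+/// Maps statements to the environments of the basic blocks that contain them,
+/// backed by the block states the analysis has computed so far.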
+class StmtToEnvMapImpl : public StmtToEnvMap {
+public:
+ StmtToEnvMapImpl(
+ const ControlFlowContext &CFCtx,
+ llvm::ArrayRef<llvm::Optional<TypeErasedDataflowAnalysisState>>
+ BlockToState)
+ : CFCtx(CFCtx), BlockToState(BlockToState) {}
+
+ const Environment *getEnvironment(const Stmt &S) const override {
+ auto BlockIT = CFCtx.getStmtToBlock().find(&ignoreCFGOmittedNodes(S));
+ assert(BlockIT != CFCtx.getStmtToBlock().end());
+ const auto &State = BlockToState[BlockIT->getSecond()->getBlockID()];
+ assert(State);
+ return &State.getValue().Env;
+ }
+
+private:
+ const ControlFlowContext &CFCtx;
+ llvm::ArrayRef<llvm::Optional<TypeErasedDataflowAnalysisState>> BlockToState;
+};
+
+/// Returns the index of `Block` in the successors of `Pred`.
+static int blockIndexInPredecessor(const CFGBlock &Pred,
+ const CFGBlock &Block) {
+ auto BlockPos = llvm::find_if(
+ Pred.succs(), [&Block](const CFGBlock::AdjacentBlock &Succ) {
+ return Succ && Succ->getBlockID() == Block.getBlockID();
+ });
+ return BlockPos - Pred.succ_begin();
+}
+
+/// Extends the flow condition of an environment based on a terminator
+/// statement.
+class TerminatorVisitor : public ConstStmtVisitor<TerminatorVisitor> {
+public:
+ TerminatorVisitor(const StmtToEnvMap &StmtToEnv, Environment &Env,
+ int BlockSuccIdx)
+ : StmtToEnv(StmtToEnv), Env(Env), BlockSuccIdx(BlockSuccIdx) {}
+
+ void VisitIfStmt(const IfStmt *S) {
+ auto *Cond = S->getCond();
+ assert(Cond != nullptr);
+ extendFlowCondition(*Cond);
+ }
+
+ void VisitWhileStmt(const WhileStmt *S) {
+ auto *Cond = S->getCond();
+ assert(Cond != nullptr);
+ extendFlowCondition(*Cond);
+ }
+
+ void VisitDoStmt(const DoStmt *S) {
+ auto *Cond = S->getCond();
+ assert(Cond != nullptr);
+ extendFlowCondition(*Cond);
+ }
+
+ void VisitForStmt(const ForStmt *S) {
+ auto *Cond = S->getCond();
+ if (Cond != nullptr)
+ extendFlowCondition(*Cond);
+ }
+
+ void VisitBinaryOperator(const BinaryOperator *S) {
+ assert(S->getOpcode() == BO_LAnd || S->getOpcode() == BO_LOr);
+ auto *LHS = S->getLHS();
+ assert(LHS != nullptr);
+ extendFlowCondition(*LHS);
+ }
+
+ void VisitConditionalOperator(const ConditionalOperator *S) {
+ auto *Cond = S->getCond();
+ assert(Cond != nullptr);
+ extendFlowCondition(*Cond);
+ }
+
+private:
+ void extendFlowCondition(const Expr &Cond) {
+ // The terminator sub-expression might not be evaluated.
+ if (Env.getStorageLocation(Cond, SkipPast::None) == nullptr)
+ transfer(StmtToEnv, Cond, Env);
+
+ // FIXME: The flow condition must be an r-value, so `SkipPast::None` should
+ // suffice.
+ auto *Val =
+ cast_or_null<BoolValue>(Env.getValue(Cond, SkipPast::Reference));
+ // Value merging depends on flow conditions from different environments
+ // being mutually exclusive -- that is, they cannot both be true in their
+ // entirety (even if they may share some clauses). So, we need *some* value
+ // for the condition expression, even if just an atom.
+ if (Val == nullptr) {
+ // FIXME: Consider introducing a helper for this get-or-create pattern.
+ auto *Loc = Env.getStorageLocation(Cond, SkipPast::None);
+ if (Loc == nullptr) {
+ Loc = &Env.createStorageLocation(Cond);
+ Env.setStorageLocation(Cond, *Loc);
+ }
+ Val = &Env.makeAtomicBoolValue();
+ Env.setValue(*Loc, *Val);
+ }
+
+ // The condition must be inverted for the successor that encompasses the
+ // "else" branch, if such exists.
+ if (BlockSuccIdx == 1)
+ Val = &Env.makeNot(*Val);
+
+ Env.addToFlowCondition(*Val);
+ }
+
+ const StmtToEnvMap &StmtToEnv;
+ Environment &Env;
+ int BlockSuccIdx;
+};
+
/// Computes the input state for a given basic block by joining the output
/// states of its predecessors.
///
@@ -78,6 +198,8 @@ static TypeErasedDataflowAnalysisState computeBlockInputState(
}
llvm::Optional<TypeErasedDataflowAnalysisState> MaybeState;
+ bool ApplyBuiltinTransfer = Analysis.applyBuiltinTransfer();
+
for (const CFGBlock *Pred : Preds) {
    // Skip if `Pred` is unreachable or control flow cannot get past it.
if (!Pred || Pred->hasNoReturnElement())
@@ -87,19 +209,27 @@ static TypeErasedDataflowAnalysisState computeBlockInputState(
// loop back edge to `Block`.
const llvm::Optional<TypeErasedDataflowAnalysisState> &MaybePredState =
BlockStates[Pred->getBlockID()];
- if (!MaybePredState.hasValue())
+ if (!MaybePredState)
continue;
- const TypeErasedDataflowAnalysisState &PredState =
- MaybePredState.getValue();
- if (MaybeState.hasValue()) {
+ TypeErasedDataflowAnalysisState PredState = MaybePredState.getValue();
+ if (ApplyBuiltinTransfer) {
+ if (const Stmt *PredTerminatorStmt = Pred->getTerminatorStmt()) {
+ const StmtToEnvMapImpl StmtToEnv(CFCtx, BlockStates);
+ TerminatorVisitor(StmtToEnv, PredState.Env,
+ blockIndexInPredecessor(*Pred, Block))
+ .Visit(PredTerminatorStmt);
+ }
+ }
+
+ if (MaybeState) {
Analysis.joinTypeErased(MaybeState->Lattice, PredState.Lattice);
MaybeState->Env.join(PredState.Env, Analysis);
} else {
- MaybeState = PredState;
+ MaybeState = std::move(PredState);
}
}
- if (!MaybeState.hasValue()) {
+ if (!MaybeState) {
// FIXME: Consider passing `Block` to `Analysis.typeErasedInitialElement()`
// to enable building analyses like computation of dominators that
// initialize the state of each basic block differently.
@@ -111,17 +241,19 @@ static TypeErasedDataflowAnalysisState computeBlockInputState(
/// Transfers `State` by evaluating `CfgStmt` in the context of `Analysis`.
/// `HandleTransferredStmt` (if provided) will be applied to `CfgStmt`, after it
/// is evaluated.
-static void
-transferCFGStmt(const CFGStmt &CfgStmt, TypeErasedDataflowAnalysis &Analysis,
- TypeErasedDataflowAnalysisState &State,
- std::function<void(const CFGStmt &,
- const TypeErasedDataflowAnalysisState &)>
- HandleTransferredStmt) {
+static void transferCFGStmt(
+ const ControlFlowContext &CFCtx,
+ llvm::ArrayRef<llvm::Optional<TypeErasedDataflowAnalysisState>> BlockStates,
+ const CFGStmt &CfgStmt, TypeErasedDataflowAnalysis &Analysis,
+ TypeErasedDataflowAnalysisState &State,
+ std::function<void(const CFGStmt &,
+ const TypeErasedDataflowAnalysisState &)>
+ HandleTransferredStmt) {
const Stmt *S = CfgStmt.getStmt();
assert(S != nullptr);
if (Analysis.applyBuiltinTransfer())
- transfer(*S, State.Env);
+ transfer(StmtToEnvMapImpl(CFCtx, BlockStates), *S, State.Env);
Analysis.transferTypeErased(S, State.Lattice, State.Env);
if (HandleTransferredStmt != nullptr)
@@ -137,6 +269,11 @@ static void transferCFGInitializer(const CFGInitializer &CfgInit,
const CXXCtorInitializer *Initializer = CfgInit.getInitializer();
assert(Initializer != nullptr);
+ const FieldDecl *Member = Initializer->getMember();
+ if (Member == nullptr)
+ // Not a field initializer.
+ return;
+
auto *InitStmt = Initializer->getInit();
assert(InitStmt != nullptr);
@@ -149,9 +286,6 @@ static void transferCFGInitializer(const CFGInitializer &CfgInit,
if (InitStmtVal == nullptr)
return;
- const FieldDecl *Member = Initializer->getMember();
- assert(Member != nullptr);
-
if (Member->getType()->isReferenceType()) {
auto &MemberLoc = ThisLoc.getChild(*Member);
State.Env.setValue(MemberLoc,
@@ -176,8 +310,8 @@ TypeErasedDataflowAnalysisState transferBlock(
for (const CFGElement &Element : Block) {
switch (Element.getKind()) {
case CFGElement::Statement:
- transferCFGStmt(*Element.getAs<CFGStmt>(), Analysis, State,
- HandleTransferredStmt);
+ transferCFGStmt(CFCtx, BlockStates, *Element.getAs<CFGStmt>(), Analysis,
+ State, HandleTransferredStmt);
break;
case CFGElement::Initializer:
if (Analysis.applyBuiltinTransfer())
@@ -192,9 +326,11 @@ TypeErasedDataflowAnalysisState transferBlock(
}
llvm::Expected<std::vector<llvm::Optional<TypeErasedDataflowAnalysisState>>>
-runTypeErasedDataflowAnalysis(const ControlFlowContext &CFCtx,
- TypeErasedDataflowAnalysis &Analysis,
- const Environment &InitEnv) {
+runTypeErasedDataflowAnalysis(
+ const ControlFlowContext &CFCtx, TypeErasedDataflowAnalysis &Analysis,
+ const Environment &InitEnv,
+ std::function<void(const Stmt *, const TypeErasedDataflowAnalysisState &)>
+ PostVisitStmt) {
PostOrderCFGView POV(&CFCtx.getCFG());
ForwardDataflowWorklist Worklist(CFCtx.getCFG(), &POV);
@@ -211,10 +347,17 @@ runTypeErasedDataflowAnalysis(const ControlFlowContext &CFCtx,
// converging. To limit the damage (infinite loops) that these bugs can cause,
// limit the number of iterations.
// FIXME: Consider making the maximum number of iterations configurable.
+ // FIXME: Consider restricting the number of backedges followed, rather than
+ // iterations.
// FIXME: Set up statistics (see llvm/ADT/Statistic.h) to count average number
// of iterations, number of functions that time out, etc.
+ static constexpr uint32_t MaxAverageVisitsPerBlock = 4;
+ static constexpr uint32_t AbsoluteMaxIterations = 1 << 16;
+ const uint32_t RelativeMaxIterations =
+ MaxAverageVisitsPerBlock * BlockStates.size();
+ const uint32_t MaxIterations =
+ std::min(RelativeMaxIterations, AbsoluteMaxIterations);
uint32_t Iterations = 0;
- static constexpr uint32_t MaxIterations = 1 << 16;
while (const CFGBlock *Block = Worklist.dequeue()) {
if (++Iterations > MaxIterations) {
return llvm::createStringError(std::errc::timed_out,
@@ -226,7 +369,7 @@ runTypeErasedDataflowAnalysis(const ControlFlowContext &CFCtx,
TypeErasedDataflowAnalysisState NewBlockState =
transferBlock(CFCtx, BlockStates, *Block, InitEnv, Analysis);
- if (OldBlockState.hasValue() &&
+ if (OldBlockState &&
Analysis.isEqualTypeErased(OldBlockState.getValue().Lattice,
NewBlockState.Lattice) &&
OldBlockState->Env.equivalentTo(NewBlockState.Env, Analysis)) {
@@ -246,6 +389,20 @@ runTypeErasedDataflowAnalysis(const ControlFlowContext &CFCtx,
// FIXME: Consider evaluating unreachable basic blocks (those that have a
// state set to `llvm::None` at this point) to also analyze dead code.
+ if (PostVisitStmt) {
+ for (const CFGBlock *Block : CFCtx.getCFG()) {
+ // Skip blocks that were not evaluated.
+ if (!BlockStates[Block->getBlockID()])
+ continue;
+ transferBlock(
+ CFCtx, BlockStates, *Block, InitEnv, Analysis,
+ [&PostVisitStmt](const clang::CFGStmt &Stmt,
+ const TypeErasedDataflowAnalysisState &State) {
+ PostVisitStmt(Stmt.getStmt(), State);
+ });
+ }
+ }
+
return BlockStates;
}
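The bound introduced above is the minimum of a size-relative budget and an absolute cap. A quick worked example, with the block counts assumed for illustration:

    // With 100 basic blocks:
    //   RelativeMaxIterations = 4 * 100           = 400
    //   MaxIterations         = min(400, 65536)   = 400
    // With 100000 basic blocks the absolute cap wins:
    //   MaxIterations         = min(400000, 65536) = 65536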
diff --git a/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp
new file mode 100644
index 000000000000..0e6e70d6d5d4
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp
@@ -0,0 +1,600 @@
+//===- WatchedLiteralsSolver.cpp --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a SAT solver implementation that can be used by dataflow
+// analyses.
+//
+//===----------------------------------------------------------------------===//
+
+#include <cassert>
+#include <cstdint>
+#include <iterator>
+#include <queue>
+#include <vector>
+
+#include "clang/Analysis/FlowSensitive/Solver.h"
+#include "clang/Analysis/FlowSensitive/Value.h"
+#include "clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/STLExtras.h"
+
+namespace clang {
+namespace dataflow {
+
+// `WatchedLiteralsSolver` is an implementation of Algorithm D from Knuth's
+// The Art of Computer Programming Volume 4: Satisfiability, Fascicle 6. It is
+// based on the backtracking DPLL algorithm [1], keeps references to a single
+// "watched" literal per clause, and uses a set of "active" variables to perform
+// unit propagation.
+//
+// The solver expects that its input is a boolean formula in conjunctive normal
+// form that consists of clauses of at least one literal. A literal is either a
+// boolean variable or its negation. Below we define types, data structures, and
+// utilities that are used to represent boolean formulas in conjunctive normal
+// form.
+//
+// [1] https://en.wikipedia.org/wiki/DPLL_algorithm
+
+/// Boolean variables are represented as positive integers.
+using Variable = uint32_t;
+
+/// A null boolean variable is used as a placeholder in various data structures
+/// and algorithms.
+static constexpr Variable NullVar = 0;
+
+/// Literals are represented as positive integers. Specifically, for a boolean
+/// variable `V` that is represented as the positive integer `I`, the positive
+/// literal `V` is represented as the integer `2*I` and the negative literal
+/// `!V` is represented as the integer `2*I+1`.
+using Literal = uint32_t;
+
+/// A null literal is used as a placeholder in various data structures and
+/// algorithms.
+static constexpr Literal NullLit = 0;
+
+/// Returns the positive literal `V`.
+static constexpr Literal posLit(Variable V) { return 2 * V; }
+
+/// Returns the negative literal `!V`.
+static constexpr Literal negLit(Variable V) { return 2 * V + 1; }
+
+/// Returns the negated literal `!L`.
+static constexpr Literal notLit(Literal L) { return L ^ 1; }
+
+/// Returns the variable of `L`.
+static constexpr Variable var(Literal L) { return L >> 1; }
+
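A minimal compile-time sanity check of this encoding, assuming variable 3; the asserts are illustrative and not part of the patch:

    static_assert(posLit(3) == 6, "positive literal of V is 2*V");
    static_assert(negLit(3) == 7, "negative literal of V is 2*V+1");
    static_assert(notLit(posLit(3)) == negLit(3), "negation flips the low bit");
    static_assert(var(negLit(3)) == 3, "the variable is recovered by L >> 1");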
+/// Clause identifiers are represented as positive integers.
+using ClauseID = uint32_t;
+
+/// A null clause identifier is used as a placeholder in various data structures
+/// and algorithms.
+static constexpr ClauseID NullClause = 0;
+
+/// A boolean formula in conjunctive normal form.
+struct BooleanFormula {
+ /// `LargestVar` is equal to the largest positive integer that represents a
+ /// variable in the formula.
+ const Variable LargestVar;
+
+ /// Literals of all clauses in the formula.
+ ///
+ /// The element at index 0 stands for the literal in the null clause. It is
+ /// set to 0 and isn't used. Literals of clauses in the formula start from the
+ /// element at index 1.
+ ///
+ /// For example, for the formula `(L1 v L2) ^ (L2 v L3 v L4)` the elements of
+ /// `Clauses` will be `[0, L1, L2, L2, L3, L4]`.
+ std::vector<Literal> Clauses;
+
+ /// Start indices of clauses of the formula in `Clauses`.
+ ///
+ /// The element at index 0 stands for the start index of the null clause. It
+ /// is set to 0 and isn't used. Start indices of clauses in the formula start
+ /// from the element at index 1.
+ ///
+ /// For example, for the formula `(L1 v L2) ^ (L2 v L3 v L4)` the elements of
+ /// `ClauseStarts` will be `[0, 1, 3]`. Note that the literals of the first
+ /// clause always start at index 1. The start index for the literals of the
+ /// second clause depends on the size of the first clause and so on.
+ std::vector<size_t> ClauseStarts;
+
+ /// Maps literals (indices of the vector) to clause identifiers (elements of
+ /// the vector) that watch the respective literals.
+ ///
+ /// For a given clause, its watched literal is always its first literal in
+ /// `Clauses`. This invariant is maintained when watched literals change.
+ std::vector<ClauseID> WatchedHead;
+
+ /// Maps clause identifiers (elements of the vector) to identifiers of other
+ /// clauses that watch the same literals, forming a set of linked lists.
+ ///
+ /// The element at index 0 stands for the identifier of the clause that
+ /// follows the null clause. It is set to 0 and isn't used. Identifiers of
+ /// clauses in the formula start from the element at index 1.
+ std::vector<ClauseID> NextWatched;
+
+ explicit BooleanFormula(Variable LargestVar) : LargestVar(LargestVar) {
+ Clauses.push_back(0);
+ ClauseStarts.push_back(0);
+ NextWatched.push_back(0);
+ const size_t NumLiterals = 2 * LargestVar + 1;
+ WatchedHead.resize(NumLiterals + 1, 0);
+ }
+
+  /// Adds the `L1 v L2 v L3` clause to the formula. If `L2` or `L3` is
+  /// `NullLit`, that literal is omitted from the clause.
+ ///
+ /// Requirements:
+ ///
+ /// `L1` must not be `NullLit`.
+ ///
+ /// All literals in the input that are not `NullLit` must be distinct.
+ void addClause(Literal L1, Literal L2 = NullLit, Literal L3 = NullLit) {
+    // The literals are guaranteed to be distinct by the properties of
+    // `BoolValue` and by the construction in `buildBooleanFormula`.
+ assert(L1 != NullLit && L1 != L2 && L1 != L3 &&
+ (L2 != L3 || L2 == NullLit));
+
+ const ClauseID C = ClauseStarts.size();
+ const size_t S = Clauses.size();
+ ClauseStarts.push_back(S);
+
+ Clauses.push_back(L1);
+ if (L2 != NullLit)
+ Clauses.push_back(L2);
+ if (L3 != NullLit)
+ Clauses.push_back(L3);
+
+ // Designate the first literal as the "watched" literal of the clause.
+ NextWatched.push_back(WatchedHead[L1]);
+ WatchedHead[L1] = C;
+ }
+
+ /// Returns the number of literals in clause `C`.
+ size_t clauseSize(ClauseID C) const {
+ return C == ClauseStarts.size() - 1 ? Clauses.size() - ClauseStarts[C]
+ : ClauseStarts[C + 1] - ClauseStarts[C];
+ }
+
+ /// Returns the literals of clause `C`.
+ llvm::ArrayRef<Literal> clauseLiterals(ClauseID C) const {
+ return llvm::ArrayRef<Literal>(&Clauses[ClauseStarts[C]], clauseSize(C));
+ }
+};
+
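A short usage sketch of the layout described above, for two variables; the snippet is illustrative, with the vector contents worked out from `addClause`:

    BooleanFormula F(/*LargestVar=*/2);
    F.addClause(posLit(1), posLit(2)); // clause 1: (x1 v x2)
    F.addClause(negLit(1), posLit(2)); // clause 2: (!x1 v x2)
    // F.Clauses      == {0, 2, 4, 3, 4} -- literals, after the 0 placeholder
    // F.ClauseStarts == {0, 1, 3}       -- clause 1 starts at 1, clause 2 at 3
    // F.WatchedHead[posLit(1)] == 1 and F.WatchedHead[negLit(1)] == 2:
    // each clause initially watches its first literal.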
+/// Converts the conjunction of `Vals` into a formula in conjunctive normal
+/// form where each clause has at least one and at most three literals.
+BooleanFormula buildBooleanFormula(const llvm::DenseSet<BoolValue *> &Vals) {
+ // The general strategy of the algorithm implemented below is to map each
+ // of the sub-values in `Vals` to a unique variable and use these variables in
+  // the resulting CNF expression to avoid exponential blow-up. The number of
+ // literals in the resulting formula is guaranteed to be linear in the number
+ // of sub-values in `Vals`.
+
+ // Map each sub-value in `Vals` to a unique variable.
+ llvm::DenseMap<BoolValue *, Variable> SubValsToVar;
+ Variable NextVar = 1;
+ {
+ std::queue<BoolValue *> UnprocessedSubVals;
+ for (BoolValue *Val : Vals)
+ UnprocessedSubVals.push(Val);
+ while (!UnprocessedSubVals.empty()) {
+ BoolValue *Val = UnprocessedSubVals.front();
+ UnprocessedSubVals.pop();
+
+ if (!SubValsToVar.try_emplace(Val, NextVar).second)
+ continue;
+ ++NextVar;
+
+ // Visit the sub-values of `Val`.
+ if (auto *C = dyn_cast<ConjunctionValue>(Val)) {
+ UnprocessedSubVals.push(&C->getLeftSubValue());
+ UnprocessedSubVals.push(&C->getRightSubValue());
+ } else if (auto *D = dyn_cast<DisjunctionValue>(Val)) {
+ UnprocessedSubVals.push(&D->getLeftSubValue());
+ UnprocessedSubVals.push(&D->getRightSubValue());
+ } else if (auto *N = dyn_cast<NegationValue>(Val)) {
+ UnprocessedSubVals.push(&N->getSubVal());
+ }
+ }
+ }
+
+ auto GetVar = [&SubValsToVar](const BoolValue *Val) {
+ auto ValIt = SubValsToVar.find(Val);
+ assert(ValIt != SubValsToVar.end());
+ return ValIt->second;
+ };
+
+ BooleanFormula Formula(NextVar - 1);
+ std::vector<bool> ProcessedSubVals(NextVar, false);
+
+  // Add a unit clause asserting the variable of each top-level value in
+  // `Vals`; the input formula is the conjunction of these values.
+ for (BoolValue *Val : Vals)
+ Formula.addClause(posLit(GetVar(Val)));
+
+ // Add conjuncts that represent the mapping between newly-created variables
+ // and their corresponding sub-values.
+ std::queue<BoolValue *> UnprocessedSubVals;
+ for (BoolValue *Val : Vals)
+ UnprocessedSubVals.push(Val);
+ while (!UnprocessedSubVals.empty()) {
+ const BoolValue *Val = UnprocessedSubVals.front();
+ UnprocessedSubVals.pop();
+ const Variable Var = GetVar(Val);
+
+ if (ProcessedSubVals[Var])
+ continue;
+ ProcessedSubVals[Var] = true;
+
+ if (auto *C = dyn_cast<ConjunctionValue>(Val)) {
+ const Variable LeftSubVar = GetVar(&C->getLeftSubValue());
+ const Variable RightSubVar = GetVar(&C->getRightSubValue());
+
+ // `X <=> (A ^ B)` is equivalent to `(!X v A) ^ (!X v B) ^ (X v !A v !B)`
+ // which is already in conjunctive normal form. Below we add each of the
+ // conjuncts of the latter expression to the result.
+ Formula.addClause(negLit(Var), posLit(LeftSubVar));
+ Formula.addClause(negLit(Var), posLit(RightSubVar));
+ Formula.addClause(posLit(Var), negLit(LeftSubVar), negLit(RightSubVar));
+
+ // Visit the sub-values of `Val`.
+ UnprocessedSubVals.push(&C->getLeftSubValue());
+ UnprocessedSubVals.push(&C->getRightSubValue());
+ } else if (auto *D = dyn_cast<DisjunctionValue>(Val)) {
+ const Variable LeftSubVar = GetVar(&D->getLeftSubValue());
+ const Variable RightSubVar = GetVar(&D->getRightSubValue());
+
+ // `X <=> (A v B)` is equivalent to `(!X v A v B) ^ (X v !A) ^ (X v !B)`
+ // which is already in conjunctive normal form. Below we add each of the
+ // conjuncts of the latter expression to the result.
+ Formula.addClause(negLit(Var), posLit(LeftSubVar), posLit(RightSubVar));
+ Formula.addClause(posLit(Var), negLit(LeftSubVar));
+ Formula.addClause(posLit(Var), negLit(RightSubVar));
+
+ // Visit the sub-values of `Val`.
+ UnprocessedSubVals.push(&D->getLeftSubValue());
+ UnprocessedSubVals.push(&D->getRightSubValue());
+ } else if (auto *N = dyn_cast<NegationValue>(Val)) {
+ const Variable SubVar = GetVar(&N->getSubVal());
+
+ // `X <=> !Y` is equivalent to `(!X v !Y) ^ (X v Y)` which is already in
+ // conjunctive normal form. Below we add each of the conjuncts of the
+ // latter expression to the result.
+ Formula.addClause(negLit(Var), negLit(SubVar));
+ Formula.addClause(posLit(Var), posLit(SubVar));
+
+ // Visit the sub-values of `Val`.
+ UnprocessedSubVals.push(&N->getSubVal());
+ }
+ }
+
+ return Formula;
+}
+
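As a worked example of the transformation above, suppose `Vals` holds a single conjunction `A ^ B`, and the traversal assigns variable 1 to `A ^ B`, 2 to `A`, and 3 to `B` (numbering assumed for illustration). `buildBooleanFormula` then emits:

    // x1                  -- the top-level value is asserted
    // (!x1 v x2)          \
    // (!x1 v x3)           } encode x1 <=> (x2 ^ x3)
    // (x1 v !x2 v !x3)    /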
+class WatchedLiteralsSolverImpl {
+ /// A boolean formula in conjunctive normal form that the solver will attempt
+ /// to prove satisfiable. The formula will be modified in the process.
+ BooleanFormula Formula;
+
+ /// The search for a satisfying assignment of the variables in `Formula` will
+ /// proceed in levels, starting from 1 and going up to `Formula.LargestVar`
+ /// (inclusive). The current level is stored in `Level`. At each level the
+ /// solver will assign a value to an unassigned variable. If this leads to a
+ /// consistent partial assignment, `Level` will be incremented. Otherwise, if
+ /// it results in a conflict, the solver will backtrack by decrementing
+ /// `Level` until it reaches the most recent level where a decision was made.
+ size_t Level = 0;
+
+ /// Maps levels (indices of the vector) to variables (elements of the vector)
+ /// that are assigned values at the respective levels.
+ ///
+ /// The element at index 0 isn't used. Variables start from the element at
+ /// index 1.
+ std::vector<Variable> LevelVars;
+
+ /// State of the solver at a particular level.
+ enum class State : uint8_t {
+ /// Indicates that the solver made a decision.
+ Decision = 0,
+
+ /// Indicates that the solver made a forced move.
+ Forced = 1,
+ };
+
+ /// State of the solver at a particular level. It keeps track of previous
+ /// decisions that the solver can refer to when backtracking.
+ ///
+ /// The element at index 0 isn't used. States start from the element at index
+ /// 1.
+ std::vector<State> LevelStates;
+
+ enum class Assignment : int8_t {
+ Unassigned = -1,
+ AssignedFalse = 0,
+ AssignedTrue = 1
+ };
+
+ /// Maps variables (indices of the vector) to their assignments (elements of
+ /// the vector).
+ ///
+ /// The element at index 0 isn't used. Variable assignments start from the
+ /// element at index 1.
+ std::vector<Assignment> VarAssignments;
+
+ /// A set of unassigned variables that appear in watched literals in
+ /// `Formula`. The vector is guaranteed to contain unique elements.
+ std::vector<Variable> ActiveVars;
+
+public:
+ explicit WatchedLiteralsSolverImpl(const llvm::DenseSet<BoolValue *> &Vals)
+ : Formula(buildBooleanFormula(Vals)), LevelVars(Formula.LargestVar + 1),
+ LevelStates(Formula.LargestVar + 1) {
+ assert(!Vals.empty());
+
+ // Initialize the state at the root level to a decision so that in
+ // `reverseForcedMoves` we don't have to check that `Level >= 0` on each
+ // iteration.
+ LevelStates[0] = State::Decision;
+
+ // Initialize all variables as unassigned.
+ VarAssignments.resize(Formula.LargestVar + 1, Assignment::Unassigned);
+
+ // Initialize the active variables.
+ for (Variable Var = Formula.LargestVar; Var != NullVar; --Var) {
+ if (isWatched(posLit(Var)) || isWatched(negLit(Var)))
+ ActiveVars.push_back(Var);
+ }
+ }
+
+ Solver::Result solve() && {
+ size_t I = 0;
+ while (I < ActiveVars.size()) {
+ // Assert that the following invariants hold:
+ // 1. All active variables are unassigned.
+ // 2. All active variables form watched literals.
+ // 3. Unassigned variables that form watched literals are active.
+      // FIXME: Consider replacing these with test cases that fail if any
+      // of the invariants is broken. That might not be easy due to the
+ // transformations performed by `buildBooleanFormula`.
+ assert(activeVarsAreUnassigned());
+ assert(activeVarsFormWatchedLiterals());
+ assert(unassignedVarsFormingWatchedLiteralsAreActive());
+
+ const Variable ActiveVar = ActiveVars[I];
+
+ // Look for unit clauses that contain the active variable.
+ const bool unitPosLit = watchedByUnitClause(posLit(ActiveVar));
+ const bool unitNegLit = watchedByUnitClause(negLit(ActiveVar));
+ if (unitPosLit && unitNegLit) {
+ // We found a conflict!
+
+ // Backtrack and rewind the `Level` until the most recent non-forced
+ // assignment.
+ reverseForcedMoves();
+
+ // If the root level is reached, then all possible assignments lead to
+ // a conflict.
+ if (Level == 0)
+ return WatchedLiteralsSolver::Result::Unsatisfiable;
+
+ // Otherwise, take the other branch at the most recent level where a
+ // decision was made.
+ LevelStates[Level] = State::Forced;
+ const Variable Var = LevelVars[Level];
+ VarAssignments[Var] = VarAssignments[Var] == Assignment::AssignedTrue
+ ? Assignment::AssignedFalse
+ : Assignment::AssignedTrue;
+
+ updateWatchedLiterals();
+ } else if (unitPosLit || unitNegLit) {
+ // We found a unit clause! The value of its unassigned variable is
+ // forced.
+ ++Level;
+
+ LevelVars[Level] = ActiveVar;
+ LevelStates[Level] = State::Forced;
+ VarAssignments[ActiveVar] =
+ unitPosLit ? Assignment::AssignedTrue : Assignment::AssignedFalse;
+
+ // Remove the variable that was just assigned from the set of active
+ // variables.
+ if (I + 1 < ActiveVars.size()) {
+ // Replace the variable that was just assigned with the last active
+ // variable for efficient removal.
+ ActiveVars[I] = ActiveVars.back();
+ } else {
+ // This was the last active variable. Repeat the process from the
+ // beginning.
+ I = 0;
+ }
+ ActiveVars.pop_back();
+
+ updateWatchedLiterals();
+ } else if (I + 1 == ActiveVars.size()) {
+ // There are no remaining unit clauses in the formula! Make a decision
+ // for one of the active variables at the current level.
+ ++Level;
+
+ LevelVars[Level] = ActiveVar;
+ LevelStates[Level] = State::Decision;
+ VarAssignments[ActiveVar] = decideAssignment(ActiveVar);
+
+ // Remove the variable that was just assigned from the set of active
+ // variables.
+ ActiveVars.pop_back();
+
+ updateWatchedLiterals();
+
+ // This was the last active variable. Repeat the process from the
+ // beginning.
+ I = 0;
+ } else {
+ ++I;
+ }
+ }
+ return WatchedLiteralsSolver::Result::Satisfiable;
+ }
+
+private:
+  /// Reverses forced moves until the most recent level where a decision was
+  /// made on the assignment of a variable.
+ void reverseForcedMoves() {
+ for (; LevelStates[Level] == State::Forced; --Level) {
+ const Variable Var = LevelVars[Level];
+
+ VarAssignments[Var] = Assignment::Unassigned;
+
+ // If the variable that we pass through is watched then we add it to the
+ // active variables.
+ if (isWatched(posLit(Var)) || isWatched(negLit(Var)))
+ ActiveVars.push_back(Var);
+ }
+ }
+
+  /// Updates watched literals that are affected by a variable assignment.
+ void updateWatchedLiterals() {
+ const Variable Var = LevelVars[Level];
+
+ // Update the watched literals of clauses that currently watch the literal
+ // that falsifies `Var`.
+ const Literal FalseLit = VarAssignments[Var] == Assignment::AssignedTrue
+ ? negLit(Var)
+ : posLit(Var);
+ ClauseID FalseLitWatcher = Formula.WatchedHead[FalseLit];
+ Formula.WatchedHead[FalseLit] = NullClause;
+ while (FalseLitWatcher != NullClause) {
+ const ClauseID NextFalseLitWatcher = Formula.NextWatched[FalseLitWatcher];
+
+ // Pick the first non-false literal as the new watched literal.
+ const size_t FalseLitWatcherStart = Formula.ClauseStarts[FalseLitWatcher];
+ size_t NewWatchedLitIdx = FalseLitWatcherStart + 1;
+ while (isCurrentlyFalse(Formula.Clauses[NewWatchedLitIdx]))
+ ++NewWatchedLitIdx;
+ const Literal NewWatchedLit = Formula.Clauses[NewWatchedLitIdx];
+ const Variable NewWatchedLitVar = var(NewWatchedLit);
+
+ // Swap the old watched literal for the new one in `FalseLitWatcher` to
+ // maintain the invariant that the watched literal is at the beginning of
+ // the clause.
+ Formula.Clauses[NewWatchedLitIdx] = FalseLit;
+ Formula.Clauses[FalseLitWatcherStart] = NewWatchedLit;
+
+ // If the new watched literal isn't watched by any other clause and its
+      // variable isn't assigned, we need to add it to the active variables.
+ if (!isWatched(NewWatchedLit) && !isWatched(notLit(NewWatchedLit)) &&
+ VarAssignments[NewWatchedLitVar] == Assignment::Unassigned)
+ ActiveVars.push_back(NewWatchedLitVar);
+
+ Formula.NextWatched[FalseLitWatcher] = Formula.WatchedHead[NewWatchedLit];
+ Formula.WatchedHead[NewWatchedLit] = FalseLitWatcher;
+
+ // Go to the next clause that watches `FalseLit`.
+ FalseLitWatcher = NextFalseLitWatcher;
+ }
+ }
+
+ /// Returns true if and only if one of the clauses that watch `Lit` is a unit
+ /// clause.
+ bool watchedByUnitClause(Literal Lit) const {
+ for (ClauseID LitWatcher = Formula.WatchedHead[Lit];
+ LitWatcher != NullClause;
+ LitWatcher = Formula.NextWatched[LitWatcher]) {
+ llvm::ArrayRef<Literal> Clause = Formula.clauseLiterals(LitWatcher);
+
+ // Assert the invariant that the watched literal is always the first one
+ // in the clause.
+ // FIXME: Consider replacing this with a test case that fails if the
+ // invariant is broken by `updateWatchedLiterals`. That might not be easy
+ // due to the transformations performed by `buildBooleanFormula`.
+ assert(Clause.front() == Lit);
+
+ if (isUnit(Clause))
+ return true;
+ }
+ return false;
+ }
+
+ /// Returns true if and only if `Clause` is a unit clause.
+ bool isUnit(llvm::ArrayRef<Literal> Clause) const {
+ return llvm::all_of(Clause.drop_front(),
+ [this](Literal L) { return isCurrentlyFalse(L); });
+ }
+
+ /// Returns true if and only if `Lit` evaluates to `false` in the current
+ /// partial assignment.
+ bool isCurrentlyFalse(Literal Lit) const {
+ return static_cast<int8_t>(VarAssignments[var(Lit)]) ==
+ static_cast<int8_t>(Lit & 1);
+ }
+
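The parity comparison in `isCurrentlyFalse` works because `Assignment` mirrors the low bit of the literal encoding: a positive literal (low bit 0) is false under `AssignedFalse` (0), and a negative literal (low bit 1) is false under `AssignedTrue` (1). A small illustration:

    // Lit = negLit(3) = 7: var(7) == 3 and (7 & 1) == 1, so `!x3` is
    // currently false iff VarAssignments[3] == Assignment::AssignedTrue.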
+ /// Returns true if and only if `Lit` is watched by a clause in `Formula`.
+ bool isWatched(Literal Lit) const {
+ return Formula.WatchedHead[Lit] != NullClause;
+ }
+
+ /// Returns an assignment for an unassigned variable.
+ Assignment decideAssignment(Variable Var) const {
+ return !isWatched(posLit(Var)) || isWatched(negLit(Var))
+ ? Assignment::AssignedFalse
+ : Assignment::AssignedTrue;
+ }
+
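A brief reading of the `decideAssignment` heuristic above, sketched as a decision table (illustrative):

    // posLit(Var) watched, negLit(Var) not watched -> AssignedTrue
    // any other combination                        -> AssignedFalse
    // i.e. prefer the polarity that appears as a watched literal, so the
    // clauses watching it are satisfied by the decision.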
+ /// Returns a set of all watched literals.
+ llvm::DenseSet<Literal> watchedLiterals() const {
+ llvm::DenseSet<Literal> WatchedLiterals;
+ for (Literal Lit = 2; Lit < Formula.WatchedHead.size(); Lit++) {
+ if (Formula.WatchedHead[Lit] == NullClause)
+ continue;
+ WatchedLiterals.insert(Lit);
+ }
+ return WatchedLiterals;
+ }
+
+ /// Returns true if and only if all active variables are unassigned.
+ bool activeVarsAreUnassigned() const {
+ return llvm::all_of(ActiveVars, [this](Variable Var) {
+ return VarAssignments[Var] == Assignment::Unassigned;
+ });
+ }
+
+ /// Returns true if and only if all active variables form watched literals.
+ bool activeVarsFormWatchedLiterals() const {
+ const llvm::DenseSet<Literal> WatchedLiterals = watchedLiterals();
+ return llvm::all_of(ActiveVars, [&WatchedLiterals](Variable Var) {
+ return WatchedLiterals.contains(posLit(Var)) ||
+ WatchedLiterals.contains(negLit(Var));
+ });
+ }
+
+  /// Returns true if and only if all unassigned variables that form watched
+  /// literals are active.
+ bool unassignedVarsFormingWatchedLiteralsAreActive() const {
+ const llvm::DenseSet<Variable> ActiveVarsSet(ActiveVars.begin(),
+ ActiveVars.end());
+ for (Literal Lit : watchedLiterals()) {
+ const Variable Var = var(Lit);
+ if (VarAssignments[Var] != Assignment::Unassigned)
+ continue;
+ if (ActiveVarsSet.contains(Var))
+ continue;
+ return false;
+ }
+ return true;
+ }
+};
+
+Solver::Result WatchedLiteralsSolver::solve(llvm::DenseSet<BoolValue *> Vals) {
+ return Vals.empty() ? WatchedLiteralsSolver::Result::Satisfiable
+ : WatchedLiteralsSolverImpl(Vals).solve();
+}
+
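A minimal usage sketch of the public entry point, assuming the implicit default constructor declared in WatchedLiteralsSolver.h; illustrative only:

    WatchedLiteralsSolver Solver;
    // An empty constraint set is vacuously satisfiable and returns before
    // `WatchedLiteralsSolverImpl` is ever constructed.
    assert(Solver.solve({}) == Solver::Result::Satisfiable);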
+} // namespace dataflow
+} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/Analysis/PathDiagnostic.cpp b/contrib/llvm-project/clang/lib/Analysis/PathDiagnostic.cpp
index ee8185c2147c..8a7305000746 100644
--- a/contrib/llvm-project/clang/lib/Analysis/PathDiagnostic.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/PathDiagnostic.cpp
@@ -319,7 +319,7 @@ static Optional<bool> comparePath(const PathPieces &X, const PathPieces &Y) {
for ( ; X_I != X_end && Y_I != Y_end; ++X_I, ++Y_I) {
Optional<bool> b = comparePiece(**X_I, **Y_I);
- if (b.hasValue())
+ if (b)
return b.getValue();
}
@@ -396,7 +396,7 @@ static bool compare(const PathDiagnostic &X, const PathDiagnostic &Y) {
return (*XI) < (*YI);
}
Optional<bool> b = comparePath(X.path, Y.path);
- assert(b.hasValue());
+ assert(b);
return b.getValue();
}
@@ -434,8 +434,8 @@ void PathDiagnosticConsumer::FlushDiagnostics(
}
PathDiagnosticConsumer::FilesMade::~FilesMade() {
- for (PDFileEntry &Entry : Set)
- Entry.~PDFileEntry();
+ for (auto It = Set.begin(); It != Set.end();)
+ (It++)->~PDFileEntry();
}
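The rewritten destructor is the usual destroy-while-iterating idiom: the post-increment moves the iterator off the node before that node's destructor runs, which the previous range-based for could not guarantee. A generic sketch of the same idiom, with types assumed for illustration:

    // Safe: `It` has already advanced when `~Node()` runs on the element
    // it just left; a range-for would increment through the dead node.
    for (auto It = Nodes.begin(); It != Nodes.end();)
      (It++)->~Node();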
void PathDiagnosticConsumer::FilesMade::addDiagnostic(const PathDiagnostic &PD,
diff --git a/contrib/llvm-project/clang/lib/Analysis/ReachableCode.cpp b/contrib/llvm-project/clang/lib/Analysis/ReachableCode.cpp
index 5be8180113da..1b7d774aeb40 100644
--- a/contrib/llvm-project/clang/lib/Analysis/ReachableCode.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/ReachableCode.cpp
@@ -345,13 +345,13 @@ static unsigned scanFromBlock(const CFGBlock *Start,
if (!UB)
break;
- if (!TreatAllSuccessorsAsReachable.hasValue()) {
+ if (!TreatAllSuccessorsAsReachable) {
assert(PP);
TreatAllSuccessorsAsReachable =
shouldTreatSuccessorsAsReachable(item, *PP);
}
- if (TreatAllSuccessorsAsReachable.getValue()) {
+ if (*TreatAllSuccessorsAsReachable) {
B = UB;
break;
}
diff --git a/contrib/llvm-project/clang/lib/Analysis/RetainSummaryManager.cpp b/contrib/llvm-project/clang/lib/Analysis/RetainSummaryManager.cpp
index 836e369758d3..9098cf370d4f 100644
--- a/contrib/llvm-project/clang/lib/Analysis/RetainSummaryManager.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/RetainSummaryManager.cpp
@@ -398,7 +398,7 @@ const RetainSummary *RetainSummaryManager::getSummaryForObjCOrCFObject(
} else if (FName.startswith("NSLog")) {
return getDoNothingSummary();
} else if (FName.startswith("NS") && FName.contains("Insert")) {
- // Whitelist NSXXInsertXX, for example NSMapInsertIfAbsent, since they can
+ // Allowlist NSXXInsertXX, for example NSMapInsertIfAbsent, since they can
// be deallocated by NSMapRemove. (radar://11152419)
ScratchArgs = AF.add(ScratchArgs, 1, ArgEffect(StopTracking));
ScratchArgs = AF.add(ScratchArgs, 2, ArgEffect(StopTracking));
diff --git a/contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp b/contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp
index 9cc990bd35a3..03bbf078d7e8 100644
--- a/contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/ThreadSafety.cpp
@@ -41,7 +41,6 @@
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/ImmutableMap.h"
#include "llvm/ADT/Optional.h"
-#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
@@ -75,7 +74,7 @@ static void warnInvalidLock(ThreadSafetyHandler &Handler,
// FIXME: add a note about the attribute location in MutexExp or D
if (Loc.isValid())
- Handler.handleInvalidLockExp(Kind, Loc);
+ Handler.handleInvalidLockExp(Loc);
}
namespace {
@@ -140,12 +139,12 @@ public:
SourceLocation JoinLoc, LockErrorKind LEK,
ThreadSafetyHandler &Handler) const = 0;
virtual void handleLock(FactSet &FSet, FactManager &FactMan,
- const FactEntry &entry, ThreadSafetyHandler &Handler,
- StringRef DiagKind) const = 0;
+ const FactEntry &entry,
+ ThreadSafetyHandler &Handler) const = 0;
virtual void handleUnlock(FactSet &FSet, FactManager &FactMan,
const CapabilityExpr &Cp, SourceLocation UnlockLoc,
- bool FullyRemove, ThreadSafetyHandler &Handler,
- StringRef DiagKind) const = 0;
+ bool FullyRemove,
+ ThreadSafetyHandler &Handler) const = 0;
// Return true if LKind >= LK, where exclusive > shared
bool isAtLeast(LockKind LK) const {
@@ -866,21 +865,21 @@ public:
SourceLocation JoinLoc, LockErrorKind LEK,
ThreadSafetyHandler &Handler) const override {
if (!asserted() && !negative() && !isUniversal()) {
- Handler.handleMutexHeldEndOfScope("mutex", toString(), loc(), JoinLoc,
+ Handler.handleMutexHeldEndOfScope(getKind(), toString(), loc(), JoinLoc,
LEK);
}
}
void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
- ThreadSafetyHandler &Handler,
- StringRef DiagKind) const override {
- Handler.handleDoubleLock(DiagKind, entry.toString(), loc(), entry.loc());
+ ThreadSafetyHandler &Handler) const override {
+ Handler.handleDoubleLock(entry.getKind(), entry.toString(), loc(),
+ entry.loc());
}
void handleUnlock(FactSet &FSet, FactManager &FactMan,
const CapabilityExpr &Cp, SourceLocation UnlockLoc,
- bool FullyRemove, ThreadSafetyHandler &Handler,
- StringRef DiagKind) const override {
+ bool FullyRemove,
+ ThreadSafetyHandler &Handler) const override {
FSet.removeLock(FactMan, Cp);
if (!Cp.negative()) {
FSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
@@ -897,25 +896,27 @@ private:
UCK_ReleasedExclusive, ///< Exclusive capability that was released.
};
- using UnderlyingCapability =
- llvm::PointerIntPair<const til::SExpr *, 2, UnderlyingCapabilityKind>;
+ struct UnderlyingCapability {
+ CapabilityExpr Cap;
+ UnderlyingCapabilityKind Kind;
+ };
- SmallVector<UnderlyingCapability, 4> UnderlyingMutexes;
+ SmallVector<UnderlyingCapability, 2> UnderlyingMutexes;
public:
ScopedLockableFactEntry(const CapabilityExpr &CE, SourceLocation Loc)
: FactEntry(CE, LK_Exclusive, Loc, Acquired) {}
void addLock(const CapabilityExpr &M) {
- UnderlyingMutexes.emplace_back(M.sexpr(), UCK_Acquired);
+ UnderlyingMutexes.push_back(UnderlyingCapability{M, UCK_Acquired});
}
void addExclusiveUnlock(const CapabilityExpr &M) {
- UnderlyingMutexes.emplace_back(M.sexpr(), UCK_ReleasedExclusive);
+ UnderlyingMutexes.push_back(UnderlyingCapability{M, UCK_ReleasedExclusive});
}
void addSharedUnlock(const CapabilityExpr &M) {
- UnderlyingMutexes.emplace_back(M.sexpr(), UCK_ReleasedShared);
+ UnderlyingMutexes.push_back(UnderlyingCapability{M, UCK_ReleasedShared});
}
void
@@ -923,51 +924,45 @@ public:
SourceLocation JoinLoc, LockErrorKind LEK,
ThreadSafetyHandler &Handler) const override {
for (const auto &UnderlyingMutex : UnderlyingMutexes) {
- const auto *Entry = FSet.findLock(
- FactMan, CapabilityExpr(UnderlyingMutex.getPointer(), false));
- if ((UnderlyingMutex.getInt() == UCK_Acquired && Entry) ||
- (UnderlyingMutex.getInt() != UCK_Acquired && !Entry)) {
+ const auto *Entry = FSet.findLock(FactMan, UnderlyingMutex.Cap);
+ if ((UnderlyingMutex.Kind == UCK_Acquired && Entry) ||
+ (UnderlyingMutex.Kind != UCK_Acquired && !Entry)) {
// If this scoped lock manages another mutex, and if the underlying
// mutex is still/not held, then warn about the underlying mutex.
- Handler.handleMutexHeldEndOfScope(
- "mutex", sx::toString(UnderlyingMutex.getPointer()), loc(), JoinLoc,
- LEK);
+ Handler.handleMutexHeldEndOfScope(UnderlyingMutex.Cap.getKind(),
+ UnderlyingMutex.Cap.toString(), loc(),
+ JoinLoc, LEK);
}
}
}
void handleLock(FactSet &FSet, FactManager &FactMan, const FactEntry &entry,
- ThreadSafetyHandler &Handler,
- StringRef DiagKind) const override {
+ ThreadSafetyHandler &Handler) const override {
for (const auto &UnderlyingMutex : UnderlyingMutexes) {
- CapabilityExpr UnderCp(UnderlyingMutex.getPointer(), false);
-
- if (UnderlyingMutex.getInt() == UCK_Acquired)
- lock(FSet, FactMan, UnderCp, entry.kind(), entry.loc(), &Handler,
- DiagKind);
+ if (UnderlyingMutex.Kind == UCK_Acquired)
+ lock(FSet, FactMan, UnderlyingMutex.Cap, entry.kind(), entry.loc(),
+ &Handler);
else
- unlock(FSet, FactMan, UnderCp, entry.loc(), &Handler, DiagKind);
+ unlock(FSet, FactMan, UnderlyingMutex.Cap, entry.loc(), &Handler);
}
}
void handleUnlock(FactSet &FSet, FactManager &FactMan,
const CapabilityExpr &Cp, SourceLocation UnlockLoc,
- bool FullyRemove, ThreadSafetyHandler &Handler,
- StringRef DiagKind) const override {
+ bool FullyRemove,
+ ThreadSafetyHandler &Handler) const override {
assert(!Cp.negative() && "Managing object cannot be negative.");
for (const auto &UnderlyingMutex : UnderlyingMutexes) {
- CapabilityExpr UnderCp(UnderlyingMutex.getPointer(), false);
-
// Remove/lock the underlying mutex if it exists/is still unlocked; warn
// on double unlocking/locking if we're not destroying the scoped object.
ThreadSafetyHandler *TSHandler = FullyRemove ? nullptr : &Handler;
- if (UnderlyingMutex.getInt() == UCK_Acquired) {
- unlock(FSet, FactMan, UnderCp, UnlockLoc, TSHandler, DiagKind);
+ if (UnderlyingMutex.Kind == UCK_Acquired) {
+ unlock(FSet, FactMan, UnderlyingMutex.Cap, UnlockLoc, TSHandler);
} else {
- LockKind kind = UnderlyingMutex.getInt() == UCK_ReleasedShared
+ LockKind kind = UnderlyingMutex.Kind == UCK_ReleasedShared
? LK_Shared
: LK_Exclusive;
- lock(FSet, FactMan, UnderCp, kind, UnlockLoc, TSHandler, DiagKind);
+ lock(FSet, FactMan, UnderlyingMutex.Cap, kind, UnlockLoc, TSHandler);
}
}
if (FullyRemove)
@@ -976,11 +971,12 @@ public:
private:
void lock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
- LockKind kind, SourceLocation loc, ThreadSafetyHandler *Handler,
- StringRef DiagKind) const {
+ LockKind kind, SourceLocation loc,
+ ThreadSafetyHandler *Handler) const {
if (const FactEntry *Fact = FSet.findLock(FactMan, Cp)) {
if (Handler)
- Handler->handleDoubleLock(DiagKind, Cp.toString(), Fact->loc(), loc);
+ Handler->handleDoubleLock(Cp.getKind(), Cp.toString(), Fact->loc(),
+ loc);
} else {
FSet.removeLock(FactMan, !Cp);
FSet.addLock(FactMan,
@@ -989,8 +985,7 @@ private:
}
void unlock(FactSet &FSet, FactManager &FactMan, const CapabilityExpr &Cp,
- SourceLocation loc, ThreadSafetyHandler *Handler,
- StringRef DiagKind) const {
+ SourceLocation loc, ThreadSafetyHandler *Handler) const {
if (FSet.findLock(FactMan, Cp)) {
FSet.removeLock(FactMan, Cp);
FSet.addLock(FactMan, std::make_unique<LockableFactEntry>(
@@ -999,7 +994,7 @@ private:
SourceLocation PrevLoc;
if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
PrevLoc = Neg->loc();
- Handler->handleUnmatchedUnlock(DiagKind, Cp.toString(), loc, PrevLoc);
+ Handler->handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), loc, PrevLoc);
}
}
};
@@ -1028,10 +1023,9 @@ public:
bool inCurrentScope(const CapabilityExpr &CapE);
void addLock(FactSet &FSet, std::unique_ptr<FactEntry> Entry,
- StringRef DiagKind, bool ReqAttr = false);
+ bool ReqAttr = false);
void removeLock(FactSet &FSet, const CapabilityExpr &CapE,
- SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind,
- StringRef DiagKind);
+ SourceLocation UnlockLoc, bool FullyRemove, LockKind Kind);
template <typename AttrType>
void getMutexIDs(CapExprSet &Mtxs, AttrType *Attr, const Expr *Exp,
@@ -1219,53 +1213,6 @@ public:
} // namespace
-static StringRef ClassifyDiagnostic(const CapabilityAttr *A) {
- return A->getName();
-}
-
-static StringRef ClassifyDiagnostic(QualType VDT) {
- // We need to look at the declaration of the type of the value to determine
- // which it is. The type should either be a record or a typedef, or a pointer
- // or reference thereof.
- if (const auto *RT = VDT->getAs<RecordType>()) {
- if (const auto *RD = RT->getDecl())
- if (const auto *CA = RD->getAttr<CapabilityAttr>())
- return ClassifyDiagnostic(CA);
- } else if (const auto *TT = VDT->getAs<TypedefType>()) {
- if (const auto *TD = TT->getDecl())
- if (const auto *CA = TD->getAttr<CapabilityAttr>())
- return ClassifyDiagnostic(CA);
- } else if (VDT->isPointerType() || VDT->isReferenceType())
- return ClassifyDiagnostic(VDT->getPointeeType());
-
- return "mutex";
-}
-
-static StringRef ClassifyDiagnostic(const ValueDecl *VD) {
- assert(VD && "No ValueDecl passed");
-
- // The ValueDecl is the declaration of a mutex or role (hopefully).
- return ClassifyDiagnostic(VD->getType());
-}
-
-template <typename AttrTy>
-static std::enable_if_t<!has_arg_iterator_range<AttrTy>::value, StringRef>
-ClassifyDiagnostic(const AttrTy *A) {
- if (const ValueDecl *VD = getValueDecl(A->getArg()))
- return ClassifyDiagnostic(VD);
- return "mutex";
-}
-
-template <typename AttrTy>
-static std::enable_if_t<has_arg_iterator_range<AttrTy>::value, StringRef>
-ClassifyDiagnostic(const AttrTy *A) {
- for (const auto *Arg : A->args()) {
- if (const ValueDecl *VD = getValueDecl(Arg))
- return ClassifyDiagnostic(VD);
- }
- return "mutex";
-}
-
bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
const threadSafety::til::SExpr *SExp = CapE.sexpr();
assert(SExp && "Null expressions should be ignored");
@@ -1297,7 +1244,7 @@ bool ThreadSafetyAnalyzer::inCurrentScope(const CapabilityExpr &CapE) {
/// \param ReqAttr -- true if this is part of an initial Requires attribute.
void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
std::unique_ptr<FactEntry> Entry,
- StringRef DiagKind, bool ReqAttr) {
+ bool ReqAttr) {
if (Entry->shouldIgnore())
return;
@@ -1310,7 +1257,7 @@ void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
}
else {
if (inCurrentScope(*Entry) && !Entry->asserted())
- Handler.handleNegativeNotHeld(DiagKind, Entry->toString(),
+ Handler.handleNegativeNotHeld(Entry->getKind(), Entry->toString(),
NegC.toString(), Entry->loc());
}
}
@@ -1319,13 +1266,13 @@ void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
if (Handler.issueBetaWarnings() &&
!Entry->asserted() && !Entry->declared()) {
GlobalBeforeSet->checkBeforeAfter(Entry->valueDecl(), FSet, *this,
- Entry->loc(), DiagKind);
+ Entry->loc(), Entry->getKind());
}
// FIXME: Don't always warn when we have support for reentrant locks.
if (const FactEntry *Cp = FSet.findLock(FactMan, *Entry)) {
if (!Entry->asserted())
- Cp->handleLock(FSet, FactMan, *Entry, Handler, DiagKind);
+ Cp->handleLock(FSet, FactMan, *Entry, Handler);
} else {
FSet.addLock(FactMan, std::move(Entry));
}
@@ -1335,8 +1282,7 @@ void ThreadSafetyAnalyzer::addLock(FactSet &FSet,
/// \param UnlockLoc The source location of the unlock (only used in error msg)
void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
SourceLocation UnlockLoc,
- bool FullyRemove, LockKind ReceivedKind,
- StringRef DiagKind) {
+ bool FullyRemove, LockKind ReceivedKind) {
if (Cp.shouldIgnore())
return;
@@ -1345,19 +1291,19 @@ void ThreadSafetyAnalyzer::removeLock(FactSet &FSet, const CapabilityExpr &Cp,
SourceLocation PrevLoc;
if (const FactEntry *Neg = FSet.findLock(FactMan, !Cp))
PrevLoc = Neg->loc();
- Handler.handleUnmatchedUnlock(DiagKind, Cp.toString(), UnlockLoc, PrevLoc);
+ Handler.handleUnmatchedUnlock(Cp.getKind(), Cp.toString(), UnlockLoc,
+ PrevLoc);
return;
}
// Generic lock removal doesn't care about lock kind mismatches, but
// otherwise diagnose when the lock kinds are mismatched.
if (ReceivedKind != LK_Generic && LDat->kind() != ReceivedKind) {
- Handler.handleIncorrectUnlockKind(DiagKind, Cp.toString(), LDat->kind(),
+ Handler.handleIncorrectUnlockKind(Cp.getKind(), Cp.toString(), LDat->kind(),
ReceivedKind, LDat->loc(), UnlockLoc);
}
- LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler,
- DiagKind);
+ LDat->handleUnlock(FSet, FactMan, Cp, UnlockLoc, FullyRemove, Handler);
}
/// Extract the list of mutexIDs from the attribute on an expression,
@@ -1370,8 +1316,8 @@ void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
// The mutex held is the "this" object.
CapabilityExpr Cp = SxBuilder.translateAttrExpr(nullptr, D, Exp, SelfDecl);
if (Cp.isInvalid()) {
- warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
- return;
+ warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind());
+ return;
}
//else
if (!Cp.shouldIgnore())
@@ -1382,8 +1328,8 @@ void ThreadSafetyAnalyzer::getMutexIDs(CapExprSet &Mtxs, AttrType *Attr,
for (const auto *Arg : Attr->args()) {
CapabilityExpr Cp = SxBuilder.translateAttrExpr(Arg, D, Exp, SelfDecl);
if (Cp.isInvalid()) {
- warnInvalidLock(Handler, nullptr, D, Exp, ClassifyDiagnostic(Attr));
- continue;
+ warnInvalidLock(Handler, nullptr, D, Exp, Cp.getKind());
+ continue;
}
//else
if (!Cp.shouldIgnore())
@@ -1522,7 +1468,6 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
bool Negate = false;
const CFGBlockInfo *PredBlockInfo = &BlockInfo[PredBlock->getBlockID()];
const LocalVarContext &LVarCtx = PredBlockInfo->ExitContext;
- StringRef CapDiagKind = "mutex";
const auto *Exp = getTrylockCallExpr(Cond, LVarCtx, Negate);
if (!Exp)
@@ -1543,21 +1488,18 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
Exp, FunDecl, PredBlock, CurrBlock, A->getSuccessValue(),
Negate);
- CapDiagKind = ClassifyDiagnostic(A);
break;
};
case attr::ExclusiveTrylockFunction: {
const auto *A = cast<ExclusiveTrylockFunctionAttr>(Attr);
- getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl,
- PredBlock, CurrBlock, A->getSuccessValue(), Negate);
- CapDiagKind = ClassifyDiagnostic(A);
+ getMutexIDs(ExclusiveLocksToAdd, A, Exp, FunDecl, PredBlock, CurrBlock,
+ A->getSuccessValue(), Negate);
break;
}
case attr::SharedTrylockFunction: {
const auto *A = cast<SharedTrylockFunctionAttr>(Attr);
- getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl,
- PredBlock, CurrBlock, A->getSuccessValue(), Negate);
- CapDiagKind = ClassifyDiagnostic(A);
+ getMutexIDs(SharedLocksToAdd, A, Exp, FunDecl, PredBlock, CurrBlock,
+ A->getSuccessValue(), Negate);
break;
}
default:
@@ -1569,12 +1511,10 @@ void ThreadSafetyAnalyzer::getEdgeLockset(FactSet& Result,
SourceLocation Loc = Exp->getExprLoc();
for (const auto &ExclusiveLockToAdd : ExclusiveLocksToAdd)
addLock(Result, std::make_unique<LockableFactEntry>(ExclusiveLockToAdd,
- LK_Exclusive, Loc),
- CapDiagKind);
+ LK_Exclusive, Loc));
for (const auto &SharedLockToAdd : SharedLocksToAdd)
addLock(Result, std::make_unique<LockableFactEntry>(SharedLockToAdd,
- LK_Shared, Loc),
- CapDiagKind);
+ LK_Shared, Loc));
}
namespace {
@@ -1595,9 +1535,8 @@ class BuildLockset : public ConstStmtVisitor<BuildLockset> {
// helper functions
void warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp, AccessKind AK,
Expr *MutexExp, ProtectedOperationKind POK,
- StringRef DiagKind, SourceLocation Loc);
- void warnIfMutexHeld(const NamedDecl *D, const Expr *Exp, Expr *MutexExp,
- StringRef DiagKind);
+ SourceLocation Loc);
+ void warnIfMutexHeld(const NamedDecl *D, const Expr *Exp, Expr *MutexExp);
void checkAccess(const Expr *Exp, AccessKind AK,
ProtectedOperationKind POK = POK_VarAccess);
@@ -1630,12 +1569,12 @@ public:
void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
AccessKind AK, Expr *MutexExp,
ProtectedOperationKind POK,
- StringRef DiagKind, SourceLocation Loc) {
+ SourceLocation Loc) {
LockKind LK = getLockKindFromAccessKind(AK);
CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp);
if (Cp.isInvalid()) {
- warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind);
+ warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, Cp.getKind());
return;
} else if (Cp.shouldIgnore()) {
return;
@@ -1646,7 +1585,7 @@ void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
const FactEntry *LDat = FSet.findLock(Analyzer->FactMan, !Cp);
if (LDat) {
Analyzer->Handler.handleFunExcludesLock(
- DiagKind, D->getNameAsString(), (!Cp).toString(), Loc);
+ Cp.getKind(), D->getNameAsString(), (!Cp).toString(), Loc);
return;
}
@@ -1672,28 +1611,28 @@ void BuildLockset::warnIfMutexNotHeld(const NamedDecl *D, const Expr *Exp,
// Warn that there's no precise match.
std::string PartMatchStr = LDat->toString();
StringRef PartMatchName(PartMatchStr);
- Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
+ Analyzer->Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(),
LK, Loc, &PartMatchName);
} else {
// Warn that there's no match at all.
- Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
+ Analyzer->Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(),
LK, Loc);
}
NoError = false;
}
// Make sure the mutex we found is the right kind.
if (NoError && LDat && !LDat->isAtLeast(LK)) {
- Analyzer->Handler.handleMutexNotHeld(DiagKind, D, POK, Cp.toString(),
+ Analyzer->Handler.handleMutexNotHeld(Cp.getKind(), D, POK, Cp.toString(),
LK, Loc);
}
}
/// Warn if the LSet contains the given lock.
void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp,
- Expr *MutexExp, StringRef DiagKind) {
+ Expr *MutexExp) {
CapabilityExpr Cp = Analyzer->SxBuilder.translateAttrExpr(MutexExp, D, Exp);
if (Cp.isInvalid()) {
- warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, DiagKind);
+ warnInvalidLock(Analyzer->Handler, MutexExp, D, Exp, Cp.getKind());
return;
} else if (Cp.shouldIgnore()) {
return;
@@ -1701,8 +1640,8 @@ void BuildLockset::warnIfMutexHeld(const NamedDecl *D, const Expr *Exp,
const FactEntry *LDat = FSet.findLock(Analyzer->FactMan, Cp);
if (LDat) {
- Analyzer->Handler.handleFunExcludesLock(
- DiagKind, D->getNameAsString(), Cp.toString(), Exp->getExprLoc());
+ Analyzer->Handler.handleFunExcludesLock(Cp.getKind(), D->getNameAsString(),
+ Cp.toString(), Exp->getExprLoc());
}
}
@@ -1757,12 +1696,11 @@ void BuildLockset::checkAccess(const Expr *Exp, AccessKind AK,
return;
if (D->hasAttr<GuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan)) {
- Analyzer->Handler.handleNoMutexHeld("mutex", D, POK, AK, Loc);
+ Analyzer->Handler.handleNoMutexHeld(D, POK, AK, Loc);
}
for (const auto *I : D->specific_attrs<GuardedByAttr>())
- warnIfMutexNotHeld(D, Exp, AK, I->getArg(), POK,
- ClassifyDiagnostic(I), Loc);
+ warnIfMutexNotHeld(D, Exp, AK, I->getArg(), POK, Loc);
}
/// Checks pt_guarded_by and pt_guarded_var attributes.
@@ -1796,12 +1734,10 @@ void BuildLockset::checkPtAccess(const Expr *Exp, AccessKind AK,
return;
if (D->hasAttr<PtGuardedVarAttr>() && FSet.isEmpty(Analyzer->FactMan))
- Analyzer->Handler.handleNoMutexHeld("mutex", D, PtPOK, AK,
- Exp->getExprLoc());
+ Analyzer->Handler.handleNoMutexHeld(D, PtPOK, AK, Exp->getExprLoc());
for (auto const *I : D->specific_attrs<PtGuardedByAttr>())
- warnIfMutexNotHeld(D, Exp, AK, I->getArg(), PtPOK,
- ClassifyDiagnostic(I), Exp->getExprLoc());
+ warnIfMutexNotHeld(D, Exp, AK, I->getArg(), PtPOK, Exp->getExprLoc());
}
/// Process a function call, method call, constructor call,
@@ -1820,7 +1756,6 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
CapExprSet ExclusiveLocksToAdd, SharedLocksToAdd;
CapExprSet ExclusiveLocksToRemove, SharedLocksToRemove, GenericLocksToRemove;
CapExprSet ScopedReqsAndExcludes;
- StringRef CapDiagKind = "mutex";
// Figure out if we're constructing an object of scoped lockable class
bool isScopedVar = false;
@@ -1841,8 +1776,6 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
Analyzer->getMutexIDs(A->isShared() ? SharedLocksToAdd
: ExclusiveLocksToAdd,
A, Exp, D, VD);
-
- CapDiagKind = ClassifyDiagnostic(A);
break;
}
@@ -1856,10 +1789,8 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
for (const auto &AssertLock : AssertLocks)
Analyzer->addLock(
- FSet,
- std::make_unique<LockableFactEntry>(AssertLock, LK_Exclusive, Loc,
- FactEntry::Asserted),
- ClassifyDiagnostic(A));
+ FSet, std::make_unique<LockableFactEntry>(
+ AssertLock, LK_Exclusive, Loc, FactEntry::Asserted));
break;
}
case attr::AssertSharedLock: {
@@ -1869,10 +1800,8 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
for (const auto &AssertLock : AssertLocks)
Analyzer->addLock(
- FSet,
- std::make_unique<LockableFactEntry>(AssertLock, LK_Shared, Loc,
- FactEntry::Asserted),
- ClassifyDiagnostic(A));
+ FSet, std::make_unique<LockableFactEntry>(
+ AssertLock, LK_Shared, Loc, FactEntry::Asserted));
break;
}
@@ -1881,12 +1810,10 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
CapExprSet AssertLocks;
Analyzer->getMutexIDs(AssertLocks, A, Exp, D, VD);
for (const auto &AssertLock : AssertLocks)
- Analyzer->addLock(FSet,
- std::make_unique<LockableFactEntry>(
- AssertLock,
- A->isShared() ? LK_Shared : LK_Exclusive, Loc,
- FactEntry::Asserted),
- ClassifyDiagnostic(A));
+ Analyzer->addLock(FSet, std::make_unique<LockableFactEntry>(
+ AssertLock,
+ A->isShared() ? LK_Shared : LK_Exclusive,
+ Loc, FactEntry::Asserted));
break;
}
@@ -1900,8 +1827,6 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
Analyzer->getMutexIDs(SharedLocksToRemove, A, Exp, D, VD);
else
Analyzer->getMutexIDs(ExclusiveLocksToRemove, A, Exp, D, VD);
-
- CapDiagKind = ClassifyDiagnostic(A);
break;
}
@@ -1909,8 +1834,7 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
const auto *A = cast<RequiresCapabilityAttr>(At);
for (auto *Arg : A->args()) {
warnIfMutexNotHeld(D, Exp, A->isShared() ? AK_Read : AK_Written, Arg,
- POK_FunctionCall, ClassifyDiagnostic(A),
- Exp->getExprLoc());
+ POK_FunctionCall, Exp->getExprLoc());
// use for adopting a lock
if (isScopedVar)
Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, VD);
@@ -1921,7 +1845,7 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
case attr::LocksExcluded: {
const auto *A = cast<LocksExcludedAttr>(At);
for (auto *Arg : A->args()) {
- warnIfMutexHeld(D, Exp, Arg, ClassifyDiagnostic(A));
+ warnIfMutexHeld(D, Exp, Arg);
// use for deferring a lock
if (isScopedVar)
Analyzer->getMutexIDs(ScopedReqsAndExcludes, A, Exp, D, VD);
@@ -1939,23 +1863,21 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
// FIXME -- should only fully remove if the attribute refers to 'this'.
bool Dtor = isa<CXXDestructorDecl>(D);
for (const auto &M : ExclusiveLocksToRemove)
- Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive, CapDiagKind);
+ Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Exclusive);
for (const auto &M : SharedLocksToRemove)
- Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared, CapDiagKind);
+ Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Shared);
for (const auto &M : GenericLocksToRemove)
- Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic, CapDiagKind);
+ Analyzer->removeLock(FSet, M, Loc, Dtor, LK_Generic);
// Add locks.
FactEntry::SourceKind Source =
isScopedVar ? FactEntry::Managed : FactEntry::Acquired;
for (const auto &M : ExclusiveLocksToAdd)
- Analyzer->addLock(
- FSet, std::make_unique<LockableFactEntry>(M, LK_Exclusive, Loc, Source),
- CapDiagKind);
+ Analyzer->addLock(FSet, std::make_unique<LockableFactEntry>(M, LK_Exclusive,
+ Loc, Source));
for (const auto &M : SharedLocksToAdd)
Analyzer->addLock(
- FSet, std::make_unique<LockableFactEntry>(M, LK_Shared, Loc, Source),
- CapDiagKind);
+ FSet, std::make_unique<LockableFactEntry>(M, LK_Shared, Loc, Source));
if (isScopedVar) {
// Add the managing object as a dummy mutex, mapped to the underlying mutex.
@@ -1976,7 +1898,7 @@ void BuildLockset::handleCall(const Expr *Exp, const NamedDecl *D,
ScopedEntry->addExclusiveUnlock(M);
for (const auto &M : SharedLocksToRemove)
ScopedEntry->addSharedUnlock(M);
- Analyzer->addLock(FSet, std::move(ScopedEntry), CapDiagKind);
+ Analyzer->addLock(FSet, std::move(ScopedEntry));
}
}
@@ -2066,16 +1988,27 @@ void BuildLockset::VisitCallExpr(const CallExpr *Exp) {
examineArguments(CE->getDirectCallee(), CE->arg_begin(), CE->arg_end());
} else if (const auto *OE = dyn_cast<CXXOperatorCallExpr>(Exp)) {
- auto OEop = OE->getOperator();
+ OverloadedOperatorKind OEop = OE->getOperator();
switch (OEop) {
- case OO_Equal: {
- const Expr *Target = OE->getArg(0);
- const Expr *Source = OE->getArg(1);
- checkAccess(Target, AK_Written);
- checkAccess(Source, AK_Read);
+ case OO_Equal:
+ case OO_PlusEqual:
+ case OO_MinusEqual:
+ case OO_StarEqual:
+ case OO_SlashEqual:
+ case OO_PercentEqual:
+ case OO_CaretEqual:
+ case OO_AmpEqual:
+ case OO_PipeEqual:
+ case OO_LessLessEqual:
+ case OO_GreaterGreaterEqual:
+ checkAccess(OE->getArg(1), AK_Read);
+ LLVM_FALLTHROUGH;
+ case OO_PlusPlus:
+ case OO_MinusMinus:
+ checkAccess(OE->getArg(0), AK_Written);
break;
- }
case OO_Star:
+ case OO_ArrowStar:
case OO_Arrow:
case OO_Subscript:
if (!(OEop == OO_Star && OE->getNumArgs() > 1)) {
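A hedged sketch of what the widened operator handling above now catches; the types, the raw attribute spellings, and the function below are illustrative, not part of this patch:

struct __attribute__((capability("mutex"))) Mutex {
  void Lock() __attribute__((acquire_capability()));
  void Unlock() __attribute__((release_capability()));
};
struct Counter { Counter &operator+=(int); };
Mutex mu;
Counter c __attribute__((guarded_by(mu)));
void bump(int n) {
  // CXXOperatorCallExpr with OEop == OO_PlusEqual: arg 1 (`n`) is checked
  // as a read, then control falls through so arg 0 (`c`) is checked as a
  // write; under -Wthread-safety this warns because `mu` is not held.
  c += n;
}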
@@ -2204,7 +2137,8 @@ bool ThreadSafetyAnalyzer::join(const FactEntry &A, const FactEntry &B,
if (CanModify || !ShouldTakeB)
return ShouldTakeB;
}
- Handler.handleExclusiveAndShared("mutex", B.toString(), B.loc(), A.loc());
+ Handler.handleExclusiveAndShared(B.getKind(), B.toString(), B.loc(),
+ A.loc());
// Take the exclusive capability to reduce further warnings.
return CanModify && B.kind() == LK_Exclusive;
} else {
@@ -2344,7 +2278,6 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
CapExprSet ExclusiveLocksToAdd;
CapExprSet SharedLocksToAdd;
- StringRef CapDiagKind = "mutex";
SourceLocation Loc = D->getLocation();
for (const auto *Attr : D->attrs()) {
@@ -2352,7 +2285,6 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
if (const auto *A = dyn_cast<RequiresCapabilityAttr>(Attr)) {
getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
nullptr, D);
- CapDiagKind = ClassifyDiagnostic(A);
} else if (const auto *A = dyn_cast<ReleaseCapabilityAttr>(Attr)) {
// UNLOCK_FUNCTION() is used to hide the underlying lock implementation.
// We must ignore such methods.
@@ -2361,14 +2293,12 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
getMutexIDs(A->isShared() ? SharedLocksToAdd : ExclusiveLocksToAdd, A,
nullptr, D);
getMutexIDs(LocksReleased, A, nullptr, D);
- CapDiagKind = ClassifyDiagnostic(A);
} else if (const auto *A = dyn_cast<AcquireCapabilityAttr>(Attr)) {
if (A->args_size() == 0)
return;
getMutexIDs(A->isShared() ? SharedLocksAcquired
: ExclusiveLocksAcquired,
A, nullptr, D);
- CapDiagKind = ClassifyDiagnostic(A);
} else if (isa<ExclusiveTrylockFunctionAttr>(Attr)) {
// Don't try to check trylock functions for now.
return;
@@ -2385,12 +2315,12 @@ void ThreadSafetyAnalyzer::runAnalysis(AnalysisDeclContext &AC) {
for (const auto &Mu : ExclusiveLocksToAdd) {
auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Exclusive, Loc,
FactEntry::Declared);
- addLock(InitialLockset, std::move(Entry), CapDiagKind, true);
+ addLock(InitialLockset, std::move(Entry), true);
}
for (const auto &Mu : SharedLocksToAdd) {
auto Entry = std::make_unique<LockableFactEntry>(Mu, LK_Shared, Loc,
FactEntry::Declared);
- addLock(InitialLockset, std::move(Entry), CapDiagKind, true);
+ addLock(InitialLockset, std::move(Entry), true);
}
}
diff --git a/contrib/llvm-project/clang/lib/Analysis/ThreadSafetyCommon.cpp b/contrib/llvm-project/clang/lib/Analysis/ThreadSafetyCommon.cpp
index e6b4a05501e2..66413f84907a 100644
--- a/contrib/llvm-project/clang/lib/Analysis/ThreadSafetyCommon.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/ThreadSafetyCommon.cpp
@@ -86,6 +86,28 @@ static bool isCalleeArrow(const Expr *E) {
return ME ? ME->isArrow() : false;
}
+static StringRef ClassifyDiagnostic(const CapabilityAttr *A) {
+ return A->getName();
+}
+
+static StringRef ClassifyDiagnostic(QualType VDT) {
+ // We need to look at the declaration of the type of the value to determine
+ // which capability kind it is. The type should either be a record or a
+ // typedef, or a pointer or reference thereof.
+ if (const auto *RT = VDT->getAs<RecordType>()) {
+ if (const auto *RD = RT->getDecl())
+ if (const auto *CA = RD->getAttr<CapabilityAttr>())
+ return ClassifyDiagnostic(CA);
+ } else if (const auto *TT = VDT->getAs<TypedefType>()) {
+ if (const auto *TD = TT->getDecl())
+ if (const auto *CA = TD->getAttr<CapabilityAttr>())
+ return ClassifyDiagnostic(CA);
+ } else if (VDT->isPointerType() || VDT->isReferenceType())
+ return ClassifyDiagnostic(VDT->getPointeeType());
+
+ return "mutex";
+}
+
/// Translate a clang expression in an attribute to a til::SExpr.
/// Constructs the context from D, DeclExp, and SelfDecl.
///
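A minimal sketch of what the two ClassifyDiagnostic overloads resolve; the annotated type here is hypothetical:

// The capability attribute's name becomes the kind used in diagnostics.
struct __attribute__((capability("role"))) Role {};
// ClassifyDiagnostic(QualType) returns "role" for Role, Role *, and Role &
// (the pointer/reference case recurses on the pointee); a typedef carrying
// the attribute is caught by the TypedefType branch; anything unannotated
// falls back to the default kind, "mutex".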
@@ -152,16 +174,17 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
CallingContext *Ctx) {
if (!AttrExp)
- return CapabilityExpr(nullptr, false);
+ return CapabilityExpr();
if (const auto* SLit = dyn_cast<StringLiteral>(AttrExp)) {
if (SLit->getString() == StringRef("*"))
// The "*" expr is a universal lock, which essentially turns off
// checks until it is removed from the lockset.
- return CapabilityExpr(new (Arena) til::Wildcard(), false);
+ return CapabilityExpr(new (Arena) til::Wildcard(), StringRef("wildcard"),
+ false);
else
// Ignore other string literals for now.
- return CapabilityExpr(nullptr, false);
+ return CapabilityExpr();
}
bool Neg = false;
@@ -183,14 +206,16 @@ CapabilityExpr SExprBuilder::translateAttrExpr(const Expr *AttrExp,
// Trap mutex expressions like nullptr, or 0.
// Any literal value is nonsense.
if (!E || isa<til::Literal>(E))
- return CapabilityExpr(nullptr, false);
+ return CapabilityExpr();
+
+ StringRef Kind = ClassifyDiagnostic(AttrExp->getType());
// Hack to deal with smart pointers -- strip off top-level pointer casts.
if (const auto *CE = dyn_cast<til::Cast>(E)) {
if (CE->castOpcode() == til::CAST_objToPtr)
- return CapabilityExpr(CE->expr(), Neg);
+ return CapabilityExpr(CE->expr(), Kind, Neg);
}
- return CapabilityExpr(E, Neg);
+ return CapabilityExpr(E, Kind, Neg);
}
// Translate a clang statement or expression to a TIL expression.
diff --git a/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp b/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp
index 811146e50b45..800943a99d87 100644
--- a/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp
+++ b/contrib/llvm-project/clang/lib/Analysis/UninitializedValues.cpp
@@ -148,7 +148,7 @@ public:
Value getValue(const CFGBlock *block, const CFGBlock *dstBlock,
const VarDecl *vd) {
const Optional<unsigned> &idx = declToIndex.getValueIndex(vd);
- assert(idx.hasValue());
+ assert(idx);
return getValueVector(block)[idx.getValue()];
}
};
@@ -209,7 +209,7 @@ void CFGBlockValues::resetScratch() {
ValueVector::reference CFGBlockValues::operator[](const VarDecl *vd) {
const Optional<unsigned> &idx = declToIndex.getValueIndex(vd);
- assert(idx.hasValue());
+ assert(idx);
return scratch[idx.getValue()];
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Attributes.cpp b/contrib/llvm-project/clang/lib/Basic/Attributes.cpp
index 62eea9c59082..960c9773d192 100644
--- a/contrib/llvm-project/clang/lib/Basic/Attributes.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Attributes.cpp
@@ -5,9 +5,9 @@
#include "llvm/ADT/StringSwitch.h"
using namespace clang;
-int clang::hasAttribute(AttrSyntax Syntax, const IdentifierInfo *Scope,
- const IdentifierInfo *Attr, const TargetInfo &Target,
- const LangOptions &LangOpts) {
+int clang::hasAttribute(AttributeCommonInfo::Syntax Syntax,
+ const IdentifierInfo *Scope, const IdentifierInfo *Attr,
+ const TargetInfo &Target, const LangOptions &LangOpts) {
StringRef Name = Attr->getName();
// Normalize the attribute name, __foo__ becomes foo.
if (Name.size() >= 4 && Name.startswith("__") && Name.endswith("__"))
@@ -85,6 +85,10 @@ bool AttributeCommonInfo::isGNUScope() const {
return ScopeName && (ScopeName->isStr("gnu") || ScopeName->isStr("__gnu__"));
}
+bool AttributeCommonInfo::isClangScope() const {
+ return ScopeName && (ScopeName->isStr("clang") || ScopeName->isStr("_Clang"));
+}
+
#include "clang/Sema/AttrParsedAttrKinds.inc"
static SmallString<64> normalizeName(const IdentifierInfo *Name,
diff --git a/contrib/llvm-project/clang/lib/Basic/BuiltinTargetFeatures.h b/contrib/llvm-project/clang/lib/Basic/BuiltinTargetFeatures.h
new file mode 100644
index 000000000000..4d3358de5076
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Basic/BuiltinTargetFeatures.h
@@ -0,0 +1,95 @@
+//===-- BuiltinTargetFeatures.h - Target features for builtin --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// These are the internal required target features for builtins.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_BASIC_BUILTINTARGETFEATURES_H
+#define LLVM_CLANG_LIB_BASIC_BUILTINTARGETFEATURES_H
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringRef.h"
+
+using llvm::StringRef;
+
+namespace clang {
+namespace Builtin {
+/// TargetFeatures - This class is used to check whether the builtin function
+/// has the required target-specific features. It is able to support the
+/// combination of ',' (and), '|' (or), and '()'. By default, the priority of
+/// ',' is higher than that of '|'.
+/// E.g.:
+/// A,B|C means the builtin function requires both A and B, or C.
+/// If we want the builtin function to require both A and B, or both A and C,
+/// there are two ways: A,B|A,C or A,(B|C).
+/// The FeaturesList should not contain spaces, and brackets must appear in
+/// pairs.
+class TargetFeatures {
+ struct FeatureListStatus {
+ bool HasFeatures;
+ StringRef CurFeaturesList;
+ };
+
+ const llvm::StringMap<bool> &CallerFeatureMap;
+
+ FeatureListStatus getAndFeatures(StringRef FeatureList) {
+ int InParentheses = 0;
+ bool HasFeatures = true;
+ size_t SubexpressionStart = 0;
+ for (size_t i = 0, e = FeatureList.size(); i < e; ++i) {
+ char CurrentToken = FeatureList[i];
+ switch (CurrentToken) {
+ default:
+ break;
+ case '(':
+ if (InParentheses == 0)
+ SubexpressionStart = i + 1;
+ ++InParentheses;
+ break;
+ case ')':
+ --InParentheses;
+ assert(InParentheses >= 0 && "Parentheses are not in pair");
+ LLVM_FALLTHROUGH;
+ case '|':
+ case ',':
+ if (InParentheses == 0) {
+ if (HasFeatures && i != SubexpressionStart) {
+ StringRef F = FeatureList.slice(SubexpressionStart, i);
+ HasFeatures = CurrentToken == ')' ? hasRequiredFeatures(F)
+ : CallerFeatureMap.lookup(F);
+ }
+ SubexpressionStart = i + 1;
+ if (CurrentToken == '|') {
+ return {HasFeatures, FeatureList.substr(SubexpressionStart)};
+ }
+ }
+ break;
+ }
+ }
+ assert(InParentheses == 0 && "Parentheses are not in pair");
+ if (HasFeatures && SubexpressionStart != FeatureList.size())
+ HasFeatures =
+ CallerFeatureMap.lookup(FeatureList.substr(SubexpressionStart));
+ return {HasFeatures, StringRef()};
+ }
+
+public:
+ bool hasRequiredFeatures(StringRef FeatureList) {
+ FeatureListStatus FS = {false, FeatureList};
+ while (!FS.HasFeatures && !FS.CurFeaturesList.empty())
+ FS = getAndFeatures(FS.CurFeaturesList);
+ return FS.HasFeatures;
+ }
+
+ TargetFeatures(const llvm::StringMap<bool> &CallerFeatureMap)
+ : CallerFeatureMap(CallerFeatureMap) {}
+};
+
+} // namespace Builtin
+} // namespace clang
+#endif /* LLVM_CLANG_LIB_BASIC_BUILTINTARGETFEATURES_H */
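A hedged usage sketch for the class above; the feature names are illustrative, and the header is private to clang/lib/Basic:

#include "BuiltinTargetFeatures.h"
#include "llvm/ADT/StringMap.h"

bool demo() {
  llvm::StringMap<bool> Caller;
  Caller["avx"] = true;
  Caller["sse4.2"] = true;
  clang::Builtin::TargetFeatures TF(Caller);
  bool A = TF.hasRequiredFeatures("avx,sse4.2");           // true: ',' means "and"
  bool B = TF.hasRequiredFeatures("avx512f|avx");          // true: '|' means "or"
  bool C = TF.hasRequiredFeatures("avx,(avx512f|sse4.2)"); // true: grouped "or"
  return A && B && C;
}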
diff --git a/contrib/llvm-project/clang/lib/Basic/Builtins.cpp b/contrib/llvm-project/clang/lib/Basic/Builtins.cpp
index 6d278e9c4a22..b42e8f416cfc 100644
--- a/contrib/llvm-project/clang/lib/Basic/Builtins.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Builtins.cpp
@@ -11,6 +11,7 @@
//===----------------------------------------------------------------------===//
#include "clang/Basic/Builtins.h"
+#include "BuiltinTargetFeatures.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/TargetInfo.h"
@@ -48,18 +49,22 @@ void Builtin::Context::InitializeTarget(const TargetInfo &Target,
}
bool Builtin::Context::isBuiltinFunc(llvm::StringRef FuncName) {
- for (unsigned i = Builtin::NotBuiltin + 1; i != Builtin::FirstTSBuiltin; ++i)
- if (FuncName.equals(BuiltinInfo[i].Name))
+ bool InStdNamespace = FuncName.consume_front("std-");
+ for (unsigned i = Builtin::NotBuiltin + 1; i != Builtin::FirstTSBuiltin;
+ ++i) {
+ if (FuncName.equals(BuiltinInfo[i].Name) &&
+ (bool)strchr(BuiltinInfo[i].Attributes, 'z') == InStdNamespace)
return strchr(BuiltinInfo[i].Attributes, 'f') != nullptr;
+ }
return false;
}
-bool Builtin::Context::builtinIsSupported(const Builtin::Info &BuiltinInfo,
- const LangOptions &LangOpts) {
+/// Is this builtin supported according to the given language options?
+static bool builtinIsSupported(const Builtin::Info &BuiltinInfo,
+ const LangOptions &LangOpts) {
bool BuiltinsUnsupported =
- (LangOpts.NoBuiltin || LangOpts.isNoBuiltinFunc(BuiltinInfo.Name)) &&
- strchr(BuiltinInfo.Attributes, 'f');
+ LangOpts.NoBuiltin && strchr(BuiltinInfo.Attributes, 'f') != nullptr;
bool CorBuiltinsUnsupported =
!LangOpts.Coroutines && (BuiltinInfo.Langs & COR_LANG);
bool MathBuiltinsUnsupported =
@@ -111,6 +116,19 @@ void Builtin::Context::initializeBuiltins(IdentifierTable &Table,
for (unsigned i = 0, e = AuxTSRecords.size(); i != e; ++i)
Table.get(AuxTSRecords[i].Name)
.setBuiltinID(i + Builtin::FirstTSBuiltin + TSRecords.size());
+
+ // Step #4: Unregister any builtins specified by -fno-builtin-foo.
+ for (llvm::StringRef Name : LangOpts.NoBuiltinFuncs) {
+ bool InStdNamespace = Name.consume_front("std-");
+ auto NameIt = Table.find(Name);
+ if (NameIt != Table.end()) {
+ unsigned ID = NameIt->second->getBuiltinID();
+ if (ID != Builtin::NotBuiltin && isPredefinedLibFunction(ID) &&
+ isInStdNamespace(ID) == InStdNamespace) {
+ Table.get(Name).setBuiltinID(Builtin::NotBuiltin);
+ }
+ }
+ }
}
unsigned Builtin::Context::getRequiredVectorWidth(unsigned ID) const {
@@ -190,8 +208,18 @@ bool Builtin::Context::performsCallback(unsigned ID,
}
bool Builtin::Context::canBeRedeclared(unsigned ID) const {
- return ID == Builtin::NotBuiltin ||
- ID == Builtin::BI__va_start ||
- (!hasReferenceArgsOrResult(ID) &&
- !hasCustomTypechecking(ID));
+ return ID == Builtin::NotBuiltin || ID == Builtin::BI__va_start ||
+ (!hasReferenceArgsOrResult(ID) && !hasCustomTypechecking(ID)) ||
+ isInStdNamespace(ID);
+}
+
+bool Builtin::evaluateRequiredTargetFeatures(
+ StringRef RequiredFeatures, const llvm::StringMap<bool> &TargetFeatureMap) {
+ // Return true if the builtin doesn't have any required features.
+ if (RequiredFeatures.empty())
+ return true;
+ assert(!RequiredFeatures.contains(' ') && "Space in feature list");
+
+ TargetFeatures TF(TargetFeatureMap);
+ return TF.hasRequiredFeatures(RequiredFeatures);
}
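A hedged sketch of the "std-" naming convention the lookups above rely on; the flag and builtin name are illustrative:

// Builtins living in namespace std carry the 'z' attribute letter and are
// addressed with a "std-" prefix, e.g. a -fno-builtin-std-move flag would
// arrive in NoBuiltinFuncs as the string "std-move" (illustrative name).
llvm::StringRef Name = "std-move";
bool InStdNamespace = Name.consume_front("std-"); // true; Name is now "move"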
diff --git a/contrib/llvm-project/clang/lib/Basic/Cuda.cpp b/contrib/llvm-project/clang/lib/Basic/Cuda.cpp
index 2d75578b3de0..d75cc410f2d9 100644
--- a/contrib/llvm-project/clang/lib/Basic/Cuda.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Cuda.cpp
@@ -113,6 +113,7 @@ static const CudaArchToStringMap arch_names[] = {
GFX(909), // gfx909
GFX(90a), // gfx90a
GFX(90c), // gfx90c
+ GFX(940), // gfx940
GFX(1010), // gfx1010
GFX(1011), // gfx1011
GFX(1012), // gfx1012
@@ -123,6 +124,11 @@ static const CudaArchToStringMap arch_names[] = {
GFX(1033), // gfx1033
GFX(1034), // gfx1034
GFX(1035), // gfx1035
+ GFX(1036), // gfx1036
+ GFX(1100), // gfx1100
+ GFX(1101), // gfx1101
+ GFX(1102), // gfx1102
+ GFX(1103), // gfx1103
{CudaArch::Generic, "generic", ""},
// clang-format on
};
@@ -214,8 +220,7 @@ CudaVersion MaxVersionForCudaArch(CudaArch A) {
}
CudaVersion ToCudaVersion(llvm::VersionTuple Version) {
- int IVer =
- Version.getMajor() * 10 + Version.getMinor().getValueOr(0);
+ int IVer = Version.getMajor() * 10 + Version.getMinor().value_or(0);
switch(IVer) {
case 70:
return CudaVersion::CUDA_70;
diff --git a/contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp b/contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp
index ac4b9d2cd5a2..dbe62ecb50d3 100644
--- a/contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Diagnostic.cpp
@@ -25,8 +25,9 @@
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/CrashRecoveryContext.h"
-#include "llvm/Support/Locale.h"
+#include "llvm/Support/Unicode.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
@@ -130,7 +131,7 @@ bool DiagnosticsEngine::popMappings(SourceLocation Loc) {
return true;
}
-void DiagnosticsEngine::Reset() {
+void DiagnosticsEngine::Reset(bool soft /*=false*/) {
ErrorOccurred = false;
UncompilableErrorOccurred = false;
FatalErrorOccurred = false;
@@ -145,15 +146,17 @@ void DiagnosticsEngine::Reset() {
LastDiagLevel = DiagnosticIDs::Ignored;
DelayedDiagID = 0;
- // Clear state related to #pragma diagnostic.
- DiagStates.clear();
- DiagStatesByLoc.clear();
- DiagStateOnPushStack.clear();
+ if (!soft) {
+ // Clear state related to #pragma diagnostic.
+ DiagStates.clear();
+ DiagStatesByLoc.clear();
+ DiagStateOnPushStack.clear();
- // Create a DiagState and DiagStatePoint representing diagnostic changes
- // through command-line.
- DiagStates.emplace_back();
- DiagStatesByLoc.appendFirst(&DiagStates.back());
+ // Create a DiagState and DiagStatePoint representing diagnostic changes
+ // through command-line.
+ DiagStates.emplace_back();
+ DiagStatesByLoc.appendFirst(&DiagStates.back());
+ }
}
void DiagnosticsEngine::SetDelayedDiagnostic(unsigned DiagID, StringRef Arg1,
@@ -801,6 +804,50 @@ FormatDiagnostic(SmallVectorImpl<char> &OutStr) const {
FormatDiagnostic(Diag.begin(), Diag.end(), OutStr);
}
+/// pushEscapedString - Append Str to the diagnostic buffer,
+/// escaping non-printable characters and ill-formed code unit sequences.
+static void pushEscapedString(StringRef Str, SmallVectorImpl<char> &OutStr) {
+ OutStr.reserve(OutStr.size() + Str.size());
+ auto *Begin = reinterpret_cast<const unsigned char *>(Str.data());
+ llvm::raw_svector_ostream OutStream(OutStr);
+ const unsigned char *End = Begin + Str.size();
+ while (Begin != End) {
+ // ASCII case
+ if (isPrintable(*Begin) || isWhitespace(*Begin)) {
+ OutStream << *Begin;
+ ++Begin;
+ continue;
+ }
+ if (llvm::isLegalUTF8Sequence(Begin, End)) {
+ llvm::UTF32 CodepointValue;
+ llvm::UTF32 *CpPtr = &CodepointValue;
+ const unsigned char *CodepointBegin = Begin;
+ const unsigned char *CodepointEnd =
+ Begin + llvm::getNumBytesForUTF8(*Begin);
+ llvm::ConversionResult Res = llvm::ConvertUTF8toUTF32(
+ &Begin, CodepointEnd, &CpPtr, CpPtr + 1, llvm::strictConversion);
+ (void)Res;
+ assert(
+ llvm::conversionOK == Res &&
+ "the sequence is legal UTF-8 but we couldn't convert it to UTF-32");
+ assert(Begin == CodepointEnd &&
+ "we must be further along in the string now");
+ if (llvm::sys::unicode::isPrintable(CodepointValue) ||
+ llvm::sys::unicode::isFormatting(CodepointValue)) {
+ OutStr.append(CodepointBegin, CodepointEnd);
+ continue;
+ }
+ // Unprintable code point.
+ OutStream << "<U+" << llvm::format_hex_no_prefix(CodepointValue, 4, true)
+ << ">";
+ continue;
+ }
+ // Invalid code unit.
+ OutStream << "<" << llvm::format_hex_no_prefix(*Begin, 2, true) << ">";
+ ++Begin;
+ }
+}
+
void Diagnostic::
FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
SmallVectorImpl<char> &OutStr) const {
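A behaviour sketch for pushEscapedString with illustrative inputs (the function is file-local, so this is expository rather than a public API example):

llvm::SmallString<64> Out;
pushEscapedString("ok\ttab", Out); // printable ASCII and whitespace pass through
pushEscapedString("\xff", Out);    // ill-formed UTF-8 code unit -> "<ff>"
pushEscapedString("\x01", Out);    // legal UTF-8 but unprintable -> "<U+0001>"
// Printable or formatting non-ASCII code points are appended unchanged.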
@@ -811,11 +858,7 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
StringRef(DiagStr, DiagEnd - DiagStr).equals("%0") &&
getArgKind(0) == DiagnosticsEngine::ak_std_string) {
const std::string &S = getArgStdStr(0);
- for (char c : S) {
- if (llvm::sys::locale::isPrint(c) || c == '\t') {
- OutStr.push_back(c);
- }
- }
+ pushEscapedString(S, OutStr);
return;
}
@@ -922,7 +965,7 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
case DiagnosticsEngine::ak_std_string: {
const std::string &S = getArgStdStr(ArgNo);
assert(ModifierLen == 0 && "No modifiers for strings yet");
- OutStr.append(S.begin(), S.end());
+ pushEscapedString(S, OutStr);
break;
}
case DiagnosticsEngine::ak_c_string: {
@@ -932,8 +975,7 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
// Don't crash if get passed a null pointer by accident.
if (!S)
S = "(null)";
-
- OutStr.append(S, S + strlen(S));
+ pushEscapedString(S, OutStr);
break;
}
// ---- INTEGERS ----
@@ -983,13 +1025,13 @@ FormatDiagnostic(const char *DiagStr, const char *DiagEnd,
if (const char *S = tok::getPunctuatorSpelling(Kind))
// Quoted token spelling for punctuators.
Out << '\'' << S << '\'';
- else if (const char *S = tok::getKeywordSpelling(Kind))
+ else if ((S = tok::getKeywordSpelling(Kind)))
// Unquoted token spelling for keywords.
Out << S;
- else if (const char *S = getTokenDescForDiagnostic(Kind))
+ else if ((S = getTokenDescForDiagnostic(Kind)))
// Unquoted translatable token name.
Out << S;
- else if (const char *S = tok::getTokenName(Kind))
+ else if ((S = tok::getTokenName(Kind)))
// Debug name, shouldn't appear in user-facing diagnostics.
Out << '<' << S << '>';
else
@@ -1138,6 +1180,14 @@ StoredDiagnostic::StoredDiagnostic(DiagnosticsEngine::Level Level, unsigned ID,
{
}
+llvm::raw_ostream &clang::operator<<(llvm::raw_ostream &OS,
+ const StoredDiagnostic &SD) {
+ if (SD.getLocation().hasManager())
+ OS << SD.getLocation().printToString(SD.getLocation().getManager()) << ": ";
+ OS << SD.getMessage();
+ return OS;
+}
+
/// IncludeInDiagnosticCounts - This method (whose default implementation
/// returns true) indicates whether the diagnostics handled by this
/// DiagnosticConsumer should be included in the number of diagnostics
diff --git a/contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp b/contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp
index 87db131992e4..8e2593b103d1 100644
--- a/contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/DiagnosticIDs.cpp
@@ -607,6 +607,7 @@ namespace {
uint16_t NameOffset;
uint16_t Members;
uint16_t SubGroups;
+ StringRef Documentation;
// String is stored with a pascal-style length byte.
StringRef getName() const {
@@ -618,28 +619,47 @@ namespace {
// Second the table of options, sorted by name for fast binary lookup.
static const WarningOption OptionTable[] = {
-#define DIAG_ENTRY(GroupName, FlagNameOffset, Members, SubGroups) \
- {FlagNameOffset, Members, SubGroups},
+#define DIAG_ENTRY(GroupName, FlagNameOffset, Members, SubGroups, Docs) \
+ {FlagNameOffset, Members, SubGroups, Docs},
#include "clang/Basic/DiagnosticGroups.inc"
#undef DIAG_ENTRY
};
+/// Given a diagnostic group ID, return its documentation.
+StringRef DiagnosticIDs::getWarningOptionDocumentation(diag::Group Group) {
+ return OptionTable[static_cast<int>(Group)].Documentation;
+}
+
StringRef DiagnosticIDs::getWarningOptionForGroup(diag::Group Group) {
return OptionTable[static_cast<int>(Group)].getName();
}
+llvm::Optional<diag::Group>
+DiagnosticIDs::getGroupForWarningOption(StringRef Name) {
+ const auto *Found = llvm::partition_point(
+ OptionTable, [=](const WarningOption &O) { return O.getName() < Name; });
+ if (Found == std::end(OptionTable) || Found->getName() != Name)
+ return llvm::None;
+ return static_cast<diag::Group>(Found - OptionTable);
+}
+
+llvm::Optional<diag::Group> DiagnosticIDs::getGroupForDiag(unsigned DiagID) {
+ if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
+ return static_cast<diag::Group>(Info->getOptionGroupIndex());
+ return llvm::None;
+}
+
/// getWarningOptionForDiag - Return the lowest-level warning option that
/// enables the specified diagnostic. If there is no -Wfoo flag that controls
/// the diagnostic, this returns null.
StringRef DiagnosticIDs::getWarningOptionForDiag(unsigned DiagID) {
- if (const StaticDiagInfoRec *Info = GetDiagInfo(DiagID))
- return getWarningOptionForGroup(
- static_cast<diag::Group>(Info->getOptionGroupIndex()));
+ if (auto G = getGroupForDiag(DiagID))
+ return getWarningOptionForGroup(*G);
return StringRef();
}
std::vector<std::string> DiagnosticIDs::getDiagnosticFlags() {
- std::vector<std::string> Res;
+ std::vector<std::string> Res{"-W", "-Wno-"};
for (size_t I = 1; DiagGroupNames[I] != '\0';) {
std::string Diag(DiagGroupNames + I + 1, DiagGroupNames[I]);
I += DiagGroupNames[I] + 1;
@@ -683,12 +703,10 @@ static bool getDiagnosticsInGroup(diag::Flavor Flavor,
bool
DiagnosticIDs::getDiagnosticsInGroup(diag::Flavor Flavor, StringRef Group,
SmallVectorImpl<diag::kind> &Diags) const {
- auto Found = llvm::partition_point(
- OptionTable, [=](const WarningOption &O) { return O.getName() < Group; });
- if (Found == std::end(OptionTable) || Found->getName() != Group)
- return true; // Option not found.
-
- return ::getDiagnosticsInGroup(Flavor, Found, Diags);
+ if (llvm::Optional<diag::Group> G = getGroupForWarningOption(Group))
+ return ::getDiagnosticsInGroup(
+ Flavor, &OptionTable[static_cast<unsigned>(*G)], Diags);
+ return true;
}
void DiagnosticIDs::getAllDiagnostics(diag::Flavor Flavor,
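getGroupForWarningOption above depends on OptionTable being sorted by name; a stand-alone sketch of the same partition_point pattern, with a toy table and hypothetical names:

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include <iterator>

struct Opt { llvm::StringRef Name; };
static const Opt Table[] = {{"conversion"}, {"unused"}, {"vla"}}; // sorted
static const Opt *find(llvm::StringRef Name) {
  // First entry whose name is not less than Name, i.e. a binary search.
  const Opt *Found = llvm::partition_point(
      Table, [=](const Opt &O) { return O.Name < Name; });
  return (Found != std::end(Table) && Found->Name == Name) ? Found : nullptr;
}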
diff --git a/contrib/llvm-project/clang/lib/Basic/FileManager.cpp b/contrib/llvm-project/clang/lib/Basic/FileManager.cpp
index f4cf27848d7d..b66780a1d1d1 100644
--- a/contrib/llvm-project/clang/lib/Basic/FileManager.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/FileManager.cpp
@@ -105,10 +105,10 @@ void FileManager::addAncestorsAsVirtualDirs(StringRef Path) {
return;
// Add the virtual directory to the cache.
- auto UDE = std::make_unique<DirectoryEntry>();
+ auto *UDE = new (DirsAlloc.Allocate()) DirectoryEntry();
UDE->Name = NamedDirEnt.first();
- NamedDirEnt.second = *UDE.get();
- VirtualDirectoryEntries.push_back(std::move(UDE));
+ NamedDirEnt.second = *UDE;
+ VirtualDirectoryEntries.push_back(UDE);
// Recursively add the other ancestors.
addAncestorsAsVirtualDirs(DirName);
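A sketch of the allocation pattern introduced here, assuming (per the matching FileManager.h change, which this diff does not show) that DirsAlloc is an llvm::SpecificBumpPtrAllocator; the Entry type stands in for DirectoryEntry/FileEntry:

#include "llvm/Support/Allocator.h"

struct Entry { unsigned UID = 0; };
llvm::SpecificBumpPtrAllocator<Entry> Alloc;
// Allocate() returns uninitialized storage for one Entry; placement new
// constructs it in arena memory, giving stable pointers that are destroyed
// together when the allocator (and thus its owner) goes away.
Entry *E = new (Alloc.Allocate()) Entry();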
@@ -172,14 +172,15 @@ FileManager::getDirectoryRef(StringRef DirName, bool CacheFailure) {
// same inode (this occurs on Unix-like systems when one dir is
// symlinked to another, for example) or the same path (on
// Windows).
- DirectoryEntry &UDE = UniqueRealDirs[Status.getUniqueID()];
+ DirectoryEntry *&UDE = UniqueRealDirs[Status.getUniqueID()];
- NamedDirEnt.second = UDE;
- if (UDE.getName().empty()) {
+ if (!UDE) {
// We don't have this directory yet, add it. We use the string
// key from the SeenDirEntries map as the string.
- UDE.Name = InterndDirName;
+ UDE = new (DirsAlloc.Allocate()) DirectoryEntry();
+ UDE->Name = InterndDirName;
}
+ NamedDirEnt.second = *UDE;
return DirectoryEntryRef(NamedDirEnt);
}
@@ -268,11 +269,14 @@ FileManager::getFileRef(StringRef Filename, bool openFile, bool CacheFailure) {
// It exists. See if we have already opened a file with the same inode.
// This occurs when one dir is symlinked to another, for example.
- FileEntry &UFE = UniqueRealFiles[Status.getUniqueID()];
+ FileEntry *&UFE = UniqueRealFiles[Status.getUniqueID()];
+ bool ReusingEntry = UFE != nullptr;
+ if (!UFE)
+ UFE = new (FilesAlloc.Allocate()) FileEntry();
if (Status.getName() == Filename) {
// The name matches. Set the FileEntry.
- NamedFileEnt->second = FileEntryRef::MapValue(UFE, DirInfo);
+ NamedFileEnt->second = FileEntryRef::MapValue(*UFE, DirInfo);
} else {
// Name mismatch. We need a redirect. First grab the actual entry we want
// to return.
@@ -283,18 +287,55 @@ FileManager::getFileRef(StringRef Filename, bool openFile, bool CacheFailure) {
// name to users (in diagnostics) and to tools that don't have access to
// the VFS (in debug info and dependency '.d' files).
//
- // FIXME: This is pretty complicated. It's also inconsistent with how
- // "real" filesystems behave and confuses parts of clang expect to see the
- // name-as-accessed on the \a FileEntryRef. Maybe the returned \a
- // FileEntryRef::getName() could return the accessed name unmodified, but
- // make the external name available via a separate API.
+ // FIXME: This is pretty complex and has some very complicated interactions
+ // with the rest of clang. It's also inconsistent with how "real"
+ // filesystems behave and confuses parts of clang that expect to see the
+ // name-as-accessed on the \a FileEntryRef.
+ //
+ // Further, it isn't *just* external names, but will also give back absolute
+ // paths when a relative path was requested - the check is comparing the
+ // name from the status, which is passed an absolute path resolved from the
+ // current working directory. `clang-apply-replacements` appears to depend
+ // on this behaviour, though it's adjusting the working directory, which is
+ // definitely not supported. Once that's fixed, this hack should be able to
+ // be narrowed to apply only when an externally mapped name is given back.
+ //
+ // A potential plan to remove this is as follows -
+ // - Add API to determine if the name has been rewritten by the VFS.
+ // - Fix `clang-apply-replacements` to pass down the absolute path rather
+ // than changing the CWD. Narrow this hack down to just externally
+ // mapped paths.
+ // - Expose the requested filename. One possibility would be to allow
+ // redirection-FileEntryRefs to be returned, rather than returning
+ // the pointed-at-FileEntryRef, and customizing `getName()` to look
+ // through the indirection.
+ // - Update callers such as `HeaderSearch::findUsableModuleForHeader()`
+ // to explicitly use the requested filename rather than just using
+ // `getName()`.
+ // - Add a `FileManager::getExternalPath` API for explicitly getting the
+ // remapped external filename when there is one available. Adopt it in
+ // callers like diagnostics/deps reporting instead of calling
+ // `getName()` directly.
+ // - Switch the meaning of `FileEntryRef::getName()` to get the requested
+ // name, not the external name. Once that sticks, revert callers that
+ // want the requested name back to calling `getName()`.
+ // - Update the VFS to always return the requested name. This could also
+ // return the external name, or just have an API to request it
+ // lazily. The latter has the benefit of making accesses of the
+ // external path easily tracked, but may also require more work than
+ // just returning it up front.
+ // - (Optionally) Add an API to VFS to get the external filename lazily
+ // and update `FileManager::getExternalPath()` to use it instead. This
+ // has the benefit of making such accesses easily tracked, though isn't
+ // necessarily required (and could be more work than just adding it to
+ // e.g. `vfs::Status` up front).
auto &Redirection =
*SeenFileEntries
- .insert({Status.getName(), FileEntryRef::MapValue(UFE, DirInfo)})
+ .insert({Status.getName(), FileEntryRef::MapValue(*UFE, DirInfo)})
.first;
assert(Redirection.second->V.is<FileEntry *>() &&
"filename redirected to a non-canonical filename?");
- assert(Redirection.second->V.get<FileEntry *>() == &UFE &&
+ assert(Redirection.second->V.get<FileEntry *>() == UFE &&
"filename from getStatValue() refers to wrong file");
// Cache the redirection in the previously-inserted entry, still available
@@ -306,16 +347,18 @@ FileManager::getFileRef(StringRef Filename, bool openFile, bool CacheFailure) {
}
FileEntryRef ReturnedRef(*NamedFileEnt);
- if (UFE.isValid()) { // Already have an entry with this inode, return it.
+ if (ReusingEntry) { // Already have an entry with this inode, return it.
- // FIXME: this hack ensures that if we look up a file by a virtual path in
- // the VFS that the getDir() will have the virtual path, even if we found
- // the file by a 'real' path first. This is required in order to find a
- // module's structure when its headers/module map are mapped in the VFS.
- // We should remove this as soon as we can properly support a file having
- // multiple names.
- if (&DirInfo.getDirEntry() != UFE.Dir && Status.IsVFSMapped)
- UFE.Dir = &DirInfo.getDirEntry();
+ // FIXME: This hack ensures that `getDir()` will use the path that was
+ // used to lookup this file, even if we found a file by different path
+ // first. This is required in order to find a module's structure when its
+ // headers/module map are mapped in the VFS.
+ //
+ // See above for how this will eventually be removed. `IsVFSMapped`
+ // *cannot* be narrowed to `ExposesExternalVFSPath` as crash reproducers
+ // also depend on this logic and they have `use-external-paths: false`.
+ if (&DirInfo.getDirEntry() != UFE->Dir && Status.IsVFSMapped)
+ UFE->Dir = &DirInfo.getDirEntry();
// Always update LastRef to the last name by which a file was accessed.
// FIXME: Neither this nor always using the first reference is correct; we
@@ -324,28 +367,27 @@ FileManager::getFileRef(StringRef Filename, bool openFile, bool CacheFailure) {
// corresponding FileEntry.
// FIXME: LastRef should be removed from FileEntry once all clients adopt
// FileEntryRef.
- UFE.LastRef = ReturnedRef;
+ UFE->LastRef = ReturnedRef;
return ReturnedRef;
}
// Otherwise, we don't have this file yet, add it.
- UFE.LastRef = ReturnedRef;
- UFE.Size = Status.getSize();
- UFE.ModTime = llvm::sys::toTimeT(Status.getLastModificationTime());
- UFE.Dir = &DirInfo.getDirEntry();
- UFE.UID = NextFileUID++;
- UFE.UniqueID = Status.getUniqueID();
- UFE.IsNamedPipe = Status.getType() == llvm::sys::fs::file_type::fifo_file;
- UFE.File = std::move(F);
- UFE.IsValid = true;
-
- if (UFE.File) {
- if (auto PathName = UFE.File->getName())
- fillRealPathName(&UFE, *PathName);
+ UFE->LastRef = ReturnedRef;
+ UFE->Size = Status.getSize();
+ UFE->ModTime = llvm::sys::toTimeT(Status.getLastModificationTime());
+ UFE->Dir = &DirInfo.getDirEntry();
+ UFE->UID = NextFileUID++;
+ UFE->UniqueID = Status.getUniqueID();
+ UFE->IsNamedPipe = Status.getType() == llvm::sys::fs::file_type::fifo_file;
+ UFE->File = std::move(F);
+
+ if (UFE->File) {
+ if (auto PathName = UFE->File->getName())
+ fillRealPathName(UFE, *PathName);
} else if (!openFile) {
// We should still fill the path even if we aren't opening the file.
- fillRealPathName(&UFE, InterndFileName);
+ fillRealPathName(UFE, InterndFileName);
}
return ReturnedRef;
}
@@ -409,43 +451,46 @@ FileEntryRef FileManager::getVirtualFileRef(StringRef Filename, off_t Size,
llvm::vfs::Status Status;
const char *InterndFileName = NamedFileEnt.first().data();
if (!getStatValue(InterndFileName, Status, true, nullptr)) {
- UFE = &UniqueRealFiles[Status.getUniqueID()];
Status = llvm::vfs::Status(
Status.getName(), Status.getUniqueID(),
llvm::sys::toTimePoint(ModificationTime),
Status.getUser(), Status.getGroup(), Size,
Status.getType(), Status.getPermissions());
- NamedFileEnt.second = FileEntryRef::MapValue(*UFE, *DirInfo);
-
- // If we had already opened this file, close it now so we don't
- // leak the descriptor. We're not going to use the file
- // descriptor anyway, since this is a virtual file.
- if (UFE->File)
- UFE->closeFile();
-
- // If we already have an entry with this inode, return it.
- //
- // FIXME: Surely this should add a reference by the new name, and return
- // it instead...
- if (UFE->isValid())
+ auto &RealFE = UniqueRealFiles[Status.getUniqueID()];
+ if (RealFE) {
+ // If we had already opened this file, close it now so we don't
+ // leak the descriptor. We're not going to use the file
+ // descriptor anyway, since this is a virtual file.
+ if (RealFE->File)
+ RealFE->closeFile();
+ // If we already have an entry with this inode, return it.
+ //
+ // FIXME: Surely this should add a reference by the new name, and return
+ // it instead...
+ NamedFileEnt.second = FileEntryRef::MapValue(*RealFE, *DirInfo);
return FileEntryRef(NamedFileEnt);
-
- UFE->UniqueID = Status.getUniqueID();
- UFE->IsNamedPipe = Status.getType() == llvm::sys::fs::file_type::fifo_file;
- fillRealPathName(UFE, Status.getName());
+ }
+ // File exists, but no entry - create it.
+ RealFE = new (FilesAlloc.Allocate()) FileEntry();
+ RealFE->UniqueID = Status.getUniqueID();
+ RealFE->IsNamedPipe =
+ Status.getType() == llvm::sys::fs::file_type::fifo_file;
+ fillRealPathName(RealFE, Status.getName());
+
+ UFE = RealFE;
} else {
- VirtualFileEntries.push_back(std::make_unique<FileEntry>());
- UFE = VirtualFileEntries.back().get();
- NamedFileEnt.second = FileEntryRef::MapValue(*UFE, *DirInfo);
+ // File does not exist, create a virtual entry.
+ UFE = new (FilesAlloc.Allocate()) FileEntry();
+ VirtualFileEntries.push_back(UFE);
}
+ NamedFileEnt.second = FileEntryRef::MapValue(*UFE, *DirInfo);
UFE->LastRef = FileEntryRef(NamedFileEnt);
UFE->Size = Size;
UFE->ModTime = ModificationTime;
UFE->Dir = &DirInfo->getDirEntry();
UFE->UID = NextFileUID++;
- UFE->IsValid = true;
UFE->File.reset();
return FileEntryRef(NamedFileEnt);
}
@@ -467,16 +512,14 @@ llvm::Optional<FileEntryRef> FileManager::getBypassFile(FileEntryRef VF) {
return FileEntryRef(*Insertion.first);
// Fill in the new entry from the stat.
- BypassFileEntries.push_back(std::make_unique<FileEntry>());
- const FileEntry &VFE = VF.getFileEntry();
- FileEntry &BFE = *BypassFileEntries.back();
- Insertion.first->second = FileEntryRef::MapValue(BFE, VF.getDir());
- BFE.LastRef = FileEntryRef(*Insertion.first);
- BFE.Size = Status.getSize();
- BFE.Dir = VFE.Dir;
- BFE.ModTime = llvm::sys::toTimeT(Status.getLastModificationTime());
- BFE.UID = NextFileUID++;
- BFE.IsValid = true;
+ FileEntry *BFE = new (FilesAlloc.Allocate()) FileEntry();
+ BypassFileEntries.push_back(BFE);
+ Insertion.first->second = FileEntryRef::MapValue(*BFE, VF.getDir());
+ BFE->LastRef = FileEntryRef(*Insertion.first);
+ BFE->Size = Status.getSize();
+ BFE->Dir = VF.getFileEntry().Dir;
+ BFE->ModTime = llvm::sys::toTimeT(Status.getLastModificationTime());
+ BFE->UID = NextFileUID++;
// Save the entry in the bypass table and return.
return FileEntryRef(*Insertion.first);
@@ -593,7 +636,7 @@ FileManager::getNoncachedStatValue(StringRef Path,
}
void FileManager::GetUniqueIDMapping(
- SmallVectorImpl<const FileEntry *> &UIDToFiles) const {
+ SmallVectorImpl<const FileEntry *> &UIDToFiles) const {
UIDToFiles.clear();
UIDToFiles.resize(NextFileUID);
@@ -610,7 +653,7 @@ void FileManager::GetUniqueIDMapping(
// Map virtual file entries
for (const auto &VFE : VirtualFileEntries)
- UIDToFiles[VFE->getUID()] = VFE.get();
+ UIDToFiles[VFE->getUID()] = VFE;
}
StringRef FileManager::getCanonicalName(const DirectoryEntry *Dir) {
diff --git a/contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp b/contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp
index b86cb7af69bd..82cee4aa052d 100644
--- a/contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/IdentifierTable.cpp
@@ -108,9 +108,11 @@ namespace {
KEYOPENCLCXX = 0x400000,
KEYMSCOMPAT = 0x800000,
KEYSYCL = 0x1000000,
+ KEYCUDA = 0x2000000,
+ KEYMAX = KEYCUDA, // The highest keyword-flag bit
KEYALLCXX = KEYCXX | KEYCXX11 | KEYCXX20,
- KEYALL = (0x1ffffff & ~KEYNOMS18 &
- ~KEYNOOPENCL) // KEYNOMS18 and KEYNOOPENCL are used to exclude.
+ KEYALL = (KEYMAX | (KEYMAX-1)) & ~KEYNOMS18 &
+ ~KEYNOOPENCL // KEYNOMS18 and KEYNOOPENCL are used to exclude.
};
/// How a keyword is treated in the selected standard.
@@ -158,6 +160,8 @@ static KeywordStatus getKeywordStatus(const LangOptions &LangOpts,
return KS_Future;
if (LangOpts.isSYCL() && (Flags & KEYSYCL))
return KS_Enabled;
+ if (LangOpts.CUDA && (Flags & KEYCUDA))
+ return KS_Enabled;
return KS_Disabled;
}
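A worked check of the KEYALL computation above, using the enum's own values:

// 0x2000000 is KEYMAX (bit 25); OR-ing with KEYMAX - 1 sets bits 0..25,
// replacing the hand-maintained 0x1ffffff literal (bits 0..24) the old
// code used, so adding KEYCUDA grows the mask automatically.
static_assert((0x2000000u | (0x2000000u - 1u)) == 0x3FFFFFFu,
              "KEYALL (before exclusions) covers all 26 keyword-flag bits");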
diff --git a/contrib/llvm-project/clang/lib/Basic/LangOptions.cpp b/contrib/llvm-project/clang/lib/Basic/LangOptions.cpp
index b6dc73d66304..753b6bfe18a3 100644
--- a/contrib/llvm-project/clang/lib/Basic/LangOptions.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/LangOptions.cpp
@@ -62,7 +62,7 @@ unsigned LangOptions::getOpenCLCompatibleVersion() const {
llvm_unreachable("Unknown OpenCL version");
}
-void LangOptions::remapPathPrefix(SmallString<256> &Path) const {
+void LangOptions::remapPathPrefix(SmallVectorImpl<char> &Path) const {
for (const auto &Entry : MacroPrefixMap)
if (llvm::sys::path::replace_path_prefix(Path, Entry.first, Entry.second))
break;
@@ -78,11 +78,141 @@ std::string LangOptions::getOpenCLVersionString() const {
return Result;
}
+void LangOptions::setLangDefaults(LangOptions &Opts, Language Lang,
+ const llvm::Triple &T,
+ std::vector<std::string> &Includes,
+ LangStandard::Kind LangStd) {
+ // Set some properties which depend solely on the input kind; it would be nice
+ // to move these to the language standard, and have the driver resolve the
+ // input kind + language standard.
+ //
+ // FIXME: Perhaps a better model would be for a single source file to have
+ // multiple language standards (C / C++ std, ObjC std, OpenCL std, OpenMP std)
+ // simultaneously active?
+ if (Lang == Language::Asm) {
+ Opts.AsmPreprocessor = 1;
+ } else if (Lang == Language::ObjC || Lang == Language::ObjCXX) {
+ Opts.ObjC = 1;
+ }
+
+ if (LangStd == LangStandard::lang_unspecified)
+ LangStd = getDefaultLanguageStandard(Lang, T);
+ const LangStandard &Std = LangStandard::getLangStandardForKind(LangStd);
+ Opts.LangStd = LangStd;
+ Opts.LineComment = Std.hasLineComments();
+ Opts.C99 = Std.isC99();
+ Opts.C11 = Std.isC11();
+ Opts.C17 = Std.isC17();
+ Opts.C2x = Std.isC2x();
+ Opts.CPlusPlus = Std.isCPlusPlus();
+ Opts.CPlusPlus11 = Std.isCPlusPlus11();
+ Opts.CPlusPlus14 = Std.isCPlusPlus14();
+ Opts.CPlusPlus17 = Std.isCPlusPlus17();
+ Opts.CPlusPlus20 = Std.isCPlusPlus20();
+ Opts.CPlusPlus2b = Std.isCPlusPlus2b();
+ Opts.GNUMode = Std.isGNUMode();
+ Opts.GNUCVersion = 0;
+ Opts.HexFloats = Std.hasHexFloats();
+ Opts.WChar = Std.isCPlusPlus();
+ Opts.Digraphs = Std.hasDigraphs();
+
+ Opts.HLSL = Lang == Language::HLSL;
+ if (Opts.HLSL && Opts.IncludeDefaultHeader)
+ Includes.push_back("hlsl.h");
+
+ // Set OpenCL Version.
+ Opts.OpenCL = Std.isOpenCL();
+ if (LangStd == LangStandard::lang_opencl10)
+ Opts.OpenCLVersion = 100;
+ else if (LangStd == LangStandard::lang_opencl11)
+ Opts.OpenCLVersion = 110;
+ else if (LangStd == LangStandard::lang_opencl12)
+ Opts.OpenCLVersion = 120;
+ else if (LangStd == LangStandard::lang_opencl20)
+ Opts.OpenCLVersion = 200;
+ else if (LangStd == LangStandard::lang_opencl30)
+ Opts.OpenCLVersion = 300;
+ else if (LangStd == LangStandard::lang_openclcpp10)
+ Opts.OpenCLCPlusPlusVersion = 100;
+ else if (LangStd == LangStandard::lang_openclcpp2021)
+ Opts.OpenCLCPlusPlusVersion = 202100;
+ else if (LangStd == LangStandard::lang_hlsl2015)
+ Opts.HLSLVersion = (unsigned)LangOptions::HLSL_2015;
+ else if (LangStd == LangStandard::lang_hlsl2016)
+ Opts.HLSLVersion = (unsigned)LangOptions::HLSL_2016;
+ else if (LangStd == LangStandard::lang_hlsl2017)
+ Opts.HLSLVersion = (unsigned)LangOptions::HLSL_2017;
+ else if (LangStd == LangStandard::lang_hlsl2018)
+ Opts.HLSLVersion = (unsigned)LangOptions::HLSL_2018;
+ else if (LangStd == LangStandard::lang_hlsl2021)
+ Opts.HLSLVersion = (unsigned)LangOptions::HLSL_2021;
+ else if (LangStd == LangStandard::lang_hlsl202x)
+ Opts.HLSLVersion = (unsigned)LangOptions::HLSL_202x;
+
+ // OpenCL has some additional defaults.
+ if (Opts.OpenCL) {
+ Opts.AltiVec = 0;
+ Opts.ZVector = 0;
+ Opts.setDefaultFPContractMode(LangOptions::FPM_On);
+ Opts.OpenCLCPlusPlus = Opts.CPlusPlus;
+ Opts.OpenCLPipes = Opts.getOpenCLCompatibleVersion() == 200;
+ Opts.OpenCLGenericAddressSpace = Opts.getOpenCLCompatibleVersion() == 200;
+
+ // Include default header file for OpenCL.
+ if (Opts.IncludeDefaultHeader) {
+ if (Opts.DeclareOpenCLBuiltins) {
+ // Only include base header file for builtin types and constants.
+ Includes.push_back("opencl-c-base.h");
+ } else {
+ Includes.push_back("opencl-c.h");
+ }
+ }
+ }
+
+ Opts.HIP = Lang == Language::HIP;
+ Opts.CUDA = Lang == Language::CUDA || Opts.HIP;
+ if (Opts.HIP) {
+ // HIP toolchain does not support 'Fast' FPOpFusion in backends since it
+ // fuses multiplication/addition instructions without contract flag from
+ // device library functions in LLVM bitcode, which causes accuracy loss in
+ // certain math functions, e.g. tan(-1e20) becomes -0.933 instead of 0.8446.
+ // For device library functions in bitcode to work, a 'Strict' or 'Standard'
+ // FPOpFusion option is needed in backends. Therefore the 'fast-honor-pragmas'
+ // FP contract option is used to allow fusing across statements in the
+ // frontend while respecting the contract flag in the backend.
+ Opts.setDefaultFPContractMode(LangOptions::FPM_FastHonorPragmas);
+ } else if (Opts.CUDA) {
+ if (T.isSPIRV()) {
+ // Emit OpenCL version metadata in LLVM IR when targeting SPIR-V.
+ Opts.OpenCLVersion = 200;
+ }
+ // Allow fusing across statements, disregarding pragmas.
+ Opts.setDefaultFPContractMode(LangOptions::FPM_Fast);
+ }
+
+ Opts.RenderScript = Lang == Language::RenderScript;
+
+ // OpenCL, C++ and C2x have bool, true, false keywords.
+ Opts.Bool = Opts.OpenCL || Opts.CPlusPlus || Opts.C2x;
+
+ // OpenCL and HLSL have half keyword
+ Opts.Half = Opts.OpenCL || Opts.HLSL;
+}
+
FPOptions FPOptions::defaultWithoutTrailingStorage(const LangOptions &LO) {
FPOptions result(LO);
return result;
}
+FPOptionsOverride FPOptions::getChangesSlow(const FPOptions &Base) const {
+ FPOptions::storage_type OverrideMask = 0;
+#define OPTION(NAME, TYPE, WIDTH, PREVIOUS) \
+ if (get##NAME() != Base.get##NAME()) \
+ OverrideMask |= NAME##Mask;
+#include "clang/Basic/FPOptions.def"
+ return FPOptionsOverride(*this, OverrideMask);
+}
+
LLVM_DUMP_METHOD void FPOptions::dump() {
#define OPTION(NAME, TYPE, WIDTH, PREVIOUS) \
llvm::errs() << "\n " #NAME " " << get##NAME();
diff --git a/contrib/llvm-project/clang/lib/Basic/LangStandards.cpp b/contrib/llvm-project/clang/lib/Basic/LangStandards.cpp
index ee27bfd12113..5bacc3b16496 100644
--- a/contrib/llvm-project/clang/lib/Basic/LangStandards.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/LangStandards.cpp
@@ -7,7 +7,9 @@
//===----------------------------------------------------------------------===//
#include "clang/Basic/LangStandard.h"
+#include "clang/Config/config.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Triple.h"
#include "llvm/Support/ErrorHandling.h"
using namespace clang;
@@ -42,4 +44,47 @@ const LangStandard *LangStandard::getLangStandardForName(StringRef Name) {
return &getLangStandardForKind(K);
}
+LangStandard::Kind clang::getDefaultLanguageStandard(clang::Language Lang,
+ const llvm::Triple &T) {
+ switch (Lang) {
+ case Language::Unknown:
+ case Language::LLVM_IR:
+ llvm_unreachable("Invalid input kind!");
+ case Language::OpenCL:
+ return LangStandard::lang_opencl12;
+ case Language::OpenCLCXX:
+ return LangStandard::lang_openclcpp10;
+ case Language::CUDA:
+ return LangStandard::lang_cuda;
+ case Language::Asm:
+ case Language::C:
+ if (CLANG_DEFAULT_STD_C != LangStandard::lang_unspecified)
+ return CLANG_DEFAULT_STD_C;
+ // The PS4 and PS5 use C99 as the default C standard.
+ if (T.isPS())
+ return LangStandard::lang_gnu99;
+ return LangStandard::lang_gnu17;
+ case Language::ObjC:
+ if (CLANG_DEFAULT_STD_C != LangStandard::lang_unspecified)
+ return CLANG_DEFAULT_STD_C;
+
+ return LangStandard::lang_gnu11;
+ case Language::CXX:
+ case Language::ObjCXX:
+ if (CLANG_DEFAULT_STD_CXX != LangStandard::lang_unspecified)
+ return CLANG_DEFAULT_STD_CXX;
+
+ if (T.isDriverKit())
+ return LangStandard::lang_gnucxx17;
+ else
+ return LangStandard::lang_gnucxx14;
+ case Language::RenderScript:
+ return LangStandard::lang_c99;
+ case Language::HIP:
+ return LangStandard::lang_hip;
+ case Language::HLSL:
+ return LangStandard::lang_hlsl2021;
+ }
+ llvm_unreachable("unhandled Language kind!");
+}
diff --git a/contrib/llvm-project/clang/lib/Basic/Module.cpp b/contrib/llvm-project/clang/lib/Basic/Module.cpp
index 09bd3251fea0..17b83184abb6 100644
--- a/contrib/llvm-project/clang/lib/Basic/Module.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Module.cpp
@@ -267,7 +267,7 @@ ArrayRef<const FileEntry *> Module::getTopHeaders(FileManager &FileMgr) {
return llvm::makeArrayRef(TopHeaders.begin(), TopHeaders.end());
}
-bool Module::directlyUses(const Module *Requested) const {
+bool Module::directlyUses(const Module *Requested) {
auto *Top = getTopLevelModule();
// A top-level module implicitly uses itself.
@@ -282,6 +282,9 @@ bool Module::directlyUses(const Module *Requested) const {
if (!Requested->Parent && Requested->Name == "_Builtin_stddef_max_align_t")
return true;
+ if (NoUndeclaredIncludes)
+ UndeclaredUses.insert(Requested);
+
return false;
}
diff --git a/contrib/llvm-project/clang/lib/Basic/OpenCLOptions.cpp b/contrib/llvm-project/clang/lib/Basic/OpenCLOptions.cpp
index 7e89b3f1b804..44edf5402540 100644
--- a/contrib/llvm-project/clang/lib/Basic/OpenCLOptions.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/OpenCLOptions.cpp
@@ -12,14 +12,16 @@
namespace clang {
-const OpenCLOptions::FeatureDepList OpenCLOptions::DependentFeaturesList = {
+// The first feature in each pair requires the second one to be supported.
+static const std::pair<StringRef, StringRef> DependentFeaturesList[] = {
{"__opencl_c_read_write_images", "__opencl_c_images"},
{"__opencl_c_3d_image_writes", "__opencl_c_images"},
{"__opencl_c_pipes", "__opencl_c_generic_address_space"},
{"__opencl_c_device_enqueue", "__opencl_c_generic_address_space"},
{"__opencl_c_device_enqueue", "__opencl_c_program_scope_global_variables"}};
-const llvm::StringMap<llvm::StringRef> OpenCLOptions::FeatureExtensionMap = {
+// Extensions and equivalent feature pairs.
+static const std::pair<StringRef, StringRef> FeatureExtensionMap[] = {
{"cl_khr_fp64", "__opencl_c_fp64"},
{"cl_khr_3d_image_writes", "__opencl_c_3d_image_writes"}};
@@ -140,11 +142,11 @@ bool OpenCLOptions::diagnoseFeatureExtensionDifferences(
bool IsValid = true;
for (auto &ExtAndFeat : FeatureExtensionMap)
- if (TI.hasFeatureEnabled(OpenCLFeaturesMap, ExtAndFeat.getKey()) !=
- TI.hasFeatureEnabled(OpenCLFeaturesMap, ExtAndFeat.getValue())) {
+ if (TI.hasFeatureEnabled(OpenCLFeaturesMap, ExtAndFeat.first) !=
+ TI.hasFeatureEnabled(OpenCLFeaturesMap, ExtAndFeat.second)) {
IsValid = false;
Diags.Report(diag::err_opencl_extension_and_feature_differs)
- << ExtAndFeat.getKey() << ExtAndFeat.getValue();
+ << ExtAndFeat.first << ExtAndFeat.second;
}
return IsValid;
}
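An illustrative consistency check over pairs shaped like DependentFeaturesList above; the helper is hypothetical, and the real diagnostic loop lives elsewhere in this file:

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringRef.h"
#include <utility>

// Each pair means: if the first feature is enabled, the second must be too.
template <typename IsEnabledFn>
bool dependenciesHold(
    llvm::ArrayRef<std::pair<llvm::StringRef, llvm::StringRef>> Deps,
    IsEnabledFn IsEnabled) {
  for (const auto &P : Deps)
    if (IsEnabled(P.first) && !IsEnabled(P.second))
      return false;
  return true;
}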
diff --git a/contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp b/contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp
index 1761c6d3d89b..2f2e6537ebd3 100644
--- a/contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/OpenMPKinds.cpp
@@ -41,11 +41,15 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind, StringRef Str,
.Case(#Name, static_cast<unsigned>(OMPC_SCHEDULE_MODIFIER_##Name))
#include "clang/Basic/OpenMPKinds.def"
.Default(OMPC_SCHEDULE_unknown);
- case OMPC_depend:
- return llvm::StringSwitch<OpenMPDependClauseKind>(Str)
+ case OMPC_depend: {
+ unsigned Type = llvm::StringSwitch<unsigned>(Str)
#define OPENMP_DEPEND_KIND(Name) .Case(#Name, OMPC_DEPEND_##Name)
#include "clang/Basic/OpenMPKinds.def"
- .Default(OMPC_DEPEND_unknown);
+ .Default(OMPC_DEPEND_unknown);
+ if (LangOpts.OpenMP < 51 && Type == OMPC_DEPEND_inoutset)
+ return OMPC_DEPEND_unknown;
+ return Type;
+ }
case OMPC_linear:
return llvm::StringSwitch<OpenMPLinearClauseKind>(Str)
#define OPENMP_LINEAR_KIND(Name) .Case(#Name, OMPC_LINEAR_##Name)
@@ -182,6 +186,7 @@ unsigned clang::getOpenMPSimpleClauseType(OpenMPClauseKind Kind, StringRef Str,
case OMPC_use_device_ptr:
case OMPC_use_device_addr:
case OMPC_is_device_ptr:
+ case OMPC_has_device_addr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
case OMPC_reverse_offload:
@@ -448,6 +453,7 @@ const char *clang::getOpenMPSimpleClauseTypeName(OpenMPClauseKind Kind,
case OMPC_use_device_ptr:
case OMPC_use_device_addr:
case OMPC_is_device_ptr:
+ case OMPC_has_device_addr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
case OMPC_reverse_offload:
@@ -478,7 +484,10 @@ bool clang::isOpenMPLoopDirective(OpenMPDirectiveKind DKind) {
DKind == OMPD_master_taskloop || DKind == OMPD_master_taskloop_simd ||
DKind == OMPD_parallel_master_taskloop ||
DKind == OMPD_parallel_master_taskloop_simd ||
- DKind == OMPD_distribute || DKind == OMPD_target_parallel_for ||
+ DKind == OMPD_masked_taskloop || DKind == OMPD_masked_taskloop_simd ||
+ DKind == OMPD_parallel_masked_taskloop || DKind == OMPD_distribute ||
+ DKind == OMPD_parallel_masked_taskloop_simd ||
+ DKind == OMPD_target_parallel_for ||
DKind == OMPD_distribute_parallel_for ||
DKind == OMPD_distribute_parallel_for_simd ||
DKind == OMPD_distribute_simd ||
@@ -491,7 +500,9 @@ bool clang::isOpenMPLoopDirective(OpenMPDirectiveKind DKind) {
DKind == OMPD_target_teams_distribute_parallel_for ||
DKind == OMPD_target_teams_distribute_parallel_for_simd ||
DKind == OMPD_target_teams_distribute_simd || DKind == OMPD_tile ||
- DKind == OMPD_unroll || DKind == OMPD_loop;
+ DKind == OMPD_unroll || DKind == OMPD_loop ||
+ DKind == OMPD_teams_loop || DKind == OMPD_target_teams_loop ||
+ DKind == OMPD_parallel_loop || DKind == OMPD_target_parallel_loop;
}
bool clang::isOpenMPWorksharingDirective(OpenMPDirectiveKind DKind) {
@@ -513,6 +524,9 @@ bool clang::isOpenMPTaskLoopDirective(OpenMPDirectiveKind DKind) {
return DKind == OMPD_taskloop || DKind == OMPD_taskloop_simd ||
DKind == OMPD_master_taskloop || DKind == OMPD_master_taskloop_simd ||
DKind == OMPD_parallel_master_taskloop ||
+ DKind == OMPD_masked_taskloop || DKind == OMPD_masked_taskloop_simd ||
+ DKind == OMPD_parallel_masked_taskloop ||
+ DKind == OMPD_parallel_masked_taskloop_simd ||
DKind == OMPD_parallel_master_taskloop_simd;
}
@@ -527,9 +541,12 @@ bool clang::isOpenMPParallelDirective(OpenMPDirectiveKind DKind) {
DKind == OMPD_teams_distribute_parallel_for_simd ||
DKind == OMPD_target_teams_distribute_parallel_for ||
DKind == OMPD_target_teams_distribute_parallel_for_simd ||
- DKind == OMPD_parallel_master ||
+ DKind == OMPD_parallel_master || DKind == OMPD_parallel_masked ||
DKind == OMPD_parallel_master_taskloop ||
- DKind == OMPD_parallel_master_taskloop_simd;
+ DKind == OMPD_parallel_master_taskloop_simd ||
+ DKind == OMPD_parallel_masked_taskloop ||
+ DKind == OMPD_parallel_masked_taskloop_simd ||
+ DKind == OMPD_parallel_loop || DKind == OMPD_target_parallel_loop;
}
bool clang::isOpenMPTargetExecutionDirective(OpenMPDirectiveKind DKind) {
@@ -539,7 +556,8 @@ bool clang::isOpenMPTargetExecutionDirective(OpenMPDirectiveKind DKind) {
DKind == OMPD_target_teams || DKind == OMPD_target_teams_distribute ||
DKind == OMPD_target_teams_distribute_parallel_for ||
DKind == OMPD_target_teams_distribute_parallel_for_simd ||
- DKind == OMPD_target_teams_distribute_simd;
+ DKind == OMPD_target_teams_distribute_simd ||
+ DKind == OMPD_target_teams_loop || DKind == OMPD_target_parallel_loop;
}
bool clang::isOpenMPTargetDataManagementDirective(OpenMPDirectiveKind DKind) {
@@ -551,22 +569,26 @@ bool clang::isOpenMPNestingTeamsDirective(OpenMPDirectiveKind DKind) {
return DKind == OMPD_teams || DKind == OMPD_teams_distribute ||
DKind == OMPD_teams_distribute_simd ||
DKind == OMPD_teams_distribute_parallel_for_simd ||
- DKind == OMPD_teams_distribute_parallel_for;
+ DKind == OMPD_teams_distribute_parallel_for ||
+ DKind == OMPD_teams_loop;
}
bool clang::isOpenMPTeamsDirective(OpenMPDirectiveKind DKind) {
- return isOpenMPNestingTeamsDirective(DKind) ||
- DKind == OMPD_target_teams || DKind == OMPD_target_teams_distribute ||
+ return isOpenMPNestingTeamsDirective(DKind) || DKind == OMPD_target_teams ||
+ DKind == OMPD_target_teams_distribute ||
DKind == OMPD_target_teams_distribute_parallel_for ||
DKind == OMPD_target_teams_distribute_parallel_for_simd ||
- DKind == OMPD_target_teams_distribute_simd;
+ DKind == OMPD_target_teams_distribute_simd ||
+ DKind == OMPD_target_teams_loop;
}
bool clang::isOpenMPSimdDirective(OpenMPDirectiveKind DKind) {
return DKind == OMPD_simd || DKind == OMPD_for_simd ||
DKind == OMPD_parallel_for_simd || DKind == OMPD_taskloop_simd ||
DKind == OMPD_master_taskloop_simd ||
+ DKind == OMPD_masked_taskloop_simd ||
DKind == OMPD_parallel_master_taskloop_simd ||
+ DKind == OMPD_parallel_masked_taskloop_simd ||
DKind == OMPD_distribute_parallel_for_simd ||
DKind == OMPD_distribute_simd || DKind == OMPD_target_simd ||
DKind == OMPD_teams_distribute_simd ||
@@ -595,7 +617,9 @@ bool clang::isOpenMPDistributeDirective(OpenMPDirectiveKind Kind) {
}
bool clang::isOpenMPGenericLoopDirective(OpenMPDirectiveKind Kind) {
- return Kind == OMPD_loop;
+ return Kind == OMPD_loop || Kind == OMPD_teams_loop ||
+ Kind == OMPD_target_teams_loop || Kind == OMPD_parallel_loop ||
+ Kind == OMPD_target_parallel_loop;
}
bool clang::isOpenMPPrivate(OpenMPClauseKind Kind) {
@@ -638,14 +662,17 @@ void clang::getOpenMPCaptureRegions(
case OMPD_parallel_for:
case OMPD_parallel_for_simd:
case OMPD_parallel_master:
+ case OMPD_parallel_masked:
case OMPD_parallel_sections:
case OMPD_distribute_parallel_for:
case OMPD_distribute_parallel_for_simd:
+ case OMPD_parallel_loop:
CaptureRegions.push_back(OMPD_parallel);
break;
case OMPD_target_teams:
case OMPD_target_teams_distribute:
case OMPD_target_teams_distribute_simd:
+ case OMPD_target_teams_loop:
CaptureRegions.push_back(OMPD_task);
CaptureRegions.push_back(OMPD_target);
CaptureRegions.push_back(OMPD_teams);
@@ -668,6 +695,7 @@ void clang::getOpenMPCaptureRegions(
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
+ case OMPD_target_parallel_loop:
CaptureRegions.push_back(OMPD_task);
CaptureRegions.push_back(OMPD_target);
CaptureRegions.push_back(OMPD_parallel);
@@ -682,8 +710,12 @@ void clang::getOpenMPCaptureRegions(
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
case OMPD_master_taskloop_simd:
+ case OMPD_masked_taskloop:
+ case OMPD_masked_taskloop_simd:
CaptureRegions.push_back(OMPD_taskloop);
break;
+ case OMPD_parallel_masked_taskloop:
+ case OMPD_parallel_masked_taskloop_simd:
case OMPD_parallel_master_taskloop:
case OMPD_parallel_master_taskloop_simd:
CaptureRegions.push_back(OMPD_parallel);
@@ -696,6 +728,12 @@ void clang::getOpenMPCaptureRegions(
CaptureRegions.push_back(OMPD_teams);
CaptureRegions.push_back(OMPD_parallel);
break;
+ case OMPD_teams_loop:
+ CaptureRegions.push_back(OMPD_teams);
+ break;
+ case OMPD_nothing:
+ CaptureRegions.push_back(OMPD_nothing);
+ break;
case OMPD_loop:
// TODO: 'loop' may require different capture regions depending on the bind
// clause or the parent directive when there is no bind clause. Use
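The capture-region switch above expands each composite directive into its stack of enclosing regions, outermost first (e.g. the new OMPD_target_teams_loop case pushes task, target, then teams). A minimal standalone sketch of that pattern, using a hypothetical Dir enum rather than clang's OpenMPDirectiveKind:

    // Sketch only: mirrors the push_back pattern in getOpenMPCaptureRegions.
    #include <cassert>
    #include <vector>

    enum class Dir { Parallel, Target, Task, Teams, TargetTeamsLoop };

    static void getCaptureRegions(std::vector<Dir> &Regions, Dir D) {
      switch (D) {
      case Dir::TargetTeamsLoop: // e.g. '#pragma omp target teams loop'
        Regions.push_back(Dir::Task);   // implicit target task
        Regions.push_back(Dir::Target);
        Regions.push_back(Dir::Teams);
        break;
      default:
        Regions.push_back(D); // leaf directives capture once
        break;
      }
    }

    int main() {
      std::vector<Dir> R;
      getCaptureRegions(R, Dir::TargetTeamsLoop);
      assert(R.size() == 3 && R.front() == Dir::Task);
    }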
diff --git a/contrib/llvm-project/clang/lib/Basic/SourceManager.cpp b/contrib/llvm-project/clang/lib/Basic/SourceManager.cpp
index ec3e35595bb7..98e731eb12e6 100644
--- a/contrib/llvm-project/clang/lib/Basic/SourceManager.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/SourceManager.cpp
@@ -629,23 +629,21 @@ FileID SourceManager::createFileIDImpl(ContentCache &File, StringRef Filename,
return LastFileIDLookup = FID;
}
-SourceLocation
-SourceManager::createMacroArgExpansionLoc(SourceLocation SpellingLoc,
- SourceLocation ExpansionLoc,
- unsigned TokLength) {
+SourceLocation SourceManager::createMacroArgExpansionLoc(
+ SourceLocation SpellingLoc, SourceLocation ExpansionLoc, unsigned Length) {
ExpansionInfo Info = ExpansionInfo::createForMacroArg(SpellingLoc,
ExpansionLoc);
- return createExpansionLocImpl(Info, TokLength);
+ return createExpansionLocImpl(Info, Length);
}
SourceLocation SourceManager::createExpansionLoc(
SourceLocation SpellingLoc, SourceLocation ExpansionLocStart,
- SourceLocation ExpansionLocEnd, unsigned TokLength,
+ SourceLocation ExpansionLocEnd, unsigned Length,
bool ExpansionIsTokenRange, int LoadedID,
SourceLocation::UIntTy LoadedOffset) {
ExpansionInfo Info = ExpansionInfo::create(
SpellingLoc, ExpansionLocStart, ExpansionLocEnd, ExpansionIsTokenRange);
- return createExpansionLocImpl(Info, TokLength, LoadedID, LoadedOffset);
+ return createExpansionLocImpl(Info, Length, LoadedID, LoadedOffset);
}
SourceLocation SourceManager::createTokenSplitLoc(SourceLocation Spelling,
@@ -660,7 +658,7 @@ SourceLocation SourceManager::createTokenSplitLoc(SourceLocation Spelling,
SourceLocation
SourceManager::createExpansionLocImpl(const ExpansionInfo &Info,
- unsigned TokLength, int LoadedID,
+ unsigned Length, int LoadedID,
SourceLocation::UIntTy LoadedOffset) {
if (LoadedID < 0) {
assert(LoadedID != -1 && "Loading sentinel FileID");
@@ -672,12 +670,12 @@ SourceManager::createExpansionLocImpl(const ExpansionInfo &Info,
return SourceLocation::getMacroLoc(LoadedOffset);
}
LocalSLocEntryTable.push_back(SLocEntry::get(NextLocalOffset, Info));
- assert(NextLocalOffset + TokLength + 1 > NextLocalOffset &&
- NextLocalOffset + TokLength + 1 <= CurrentLoadedOffset &&
+ assert(NextLocalOffset + Length + 1 > NextLocalOffset &&
+ NextLocalOffset + Length + 1 <= CurrentLoadedOffset &&
"Ran out of source locations!");
// See createFileID for that +1.
- NextLocalOffset += TokLength + 1;
- return SourceLocation::getMacroLoc(NextLocalOffset - (TokLength + 1));
+ NextLocalOffset += Length + 1;
+ return SourceLocation::getMacroLoc(NextLocalOffset - (Length + 1));
}
llvm::Optional<llvm::MemoryBufferRef>
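The renamed Length parameter feeds the same offset arithmetic as before: each expansion reserves Length + 1 offset units, and the asserts guard against wraparound and collision with the loaded-module offset space. A small sketch of that invariant, with a hypothetical OffsetAllocator standing in for SourceManager:

    #include <cassert>
    #include <cstdint>

    struct OffsetAllocator {
      uint32_t NextLocalOffset = 0;
      uint32_t CurrentLoadedOffset = UINT32_MAX / 2; // loaded entries live above
      uint32_t allocate(uint32_t Length) {
        assert(NextLocalOffset + Length + 1 > NextLocalOffset && // no wraparound
               NextLocalOffset + Length + 1 <= CurrentLoadedOffset &&
               "Ran out of source locations!");
        NextLocalOffset += Length + 1; // +1 mirrors createFileID's sentinel slot
        return NextLocalOffset - (Length + 1);
      }
    };

    int main() {
      OffsetAllocator A;
      assert(A.allocate(5) == 0);
      assert(A.allocate(3) == 6); // the first token occupied offsets [0, 5]
    }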
diff --git a/contrib/llvm-project/clang/lib/Basic/TargetID.cpp b/contrib/llvm-project/clang/lib/Basic/TargetID.cpp
index 3b8f4c13b9bf..abfbe49e1a91 100644
--- a/contrib/llvm-project/clang/lib/Basic/TargetID.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/TargetID.cpp
@@ -109,8 +109,7 @@ parseTargetID(const llvm::Triple &T, llvm::StringRef TargetID,
if (!OptionalProcessor)
return llvm::None;
- llvm::StringRef Processor =
- getCanonicalProcessorName(T, OptionalProcessor.getValue());
+ llvm::StringRef Processor = getCanonicalProcessorName(T, *OptionalProcessor);
if (Processor.empty())
return llvm::None;
@@ -150,8 +149,7 @@ getConflictTargetIDCombination(const std::set<llvm::StringRef> &TargetIDs) {
llvm::StringMap<Info> FeatureMap;
for (auto &&ID : TargetIDs) {
llvm::StringMap<bool> Features;
- llvm::StringRef Proc =
- parseTargetIDWithFormatCheckingOnly(ID, &Features).getValue();
+ llvm::StringRef Proc = *parseTargetIDWithFormatCheckingOnly(ID, &Features);
auto Loc = FeatureMap.find(Proc);
if (Loc == FeatureMap.end())
FeatureMap[Proc] = Info{ID, Features};
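Both hunks above replace Optional's getValue() with the equivalent operator* dereference. A sketch of the idiom with std::optional, whose dereference interface llvm::Optional mirrors:

    #include <cassert>
    #include <optional>
    #include <string>

    int main() {
      std::optional<std::string> Proc = std::string("gfx906");
      // Equivalent reads; the diff prefers the terser dereference form.
      assert(Proc.value() == *Proc);
    }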
diff --git a/contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp b/contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp
index 188ffb5f2f78..e22ed34e7da4 100644
--- a/contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/TargetInfo.cpp
@@ -131,7 +131,7 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : Triple(T) {
ARMCDECoprocMask = 0;
// Default to no types using fpret.
- RealTypeUsesObjCFPRet = 0;
+ RealTypeUsesObjCFPRetMask = 0;
// Default to not using fp2ret for __Complex long double
ComplexLongDoubleUsesFP2Ret = false;
@@ -150,6 +150,9 @@ TargetInfo::TargetInfo(const llvm::Triple &T) : Triple(T) {
PlatformMinVersion = VersionTuple();
MaxOpenCLWorkGroupSize = 1024;
+
+ MaxBitIntWidth.reset();
+
ProgramAddrSpace = 0;
}
@@ -284,6 +287,8 @@ TargetInfo::IntType TargetInfo::getLeastIntTypeByWidth(unsigned BitWidth,
FloatModeKind TargetInfo::getRealTypeByWidth(unsigned BitWidth,
FloatModeKind ExplicitType) const {
+ if (getHalfWidth() == BitWidth)
+ return FloatModeKind::Half;
if (getFloatWidth() == BitWidth)
return FloatModeKind::Float;
if (getDoubleWidth() == BitWidth)
@@ -449,6 +454,20 @@ void TargetInfo::adjust(DiagnosticsEngine &Diags, LangOptions &Opts) {
} else if (Opts.LongDoubleSize == 128) {
LongDoubleWidth = LongDoubleAlign = 128;
LongDoubleFormat = &llvm::APFloat::IEEEquad();
+ } else if (Opts.LongDoubleSize == 80) {
+ LongDoubleFormat = &llvm::APFloat::x87DoubleExtended();
+ if (getTriple().isWindowsMSVCEnvironment()) {
+ LongDoubleWidth = 128;
+ LongDoubleAlign = 128;
+ } else { // Linux
+ if (getTriple().getArch() == llvm::Triple::x86) {
+ LongDoubleWidth = 96;
+ LongDoubleAlign = 32;
+ } else {
+ LongDoubleWidth = 128;
+ LongDoubleAlign = 128;
+ }
+ }
}
}
@@ -464,6 +483,9 @@ void TargetInfo::adjust(DiagnosticsEngine &Diags, LangOptions &Opts) {
Diags.Report(diag::err_opt_not_valid_on_target) << "-fprotect-parens";
Opts.ProtectParens = false;
}
+
+ if (Opts.MaxBitIntWidth)
+ MaxBitIntWidth = Opts.MaxBitIntWidth;
}
bool TargetInfo::initFeatureMap(
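The new -mlong-double-80 branch above pads the x87 80-bit format to ABI-specific storage: 128-bit width and alignment on MSVC and 64-bit Linux, 96/32 on 32-bit x86 Linux. A hedged sketch of that width/alignment table (the Env enum is illustrative, not a clang type):

    #include <cassert>
    #include <utility>

    enum class Env { MSVC, LinuxX86, LinuxX86_64 };

    static std::pair<unsigned, unsigned> longDoubleWidthAlign(Env E) {
      switch (E) {
      case Env::LinuxX86:
        return {96, 32};   // i386 SysV: 12-byte slots, 4-byte aligned
      case Env::MSVC:
      case Env::LinuxX86_64:
        return {128, 128}; // padded to 16 bytes
      }
      return {0, 0}; // unreachable; keeps -Wreturn-type quiet
    }

    int main() {
      assert(longDoubleWidthAlign(Env::LinuxX86).first == 96);
      assert(longDoubleWidthAlign(Env::LinuxX86_64).second == 128);
    }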
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets.cpp b/contrib/llvm-project/clang/lib/Basic/Targets.cpp
index 994a491cddf2..2d6ef998485a 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets.cpp
@@ -19,6 +19,8 @@
#include "Targets/ARM.h"
#include "Targets/AVR.h"
#include "Targets/BPF.h"
+#include "Targets/CSKY.h"
+#include "Targets/DirectX.h"
#include "Targets/Hexagon.h"
#include "Targets/Lanai.h"
#include "Targets/Le64.h"
@@ -590,6 +592,8 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return new NaClTargetInfo<X86_64TargetInfo>(Triple, Opts);
case llvm::Triple::PS4:
return new PS4OSTargetInfo<X86_64TargetInfo>(Triple, Opts);
+ case llvm::Triple::PS5:
+ return new PS5OSTargetInfo<X86_64TargetInfo>(Triple, Opts);
default:
return new X86_64TargetInfo(Triple, Opts);
}
@@ -649,6 +653,8 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
return nullptr;
}
+ case llvm::Triple::dxil:
+ return new DirectXTargetInfo(Triple, Opts);
case llvm::Triple::renderscript32:
return new LinuxTargetInfo<RenderScript32TargetInfo>(Triple, Opts);
case llvm::Triple::renderscript64:
@@ -656,6 +662,14 @@ TargetInfo *AllocateTarget(const llvm::Triple &Triple,
case llvm::Triple::ve:
return new LinuxTargetInfo<VETargetInfo>(Triple, Opts);
+
+ case llvm::Triple::csky:
+ switch (os) {
+ case llvm::Triple::Linux:
+ return new LinuxTargetInfo<CSKYTargetInfo>(Triple, Opts);
+ default:
+ return new CSKYTargetInfo(Triple, Opts);
+ }
}
}
} // namespace targets
@@ -731,6 +745,10 @@ TargetInfo::CreateTargetInfo(DiagnosticsEngine &Diags,
Target->setCommandLineOpenCLOpts();
Target->setMaxAtomicWidth();
+ if (!Opts->DarwinTargetVariantTriple.empty())
+ Target->DarwinTargetVariantTriple =
+ llvm::Triple(Opts->DarwinTargetVariantTriple);
+
if (!Target->validateTarget(Diags))
return nullptr;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
index 34bdb58dffc1..60ef52ac3f0d 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.cpp
@@ -435,6 +435,9 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
if (HasRandGen)
Builder.defineMacro("__ARM_FEATURE_RNG", "1");
+ if (HasMOPS)
+ Builder.defineMacro("__ARM_FEATURE_MOPS", "1");
+
switch (ArchKind) {
default:
break;
@@ -482,6 +485,10 @@ void AArch64TargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4");
Builder.defineMacro("__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8");
+ // Allow detection of fast FMA support.
+ Builder.defineMacro("__FP_FAST_FMA", "1");
+ Builder.defineMacro("__FP_FAST_FMAF", "1");
+
if (Opts.VScaleMin && Opts.VScaleMin == Opts.VScaleMax) {
Builder.defineMacro("__ARM_FEATURE_SVE_BITS", Twine(Opts.VScaleMin * 128));
Builder.defineMacro("__ARM_FEATURE_SVE_VECTOR_OPERATORS");
@@ -506,21 +513,18 @@ AArch64TargetInfo::getVScaleRange(const LangOptions &LangOpts) const {
}
bool AArch64TargetInfo::hasFeature(StringRef Feature) const {
- return Feature == "aarch64" || Feature == "arm64" || Feature == "arm" ||
- (Feature == "neon" && (FPU & NeonMode)) ||
- ((Feature == "sve" || Feature == "sve2" || Feature == "sve2-bitperm" ||
- Feature == "sve2-aes" || Feature == "sve2-sha3" ||
- Feature == "sve2-sm4" || Feature == "f64mm" || Feature == "f32mm" ||
- Feature == "i8mm" || Feature == "bf16") &&
- (FPU & SveMode)) ||
- (Feature == "ls64" && HasLS64);
+ return llvm::StringSwitch<bool>(Feature)
+ .Cases("aarch64", "arm64", "arm", true)
+ .Case("neon", FPU & NeonMode)
+ .Cases("sve", "sve2", "sve2-bitperm", "sve2-aes", "sve2-sha3", "sve2-sm4", "f64mm", "f32mm", "i8mm", "bf16", FPU & SveMode)
+ .Case("ls64", HasLS64)
+ .Default(false);
}
bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
DiagnosticsEngine &Diags) {
FPU = FPUMode;
HasCRC = false;
- HasCrypto = false;
HasAES = false;
HasSHA2 = false;
HasSHA3 = false;
@@ -543,7 +547,6 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasMatmulFP64 = false;
HasMatmulFP32 = false;
HasLSE = false;
- HasHBC = false;
HasMOPS = false;
ArchKind = llvm::AArch64::ArchKind::INVALID;
@@ -594,8 +597,6 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
}
if (Feature == "+crc")
HasCRC = true;
- if (Feature == "+crypto")
- HasCrypto = true;
if (Feature == "+aes")
HasAES = true;
if (Feature == "+sha2")
@@ -660,8 +661,8 @@ bool AArch64TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasRandGen = true;
if (Feature == "+flagm")
HasFlagM = true;
- if (Feature == "+hbc")
- HasHBC = true;
+ if (Feature == "+mops")
+ HasMOPS = true;
}
setDataLayout();
@@ -679,6 +680,7 @@ AArch64TargetInfo::checkCallingConvention(CallingConv CC) const {
case CC_PreserveAll:
case CC_OpenCLKernel:
case CC_AArch64VectorCall:
+ case CC_AArch64SVEPCS:
case CC_Win64:
return CCCR_OK;
default:
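The hasFeature rewrite above trades a hand-written boolean chain for llvm::StringSwitch. A standalone sketch of the same idiom (requires LLVM's ADT headers; hasFeatureSketch and its flags are hypothetical):

    #include "llvm/ADT/StringRef.h"
    #include "llvm/ADT/StringSwitch.h"

    static bool hasFeatureSketch(llvm::StringRef Feature, bool HasNeon,
                                 bool HasSVE) {
      return llvm::StringSwitch<bool>(Feature)
          .Cases("aarch64", "arm64", "arm", true) // always-on identities
          .Case("neon", HasNeon)                  // gated on the FPU mode
          .Cases("sve", "sve2", HasSVE)           // one result for many keys
          .Default(false);
    }

    int main() { return hasFeatureSketch("neon", true, false) ? 0 : 1; }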
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h
index 9e22aeaff251..bd6812d1257c 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AArch64.h
@@ -30,7 +30,6 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
unsigned FPU;
bool HasCRC;
- bool HasCrypto;
bool HasAES;
bool HasSHA2;
bool HasSHA3;
@@ -54,7 +53,6 @@ class LLVM_LIBRARY_VISIBILITY AArch64TargetInfo : public TargetInfo {
bool HasMatmulFP32;
bool HasLSE;
bool HasFlagM;
- bool HasHBC;
bool HasMOPS;
llvm::AArch64::ArchKind ArchKind;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.cpp
index ba7ffa34c73e..50256d8e210c 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.cpp
@@ -183,6 +183,27 @@ bool AMDGPUTargetInfo::initFeatureMap(
// XXX - What does the member GPU mean if a device name string is passed here?
if (isAMDGCN(getTriple())) {
switch (llvm::AMDGPU::parseArchAMDGCN(CPU)) {
+ case GK_GFX1103:
+ case GK_GFX1102:
+ case GK_GFX1101:
+ case GK_GFX1100:
+ Features["ci-insts"] = true;
+ Features["dot1-insts"] = true;
+ Features["dot5-insts"] = true;
+ Features["dot6-insts"] = true;
+ Features["dot7-insts"] = true;
+ Features["dot8-insts"] = true;
+ Features["dl-insts"] = true;
+ Features["flat-address-space"] = true;
+ Features["16-bit-insts"] = true;
+ Features["dpp"] = true;
+ Features["gfx8-insts"] = true;
+ Features["gfx9-insts"] = true;
+ Features["gfx10-insts"] = true;
+ Features["gfx10-3-insts"] = true;
+ Features["gfx11-insts"] = true;
+ break;
+ case GK_GFX1036:
case GK_GFX1035:
case GK_GFX1034:
case GK_GFX1033:
@@ -227,6 +248,9 @@ bool AMDGPUTargetInfo::initFeatureMap(
Features["s-memrealtime"] = true;
Features["s-memtime-inst"] = true;
break;
+ case GK_GFX940:
+ Features["gfx940-insts"] = true;
+ LLVM_FALLTHROUGH;
case GK_GFX90A:
Features["gfx90a-insts"] = true;
LLVM_FALLTHROUGH;
@@ -384,12 +408,17 @@ void AMDGPUTargetInfo::getTargetDefines(const LangOptions &Opts,
StringRef CanonName = isAMDGCN(getTriple()) ?
getArchNameAMDGCN(GPUKind) : getArchNameR600(GPUKind);
Builder.defineMacro(Twine("__") + Twine(CanonName) + Twine("__"));
+ // Emit macros for the gfx family, e.g. gfx906 -> __GFX9__, gfx1030 -> __GFX10__
+ if (isAMDGCN(getTriple())) {
+ assert(CanonName.startswith("gfx") && "Invalid amdgcn canonical name");
+ Builder.defineMacro(Twine("__") + Twine(CanonName.drop_back(2).upper()) +
+ Twine("__"));
+ }
if (isAMDGCN(getTriple())) {
Builder.defineMacro("__amdgcn_processor__",
Twine("\"") + Twine(CanonName) + Twine("\""));
Builder.defineMacro("__amdgcn_target_id__",
- Twine("\"") + Twine(getTargetID().getValue()) +
- Twine("\""));
+ Twine("\"") + Twine(*getTargetID()) + Twine("\""));
for (auto F : getAllPossibleTargetIDFeatures(getTriple(), CanonName)) {
auto Loc = OffloadArchFeatures.find(F);
if (Loc != OffloadArchFeatures.end()) {
@@ -403,6 +432,9 @@ void AMDGPUTargetInfo::getTargetDefines(const LangOptions &Opts,
}
}
+ if (AllowAMDGPUUnsafeFPAtomics)
+ Builder.defineMacro("__AMDGCN_UNSAFE_FP_ATOMICS__");
+
// TODO: __HAS_FMAF__, __HAS_LDEXPF__, __HAS_FP64__ are deprecated and will be
// removed in the near future.
if (hasFMAF())
@@ -429,9 +461,13 @@ void AMDGPUTargetInfo::setAuxTarget(const TargetInfo *Aux) {
// supported by AMDGPU. Therefore keep its own format for these two types.
auto SaveLongDoubleFormat = LongDoubleFormat;
auto SaveFloat128Format = Float128Format;
+ auto SaveLongDoubleWidth = LongDoubleWidth;
+ auto SaveLongDoubleAlign = LongDoubleAlign;
copyAuxTarget(Aux);
LongDoubleFormat = SaveLongDoubleFormat;
Float128Format = SaveFloat128Format;
+ LongDoubleWidth = SaveLongDoubleWidth;
+ LongDoubleAlign = SaveLongDoubleAlign;
// For certain builtin types supported on the host target, claim they are
// supported so that compilation of the host code succeeds during device-side
// compilation.
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h b/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h
index 974922191488..5e73a3cb8019 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AMDGPU.h
@@ -411,6 +411,7 @@ public:
return CCCR_Warning;
case CC_C:
case CC_OpenCLKernel:
+ case CC_AMDGPUKernelCall:
return CCCR_OK;
}
}
@@ -434,17 +435,17 @@ public:
DiagnosticsEngine &Diags) override {
auto TargetIDFeatures =
getAllPossibleTargetIDFeatures(getTriple(), getArchNameAMDGCN(GPUKind));
- llvm::for_each(Features, [&](const auto &F) {
+ for (const auto &F : Features) {
assert(F.front() == '+' || F.front() == '-');
if (F == "+wavefrontsize64")
WavefrontSize = 64;
bool IsOn = F.front() == '+';
StringRef Name = StringRef(F).drop_front();
if (!llvm::is_contained(TargetIDFeatures, Name))
- return;
+ continue;
assert(OffloadArchFeatures.find(Name) == OffloadArchFeatures.end());
OffloadArchFeatures[Name] = IsOn;
- });
+ }
return true;
}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp
index 9c9d198e8f32..b2f61cff81c9 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/ARM.cpp
@@ -955,6 +955,7 @@ void ARMTargetInfo::getTargetDefines(const LangOptions &Opts,
case llvm::ARM::ArchKind::ARMV8_4A:
case llvm::ARM::ArchKind::ARMV8_5A:
case llvm::ARM::ArchKind::ARMV8_6A:
+ case llvm::ARM::ArchKind::ARMV8_7A:
case llvm::ARM::ArchKind::ARMV8_8A:
case llvm::ARM::ArchKind::ARMV9A:
case llvm::ARM::ArchKind::ARMV9_1A:
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/AVR.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/AVR.cpp
index 93ed0671119f..67e27ebd58de 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/AVR.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/AVR.cpp
@@ -28,246 +28,261 @@ struct LLVM_LIBRARY_VISIBILITY MCUInfo {
bool IsTiny; // Set to true for devices belonging to the avrtiny family.
};
-// This list should be kept up-to-date with AVRDevices.td in LLVM.
+// NOTE: This list has been synchronized with gcc-avr 5.4.0 and avr-libc 2.0.0.
static MCUInfo AVRMcus[] = {
- {"at90s1200", "__AVR_AT90S1200__", 0},
- {"attiny11", "__AVR_ATtiny11__", 0},
- {"attiny12", "__AVR_ATtiny12__", 0},
- {"attiny15", "__AVR_ATtiny15__", 0},
- {"attiny28", "__AVR_ATtiny28__", 0},
- {"at90s2313", "__AVR_AT90S2313__", 1},
- {"at90s2323", "__AVR_AT90S2323__", 1},
- {"at90s2333", "__AVR_AT90S2333__", 1},
- {"at90s2343", "__AVR_AT90S2343__", 1},
- {"attiny22", "__AVR_ATtiny22__", 1},
- {"attiny26", "__AVR_ATtiny26__", 1},
- {"at86rf401", "__AVR_AT86RF401__", 1},
- {"at90s4414", "__AVR_AT90S4414__", 1},
- {"at90s4433", "__AVR_AT90S4433__", 1},
- {"at90s4434", "__AVR_AT90S4434__", 1},
- {"at90s8515", "__AVR_AT90S8515__", 1},
- {"at90c8534", "__AVR_AT90c8534__", 1},
- {"at90s8535", "__AVR_AT90S8535__", 1},
- {"ata5272", "__AVR_ATA5272__", 1},
- {"attiny13", "__AVR_ATtiny13__", 1},
- {"attiny13a", "__AVR_ATtiny13A__", 1},
- {"attiny2313", "__AVR_ATtiny2313__", 1},
- {"attiny2313a", "__AVR_ATtiny2313A__", 1},
- {"attiny24", "__AVR_ATtiny24__", 1},
- {"attiny24a", "__AVR_ATtiny24A__", 1},
- {"attiny4313", "__AVR_ATtiny4313__", 1},
- {"attiny44", "__AVR_ATtiny44__", 1},
- {"attiny44a", "__AVR_ATtiny44A__", 1},
- {"attiny84", "__AVR_ATtiny84__", 1},
- {"attiny84a", "__AVR_ATtiny84A__", 1},
- {"attiny25", "__AVR_ATtiny25__", 1},
- {"attiny45", "__AVR_ATtiny45__", 1},
- {"attiny85", "__AVR_ATtiny85__", 1},
- {"attiny261", "__AVR_ATtiny261__", 1},
- {"attiny261a", "__AVR_ATtiny261A__", 1},
- {"attiny441", "__AVR_ATtiny441__", 1},
- {"attiny461", "__AVR_ATtiny461__", 1},
- {"attiny461a", "__AVR_ATtiny461A__", 1},
- {"attiny841", "__AVR_ATtiny841__", 1},
- {"attiny861", "__AVR_ATtiny861__", 1},
- {"attiny861a", "__AVR_ATtiny861A__", 1},
- {"attiny87", "__AVR_ATtiny87__", 1},
- {"attiny43u", "__AVR_ATtiny43U__", 1},
- {"attiny48", "__AVR_ATtiny48__", 1},
- {"attiny88", "__AVR_ATtiny88__", 1},
- {"attiny828", "__AVR_ATtiny828__", 1},
- {"at43usb355", "__AVR_AT43USB355__", 1},
- {"at76c711", "__AVR_AT76C711__", 1},
- {"atmega103", "__AVR_ATmega103__", 1},
- {"at43usb320", "__AVR_AT43USB320__", 1},
- {"attiny167", "__AVR_ATtiny167__", 1},
- {"at90usb82", "__AVR_AT90USB82__", 1},
- {"at90usb162", "__AVR_AT90USB162__", 1},
- {"ata5505", "__AVR_ATA5505__", 1},
- {"atmega8u2", "__AVR_ATmega8U2__", 1},
- {"atmega16u2", "__AVR_ATmega16U2__", 1},
- {"atmega32u2", "__AVR_ATmega32U2__", 1},
- {"attiny1634", "__AVR_ATtiny1634__", 1},
- {"atmega8", "__AVR_ATmega8__", 1},
- {"ata6289", "__AVR_ATA6289__", 1},
- {"atmega8a", "__AVR_ATmega8A__", 1},
- {"ata6285", "__AVR_ATA6285__", 1},
- {"ata6286", "__AVR_ATA6286__", 1},
- {"atmega48", "__AVR_ATmega48__", 1},
- {"atmega48a", "__AVR_ATmega48A__", 1},
- {"atmega48pa", "__AVR_ATmega48PA__", 1},
- {"atmega48pb", "__AVR_ATmega48PB__", 1},
- {"atmega48p", "__AVR_ATmega48P__", 1},
- {"atmega88", "__AVR_ATmega88__", 1},
- {"atmega88a", "__AVR_ATmega88A__", 1},
- {"atmega88p", "__AVR_ATmega88P__", 1},
- {"atmega88pa", "__AVR_ATmega88PA__", 1},
- {"atmega88pb", "__AVR_ATmega88PB__", 1},
- {"atmega8515", "__AVR_ATmega8515__", 1},
- {"atmega8535", "__AVR_ATmega8535__", 1},
- {"atmega8hva", "__AVR_ATmega8HVA__", 1},
- {"at90pwm1", "__AVR_AT90PWM1__", 1},
- {"at90pwm2", "__AVR_AT90PWM2__", 1},
- {"at90pwm2b", "__AVR_AT90PWM2B__", 1},
- {"at90pwm3", "__AVR_AT90PWM3__", 1},
- {"at90pwm3b", "__AVR_AT90PWM3B__", 1},
- {"at90pwm81", "__AVR_AT90PWM81__", 1},
- {"ata5790", "__AVR_ATA5790__", 1},
- {"ata5795", "__AVR_ATA5795__", 1},
- {"atmega16", "__AVR_ATmega16__", 1},
- {"atmega16a", "__AVR_ATmega16A__", 1},
- {"atmega161", "__AVR_ATmega161__", 1},
- {"atmega162", "__AVR_ATmega162__", 1},
- {"atmega163", "__AVR_ATmega163__", 1},
- {"atmega164a", "__AVR_ATmega164A__", 1},
- {"atmega164p", "__AVR_ATmega164P__", 1},
- {"atmega164pa", "__AVR_ATmega164PA__", 1},
- {"atmega165", "__AVR_ATmega165__", 1},
- {"atmega165a", "__AVR_ATmega165A__", 1},
- {"atmega165p", "__AVR_ATmega165P__", 1},
- {"atmega165pa", "__AVR_ATmega165PA__", 1},
- {"atmega168", "__AVR_ATmega168__", 1},
- {"atmega168a", "__AVR_ATmega168A__", 1},
- {"atmega168p", "__AVR_ATmega168P__", 1},
- {"atmega168pa", "__AVR_ATmega168PA__", 1},
- {"atmega168pb", "__AVR_ATmega168PB__", 1},
- {"atmega169", "__AVR_ATmega169__", 1},
- {"atmega169a", "__AVR_ATmega169A__", 1},
- {"atmega169p", "__AVR_ATmega169P__", 1},
- {"atmega169pa", "__AVR_ATmega169PA__", 1},
- {"atmega32", "__AVR_ATmega32__", 1},
- {"atmega32a", "__AVR_ATmega32A__", 1},
- {"atmega323", "__AVR_ATmega323__", 1},
- {"atmega324a", "__AVR_ATmega324A__", 1},
- {"atmega324p", "__AVR_ATmega324P__", 1},
- {"atmega324pa", "__AVR_ATmega324PA__", 1},
- {"atmega324pb", "__AVR_ATmega324PB__", 1},
- {"atmega325", "__AVR_ATmega325__", 1},
- {"atmega325a", "__AVR_ATmega325A__", 1},
- {"atmega325p", "__AVR_ATmega325P__", 1},
- {"atmega325pa", "__AVR_ATmega325PA__", 1},
- {"atmega3250", "__AVR_ATmega3250__", 1},
- {"atmega3250a", "__AVR_ATmega3250A__", 1},
- {"atmega3250p", "__AVR_ATmega3250P__", 1},
- {"atmega3250pa", "__AVR_ATmega3250PA__", 1},
- {"atmega328", "__AVR_ATmega328__", 1},
- {"atmega328p", "__AVR_ATmega328P__", 1},
- {"atmega328pb", "__AVR_ATmega328PB__", 1},
- {"atmega329", "__AVR_ATmega329__", 1},
- {"atmega329a", "__AVR_ATmega329A__", 1},
- {"atmega329p", "__AVR_ATmega329P__", 1},
- {"atmega329pa", "__AVR_ATmega329PA__", 1},
- {"atmega3290", "__AVR_ATmega3290__", 1},
- {"atmega3290a", "__AVR_ATmega3290A__", 1},
- {"atmega3290p", "__AVR_ATmega3290P__", 1},
- {"atmega3290pa", "__AVR_ATmega3290PA__", 1},
- {"atmega406", "__AVR_ATmega406__", 1},
- {"atmega64", "__AVR_ATmega64__", 1},
- {"atmega64a", "__AVR_ATmega64A__", 1},
- {"atmega640", "__AVR_ATmega640__", 1},
- {"atmega644", "__AVR_ATmega644__", 1},
- {"atmega644a", "__AVR_ATmega644A__", 1},
- {"atmega644p", "__AVR_ATmega644P__", 1},
- {"atmega644pa", "__AVR_ATmega644PA__", 1},
- {"atmega645", "__AVR_ATmega645__", 1},
- {"atmega645a", "__AVR_ATmega645A__", 1},
- {"atmega645p", "__AVR_ATmega645P__", 1},
- {"atmega649", "__AVR_ATmega649__", 1},
- {"atmega649a", "__AVR_ATmega649A__", 1},
- {"atmega649p", "__AVR_ATmega649P__", 1},
- {"atmega6450", "__AVR_ATmega6450__", 1},
- {"atmega6450a", "__AVR_ATmega6450A__", 1},
- {"atmega6450p", "__AVR_ATmega6450P__", 1},
- {"atmega6490", "__AVR_ATmega6490__", 1},
- {"atmega6490a", "__AVR_ATmega6490A__", 1},
- {"atmega6490p", "__AVR_ATmega6490P__", 1},
- {"atmega64rfr2", "__AVR_ATmega64RFR2__", 1},
- {"atmega644rfr2", "__AVR_ATmega644RFR2__", 1},
- {"atmega16hva", "__AVR_ATmega16HVA__", 1},
- {"atmega16hva2", "__AVR_ATmega16HVA2__", 1},
- {"atmega16hvb", "__AVR_ATmega16HVB__", 1},
- {"atmega16hvbrevb", "__AVR_ATmega16HVBREVB__", 1},
- {"atmega32hvb", "__AVR_ATmega32HVB__", 1},
- {"atmega32hvbrevb", "__AVR_ATmega32HVBREVB__", 1},
- {"atmega64hve", "__AVR_ATmega64HVE__", 1},
- {"at90can32", "__AVR_AT90CAN32__", 1},
- {"at90can64", "__AVR_AT90CAN64__", 1},
- {"at90pwm161", "__AVR_AT90PWM161__", 1},
- {"at90pwm216", "__AVR_AT90PWM216__", 1},
- {"at90pwm316", "__AVR_AT90PWM316__", 1},
- {"atmega32c1", "__AVR_ATmega32C1__", 1},
- {"atmega64c1", "__AVR_ATmega64C1__", 1},
- {"atmega16m1", "__AVR_ATmega16M1__", 1},
- {"atmega32m1", "__AVR_ATmega32M1__", 1},
- {"atmega64m1", "__AVR_ATmega64M1__", 1},
- {"atmega16u4", "__AVR_ATmega16U4__", 1},
- {"atmega32u4", "__AVR_ATmega32U4__", 1},
- {"atmega32u6", "__AVR_ATmega32U6__", 1},
- {"at90usb646", "__AVR_AT90USB646__", 1},
- {"at90usb647", "__AVR_AT90USB647__", 1},
- {"at90scr100", "__AVR_AT90SCR100__", 1},
- {"at94k", "__AVR_AT94K__", 1},
- {"m3000", "__AVR_AT000__", 1},
- {"atmega128", "__AVR_ATmega128__", 2},
- {"atmega128a", "__AVR_ATmega128A__", 2},
- {"atmega1280", "__AVR_ATmega1280__", 2},
- {"atmega1281", "__AVR_ATmega1281__", 2},
- {"atmega1284", "__AVR_ATmega1284__", 2},
- {"atmega1284p", "__AVR_ATmega1284P__", 2},
- {"atmega128rfa1", "__AVR_ATmega128RFA1__", 2},
- {"atmega128rfr2", "__AVR_ATmega128RFR2__", 2},
- {"atmega1284rfr2", "__AVR_ATmega1284RFR2__", 2},
- {"at90can128", "__AVR_AT90CAN128__", 2},
- {"at90usb1286", "__AVR_AT90USB1286__", 2},
- {"at90usb1287", "__AVR_AT90USB1287__", 2},
- {"atmega2560", "__AVR_ATmega2560__", 4},
- {"atmega2561", "__AVR_ATmega2561__", 4},
- {"atmega256rfr2", "__AVR_ATmega256RFR2__", 4},
- {"atmega2564rfr2", "__AVR_ATmega2564RFR2__", 4},
- {"atxmega16a4", "__AVR_ATxmega16A4__", 1},
- {"atxmega16a4u", "__AVR_ATxmega16A4U__", 1},
- {"atxmega16c4", "__AVR_ATxmega16C4__", 1},
- {"atxmega16d4", "__AVR_ATxmega16D4__", 1},
- {"atxmega32a4", "__AVR_ATxmega32A4__", 1},
- {"atxmega32a4u", "__AVR_ATxmega32A4U__", 1},
- {"atxmega32c4", "__AVR_ATxmega32C4__", 1},
- {"atxmega32d4", "__AVR_ATxmega32D4__", 1},
- {"atxmega32e5", "__AVR_ATxmega32E5__", 1},
- {"atxmega16e5", "__AVR_ATxmega16E5__", 1},
- {"atxmega8e5", "__AVR_ATxmega8E5__", 1},
- {"atxmega32x1", "__AVR_ATxmega32X1__", 1},
- {"atxmega64a3", "__AVR_ATxmega64A3__", 1},
- {"atxmega64a3u", "__AVR_ATxmega64A3U__", 1},
- {"atxmega64a4u", "__AVR_ATxmega64A4U__", 1},
- {"atxmega64b1", "__AVR_ATxmega64B1__", 1},
- {"atxmega64b3", "__AVR_ATxmega64B3__", 1},
- {"atxmega64c3", "__AVR_ATxmega64C3__", 1},
- {"atxmega64d3", "__AVR_ATxmega64D3__", 1},
- {"atxmega64d4", "__AVR_ATxmega64D4__", 1},
- {"atxmega64a1", "__AVR_ATxmega64A1__", 1},
- {"atxmega64a1u", "__AVR_ATxmega64A1U__", 1},
- {"atxmega128a3", "__AVR_ATxmega128A3__", 2},
- {"atxmega128a3u", "__AVR_ATxmega128A3U__", 2},
- {"atxmega128b1", "__AVR_ATxmega128B1__", 2},
- {"atxmega128b3", "__AVR_ATxmega128B3__", 2},
- {"atxmega128c3", "__AVR_ATxmega128C3__", 2},
- {"atxmega128d3", "__AVR_ATxmega128D3__", 2},
- {"atxmega128d4", "__AVR_ATxmega128D4__", 2},
- {"atxmega192a3", "__AVR_ATxmega192A3__", 3},
- {"atxmega192a3u", "__AVR_ATxmega192A3U__", 3},
- {"atxmega192c3", "__AVR_ATxmega192C3__", 3},
- {"atxmega192d3", "__AVR_ATxmega192D3__", 3},
- {"atxmega256a3", "__AVR_ATxmega256A3__", 4},
- {"atxmega256a3u", "__AVR_ATxmega256A3U__", 4},
- {"atxmega256a3b", "__AVR_ATxmega256A3B__", 4},
- {"atxmega256a3bu", "__AVR_ATxmega256A3BU__", 4},
- {"atxmega256c3", "__AVR_ATxmega256C3__", 4},
- {"atxmega256d3", "__AVR_ATxmega256D3__", 4},
- {"atxmega384c3", "__AVR_ATxmega384C3__", 6},
- {"atxmega384d3", "__AVR_ATxmega384D3__", 6},
- {"atxmega128a1", "__AVR_ATxmega128A1__", 2},
- {"atxmega128a1u", "__AVR_ATxmega128A1U__", 2},
- {"atxmega128a4u", "__AVR_ATxmega128A4U__", 2},
+ {"at90s1200", "__AVR_AT90S1200__", 0, false},
+ {"attiny11", "__AVR_ATtiny11__", 0, false},
+ {"attiny12", "__AVR_ATtiny12__", 0, false},
+ {"attiny15", "__AVR_ATtiny15__", 0, false},
+ {"attiny28", "__AVR_ATtiny28__", 0, false},
+ {"at90s2313", "__AVR_AT90S2313__", 1, false},
+ {"at90s2323", "__AVR_AT90S2323__", 1, false},
+ {"at90s2333", "__AVR_AT90S2333__", 1, false},
+ {"at90s2343", "__AVR_AT90S2343__", 1, false},
+ {"attiny22", "__AVR_ATtiny22__", 1, false},
+ {"attiny26", "__AVR_ATtiny26__", 1, false},
+ {"at86rf401", "__AVR_AT86RF401__", 1, false},
+ {"at90s4414", "__AVR_AT90S4414__", 1, false},
+ {"at90s4433", "__AVR_AT90S4433__", 1, false},
+ {"at90s4434", "__AVR_AT90S4434__", 1, false},
+ {"at90s8515", "__AVR_AT90S8515__", 1, false},
+ {"at90c8534", "__AVR_AT90c8534__", 1, false},
+ {"at90s8535", "__AVR_AT90S8535__", 1, false},
+ {"ata5272", "__AVR_ATA5272__", 1, false},
+ {"ata6616c", "__AVR_ATA6616c__", 1, false},
+ {"attiny13", "__AVR_ATtiny13__", 1, false},
+ {"attiny13a", "__AVR_ATtiny13A__", 1, false},
+ {"attiny2313", "__AVR_ATtiny2313__", 1, false},
+ {"attiny2313a", "__AVR_ATtiny2313A__", 1, false},
+ {"attiny24", "__AVR_ATtiny24__", 1, false},
+ {"attiny24a", "__AVR_ATtiny24A__", 1, false},
+ {"attiny4313", "__AVR_ATtiny4313__", 1, false},
+ {"attiny44", "__AVR_ATtiny44__", 1, false},
+ {"attiny44a", "__AVR_ATtiny44A__", 1, false},
+ {"attiny84", "__AVR_ATtiny84__", 1, false},
+ {"attiny84a", "__AVR_ATtiny84A__", 1, false},
+ {"attiny25", "__AVR_ATtiny25__", 1, false},
+ {"attiny45", "__AVR_ATtiny45__", 1, false},
+ {"attiny85", "__AVR_ATtiny85__", 1, false},
+ {"attiny261", "__AVR_ATtiny261__", 1, false},
+ {"attiny261a", "__AVR_ATtiny261A__", 1, false},
+ {"attiny441", "__AVR_ATtiny441__", 1, false},
+ {"attiny461", "__AVR_ATtiny461__", 1, false},
+ {"attiny461a", "__AVR_ATtiny461A__", 1, false},
+ {"attiny841", "__AVR_ATtiny841__", 1, false},
+ {"attiny861", "__AVR_ATtiny861__", 1, false},
+ {"attiny861a", "__AVR_ATtiny861A__", 1, false},
+ {"attiny87", "__AVR_ATtiny87__", 1, false},
+ {"attiny43u", "__AVR_ATtiny43U__", 1, false},
+ {"attiny48", "__AVR_ATtiny48__", 1, false},
+ {"attiny88", "__AVR_ATtiny88__", 1, false},
+ {"attiny828", "__AVR_ATtiny828__", 1, false},
+ {"at43usb355", "__AVR_AT43USB355__", 1, false},
+ {"at76c711", "__AVR_AT76C711__", 1, false},
+ {"atmega103", "__AVR_ATmega103__", 1, false},
+ {"at43usb320", "__AVR_AT43USB320__", 1, false},
+ {"attiny167", "__AVR_ATtiny167__", 1, false},
+ {"at90usb82", "__AVR_AT90USB82__", 1, false},
+ {"at90usb162", "__AVR_AT90USB162__", 1, false},
+ {"ata5505", "__AVR_ATA5505__", 1, false},
+ {"ata6617c", "__AVR_ATA6617C__", 1, false},
+ {"ata664251", "__AVR_ATA664251__", 1, false},
+ {"atmega8u2", "__AVR_ATmega8U2__", 1, false},
+ {"atmega16u2", "__AVR_ATmega16U2__", 1, false},
+ {"atmega32u2", "__AVR_ATmega32U2__", 1, false},
+ {"attiny1634", "__AVR_ATtiny1634__", 1, false},
+ {"atmega8", "__AVR_ATmega8__", 1, false},
+ {"ata6289", "__AVR_ATA6289__", 1, false},
+ {"atmega8a", "__AVR_ATmega8A__", 1, false},
+ {"ata6285", "__AVR_ATA6285__", 1, false},
+ {"ata6286", "__AVR_ATA6286__", 1, false},
+ {"ata6612c", "__AVR_ATA6612C__", 1, false},
+ {"atmega48", "__AVR_ATmega48__", 1, false},
+ {"atmega48a", "__AVR_ATmega48A__", 1, false},
+ {"atmega48pa", "__AVR_ATmega48PA__", 1, false},
+ {"atmega48pb", "__AVR_ATmega48PB__", 1, false},
+ {"atmega48p", "__AVR_ATmega48P__", 1, false},
+ {"atmega88", "__AVR_ATmega88__", 1, false},
+ {"atmega88a", "__AVR_ATmega88A__", 1, false},
+ {"atmega88p", "__AVR_ATmega88P__", 1, false},
+ {"atmega88pa", "__AVR_ATmega88PA__", 1, false},
+ {"atmega88pb", "__AVR_ATmega88PB__", 1, false},
+ {"atmega8515", "__AVR_ATmega8515__", 1, false},
+ {"atmega8535", "__AVR_ATmega8535__", 1, false},
+ {"atmega8hva", "__AVR_ATmega8HVA__", 1, false},
+ {"at90pwm1", "__AVR_AT90PWM1__", 1, false},
+ {"at90pwm2", "__AVR_AT90PWM2__", 1, false},
+ {"at90pwm2b", "__AVR_AT90PWM2B__", 1, false},
+ {"at90pwm3", "__AVR_AT90PWM3__", 1, false},
+ {"at90pwm3b", "__AVR_AT90PWM3B__", 1, false},
+ {"at90pwm81", "__AVR_AT90PWM81__", 1, false},
+ {"ata5702m322", "__AVR_ATA5702M322__", 1, false},
+ {"ata5782", "__AVR_ATA5782__", 1, false},
+ {"ata5790", "__AVR_ATA5790__", 1, false},
+ {"ata5790n", "__AVR_ATA5790N__", 1, false},
+ {"ata5791", "__AVR_ATA5791__", 1, false},
+ {"ata5795", "__AVR_ATA5795__", 1, false},
+ {"ata5831", "__AVR_ATA5831__", 1, false},
+ {"ata6613c", "__AVR_ATA6613C__", 1, false},
+ {"ata6614q", "__AVR_ATA6614Q__", 1, false},
+ {"ata8210", "__AVR_ATA8210__", 1, false},
+ {"ata8510", "__AVR_ATA8510__", 1, false},
+ {"atmega16", "__AVR_ATmega16__", 1, false},
+ {"atmega16a", "__AVR_ATmega16A__", 1, false},
+ {"atmega161", "__AVR_ATmega161__", 1, false},
+ {"atmega162", "__AVR_ATmega162__", 1, false},
+ {"atmega163", "__AVR_ATmega163__", 1, false},
+ {"atmega164a", "__AVR_ATmega164A__", 1, false},
+ {"atmega164p", "__AVR_ATmega164P__", 1, false},
+ {"atmega164pa", "__AVR_ATmega164PA__", 1, false},
+ {"atmega165", "__AVR_ATmega165__", 1, false},
+ {"atmega165a", "__AVR_ATmega165A__", 1, false},
+ {"atmega165p", "__AVR_ATmega165P__", 1, false},
+ {"atmega165pa", "__AVR_ATmega165PA__", 1, false},
+ {"atmega168", "__AVR_ATmega168__", 1, false},
+ {"atmega168a", "__AVR_ATmega168A__", 1, false},
+ {"atmega168p", "__AVR_ATmega168P__", 1, false},
+ {"atmega168pa", "__AVR_ATmega168PA__", 1, false},
+ {"atmega168pb", "__AVR_ATmega168PB__", 1, false},
+ {"atmega169", "__AVR_ATmega169__", 1, false},
+ {"atmega169a", "__AVR_ATmega169A__", 1, false},
+ {"atmega169p", "__AVR_ATmega169P__", 1, false},
+ {"atmega169pa", "__AVR_ATmega169PA__", 1, false},
+ {"atmega32", "__AVR_ATmega32__", 1, false},
+ {"atmega32a", "__AVR_ATmega32A__", 1, false},
+ {"atmega323", "__AVR_ATmega323__", 1, false},
+ {"atmega324a", "__AVR_ATmega324A__", 1, false},
+ {"atmega324p", "__AVR_ATmega324P__", 1, false},
+ {"atmega324pa", "__AVR_ATmega324PA__", 1, false},
+ {"atmega324pb", "__AVR_ATmega324PB__", 1, false},
+ {"atmega325", "__AVR_ATmega325__", 1, false},
+ {"atmega325a", "__AVR_ATmega325A__", 1, false},
+ {"atmega325p", "__AVR_ATmega325P__", 1, false},
+ {"atmega325pa", "__AVR_ATmega325PA__", 1, false},
+ {"atmega3250", "__AVR_ATmega3250__", 1, false},
+ {"atmega3250a", "__AVR_ATmega3250A__", 1, false},
+ {"atmega3250p", "__AVR_ATmega3250P__", 1, false},
+ {"atmega3250pa", "__AVR_ATmega3250PA__", 1, false},
+ {"atmega328", "__AVR_ATmega328__", 1, false},
+ {"atmega328p", "__AVR_ATmega328P__", 1, false},
+ {"atmega328pb", "__AVR_ATmega328PB__", 1, false},
+ {"atmega329", "__AVR_ATmega329__", 1, false},
+ {"atmega329a", "__AVR_ATmega329A__", 1, false},
+ {"atmega329p", "__AVR_ATmega329P__", 1, false},
+ {"atmega329pa", "__AVR_ATmega329PA__", 1, false},
+ {"atmega3290", "__AVR_ATmega3290__", 1, false},
+ {"atmega3290a", "__AVR_ATmega3290A__", 1, false},
+ {"atmega3290p", "__AVR_ATmega3290P__", 1, false},
+ {"atmega3290pa", "__AVR_ATmega3290PA__", 1, false},
+ {"atmega406", "__AVR_ATmega406__", 1, false},
+ {"atmega64", "__AVR_ATmega64__", 1, false},
+ {"atmega64a", "__AVR_ATmega64A__", 1, false},
+ {"atmega640", "__AVR_ATmega640__", 1, false},
+ {"atmega644", "__AVR_ATmega644__", 1, false},
+ {"atmega644a", "__AVR_ATmega644A__", 1, false},
+ {"atmega644p", "__AVR_ATmega644P__", 1, false},
+ {"atmega644pa", "__AVR_ATmega644PA__", 1, false},
+ {"atmega645", "__AVR_ATmega645__", 1, false},
+ {"atmega645a", "__AVR_ATmega645A__", 1, false},
+ {"atmega645p", "__AVR_ATmega645P__", 1, false},
+ {"atmega649", "__AVR_ATmega649__", 1, false},
+ {"atmega649a", "__AVR_ATmega649A__", 1, false},
+ {"atmega649p", "__AVR_ATmega649P__", 1, false},
+ {"atmega6450", "__AVR_ATmega6450__", 1, false},
+ {"atmega6450a", "__AVR_ATmega6450A__", 1, false},
+ {"atmega6450p", "__AVR_ATmega6450P__", 1, false},
+ {"atmega6490", "__AVR_ATmega6490__", 1, false},
+ {"atmega6490a", "__AVR_ATmega6490A__", 1, false},
+ {"atmega6490p", "__AVR_ATmega6490P__", 1, false},
+ {"atmega64rfr2", "__AVR_ATmega64RFR2__", 1, false},
+ {"atmega644rfr2", "__AVR_ATmega644RFR2__", 1, false},
+ {"atmega16hva", "__AVR_ATmega16HVA__", 1, false},
+ {"atmega16hva2", "__AVR_ATmega16HVA2__", 1, false},
+ {"atmega16hvb", "__AVR_ATmega16HVB__", 1, false},
+ {"atmega16hvbrevb", "__AVR_ATmega16HVBREVB__", 1, false},
+ {"atmega32hvb", "__AVR_ATmega32HVB__", 1, false},
+ {"atmega32hvbrevb", "__AVR_ATmega32HVBREVB__", 1, false},
+ {"atmega64hve", "__AVR_ATmega64HVE__", 1, false},
+ {"atmega64hve2", "__AVR_ATmega64HVE2__", 1, false},
+ {"at90can32", "__AVR_AT90CAN32__", 1, false},
+ {"at90can64", "__AVR_AT90CAN64__", 1, false},
+ {"at90pwm161", "__AVR_AT90PWM161__", 1, false},
+ {"at90pwm216", "__AVR_AT90PWM216__", 1, false},
+ {"at90pwm316", "__AVR_AT90PWM316__", 1, false},
+ {"atmega32c1", "__AVR_ATmega32C1__", 1, false},
+ {"atmega64c1", "__AVR_ATmega64C1__", 1, false},
+ {"atmega16m1", "__AVR_ATmega16M1__", 1, false},
+ {"atmega32m1", "__AVR_ATmega32M1__", 1, false},
+ {"atmega64m1", "__AVR_ATmega64M1__", 1, false},
+ {"atmega16u4", "__AVR_ATmega16U4__", 1, false},
+ {"atmega32u4", "__AVR_ATmega32U4__", 1, false},
+ {"atmega32u6", "__AVR_ATmega32U6__", 1, false},
+ {"at90usb646", "__AVR_AT90USB646__", 1, false},
+ {"at90usb647", "__AVR_AT90USB647__", 1, false},
+ {"at90scr100", "__AVR_AT90SCR100__", 1, false},
+ {"at94k", "__AVR_AT94K__", 1, false},
+ {"m3000", "__AVR_AT000__", 1, false},
+ {"atmega128", "__AVR_ATmega128__", 2, false},
+ {"atmega128a", "__AVR_ATmega128A__", 2, false},
+ {"atmega1280", "__AVR_ATmega1280__", 2, false},
+ {"atmega1281", "__AVR_ATmega1281__", 2, false},
+ {"atmega1284", "__AVR_ATmega1284__", 2, false},
+ {"atmega1284p", "__AVR_ATmega1284P__", 2, false},
+ {"atmega128rfa1", "__AVR_ATmega128RFA1__", 2, false},
+ {"atmega128rfr2", "__AVR_ATmega128RFR2__", 2, false},
+ {"atmega1284rfr2", "__AVR_ATmega1284RFR2__", 2, false},
+ {"at90can128", "__AVR_AT90CAN128__", 2, false},
+ {"at90usb1286", "__AVR_AT90USB1286__", 2, false},
+ {"at90usb1287", "__AVR_AT90USB1287__", 2, false},
+ {"atmega2560", "__AVR_ATmega2560__", 4, false},
+ {"atmega2561", "__AVR_ATmega2561__", 4, false},
+ {"atmega256rfr2", "__AVR_ATmega256RFR2__", 4, false},
+ {"atmega2564rfr2", "__AVR_ATmega2564RFR2__", 4, false},
+ {"atxmega16a4", "__AVR_ATxmega16A4__", 1, false},
+ {"atxmega16a4u", "__AVR_ATxmega16A4U__", 1, false},
+ {"atxmega16c4", "__AVR_ATxmega16C4__", 1, false},
+ {"atxmega16d4", "__AVR_ATxmega16D4__", 1, false},
+ {"atxmega32a4", "__AVR_ATxmega32A4__", 1, false},
+ {"atxmega32a4u", "__AVR_ATxmega32A4U__", 1, false},
+ {"atxmega32c3", "__AVR_ATxmega32C3__", 1, false},
+ {"atxmega32c4", "__AVR_ATxmega32C4__", 1, false},
+ {"atxmega32d3", "__AVR_ATxmega32D3__", 1, false},
+ {"atxmega32d4", "__AVR_ATxmega32D4__", 1, false},
+ {"atxmega32e5", "__AVR_ATxmega32E5__", 1, false},
+ {"atxmega16e5", "__AVR_ATxmega16E5__", 1, false},
+ {"atxmega8e5", "__AVR_ATxmega8E5__", 1, false},
+ {"atxmega64a3", "__AVR_ATxmega64A3__", 1, false},
+ {"atxmega64a3u", "__AVR_ATxmega64A3U__", 1, false},
+ {"atxmega64a4u", "__AVR_ATxmega64A4U__", 1, false},
+ {"atxmega64b1", "__AVR_ATxmega64B1__", 1, false},
+ {"atxmega64b3", "__AVR_ATxmega64B3__", 1, false},
+ {"atxmega64c3", "__AVR_ATxmega64C3__", 1, false},
+ {"atxmega64d3", "__AVR_ATxmega64D3__", 1, false},
+ {"atxmega64d4", "__AVR_ATxmega64D4__", 1, false},
+ {"atxmega64a1", "__AVR_ATxmega64A1__", 1, false},
+ {"atxmega64a1u", "__AVR_ATxmega64A1U__", 1, false},
+ {"atxmega128a3", "__AVR_ATxmega128A3__", 2, false},
+ {"atxmega128a3u", "__AVR_ATxmega128A3U__", 2, false},
+ {"atxmega128b1", "__AVR_ATxmega128B1__", 2, false},
+ {"atxmega128b3", "__AVR_ATxmega128B3__", 2, false},
+ {"atxmega128c3", "__AVR_ATxmega128C3__", 2, false},
+ {"atxmega128d3", "__AVR_ATxmega128D3__", 2, false},
+ {"atxmega128d4", "__AVR_ATxmega128D4__", 2, false},
+ {"atxmega192a3", "__AVR_ATxmega192A3__", 3, false},
+ {"atxmega192a3u", "__AVR_ATxmega192A3U__", 3, false},
+ {"atxmega192c3", "__AVR_ATxmega192C3__", 3, false},
+ {"atxmega192d3", "__AVR_ATxmega192D3__", 3, false},
+ {"atxmega256a3", "__AVR_ATxmega256A3__", 4, false},
+ {"atxmega256a3u", "__AVR_ATxmega256A3U__", 4, false},
+ {"atxmega256a3b", "__AVR_ATxmega256A3B__", 4, false},
+ {"atxmega256a3bu", "__AVR_ATxmega256A3BU__", 4, false},
+ {"atxmega256c3", "__AVR_ATxmega256C3__", 4, false},
+ {"atxmega256d3", "__AVR_ATxmega256D3__", 4, false},
+ {"atxmega384c3", "__AVR_ATxmega384C3__", 6, false},
+ {"atxmega384d3", "__AVR_ATxmega384D3__", 6, false},
+ {"atxmega128a1", "__AVR_ATxmega128A1__", 2, false},
+ {"atxmega128a1u", "__AVR_ATxmega128A1U__", 2, false},
+ {"atxmega128a4u", "__AVR_ATxmega128A4U__", 2, false},
{"attiny4", "__AVR_ATtiny4__", 0, true},
{"attiny5", "__AVR_ATtiny5__", 0, true},
{"attiny9", "__AVR_ATtiny9__", 0, true},
@@ -276,31 +291,42 @@ static MCUInfo AVRMcus[] = {
{"attiny40", "__AVR_ATtiny40__", 0, true},
{"attiny102", "__AVR_ATtiny102__", 0, true},
{"attiny104", "__AVR_ATtiny104__", 0, true},
- {"attiny202", "__AVR_ATtiny202__", 1},
- {"attiny402", "__AVR_ATtiny402__", 1},
- {"attiny204", "__AVR_ATtiny204__", 1},
- {"attiny404", "__AVR_ATtiny404__", 1},
- {"attiny804", "__AVR_ATtiny804__", 1},
- {"attiny1604", "__AVR_ATtiny1604__", 1},
- {"attiny406", "__AVR_ATtiny406__", 1},
- {"attiny806", "__AVR_ATtiny806__", 1},
- {"attiny1606", "__AVR_ATtiny1606__", 1},
- {"attiny807", "__AVR_ATtiny807__", 1},
- {"attiny1607", "__AVR_ATtiny1607__", 1},
- {"attiny212", "__AVR_ATtiny212__", 1},
- {"attiny412", "__AVR_ATtiny412__", 1},
- {"attiny214", "__AVR_ATtiny214__", 1},
- {"attiny414", "__AVR_ATtiny414__", 1},
- {"attiny814", "__AVR_ATtiny814__", 1},
- {"attiny1614", "__AVR_ATtiny1614__", 1},
- {"attiny416", "__AVR_ATtiny416__", 1},
- {"attiny816", "__AVR_ATtiny816__", 1},
- {"attiny1616", "__AVR_ATtiny1616__", 1},
- {"attiny3216", "__AVR_ATtiny3216__", 1},
- {"attiny417", "__AVR_ATtiny417__", 1},
- {"attiny817", "__AVR_ATtiny817__", 1},
- {"attiny1617", "__AVR_ATtiny1617__", 1},
- {"attiny3217", "__AVR_ATtiny3217__", 1},
+ {"attiny202", "__AVR_ATtiny202__", 1, false},
+ {"attiny402", "__AVR_ATtiny402__", 1, false},
+ {"attiny204", "__AVR_ATtiny204__", 1, false},
+ {"attiny404", "__AVR_ATtiny404__", 1, false},
+ {"attiny804", "__AVR_ATtiny804__", 1, false},
+ {"attiny1604", "__AVR_ATtiny1604__", 1, false},
+ {"attiny406", "__AVR_ATtiny406__", 1, false},
+ {"attiny806", "__AVR_ATtiny806__", 1, false},
+ {"attiny1606", "__AVR_ATtiny1606__", 1, false},
+ {"attiny807", "__AVR_ATtiny807__", 1, false},
+ {"attiny1607", "__AVR_ATtiny1607__", 1, false},
+ {"attiny212", "__AVR_ATtiny212__", 1, false},
+ {"attiny412", "__AVR_ATtiny412__", 1, false},
+ {"attiny214", "__AVR_ATtiny214__", 1, false},
+ {"attiny414", "__AVR_ATtiny414__", 1, false},
+ {"attiny814", "__AVR_ATtiny814__", 1, false},
+ {"attiny1614", "__AVR_ATtiny1614__", 1, false},
+ {"attiny416", "__AVR_ATtiny416__", 1, false},
+ {"attiny816", "__AVR_ATtiny816__", 1, false},
+ {"attiny1616", "__AVR_ATtiny1616__", 1, false},
+ {"attiny3216", "__AVR_ATtiny3216__", 1, false},
+ {"attiny417", "__AVR_ATtiny417__", 1, false},
+ {"attiny817", "__AVR_ATtiny817__", 1, false},
+ {"attiny1617", "__AVR_ATtiny1617__", 1, false},
+ {"attiny3217", "__AVR_ATtiny3217__", 1, false},
+ {"attiny1624", "__AVR_ATtiny1624__", 1, false},
+ {"attiny1626", "__AVR_ATtiny1626__", 1, false},
+ {"attiny1627", "__AVR_ATtiny1627__", 1, false},
+ {"atmega808", "__AVR_ATmega808__", 1, false},
+ {"atmega809", "__AVR_ATmega809__", 1, false},
+ {"atmega1608", "__AVR_ATmega1608__", 1, false},
+ {"atmega1609", "__AVR_ATmega1609__", 1, false},
+ {"atmega3208", "__AVR_ATmega3208__", 1, false},
+ {"atmega3209", "__AVR_ATmega3209__", 1, false},
+ {"atmega4808", "__AVR_ATmega4808__", 1, false},
+ {"atmega4809", "__AVR_ATmega4809__", 1, false},
};
} // namespace targets
@@ -354,6 +380,9 @@ void AVRTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__AVR__");
Builder.defineMacro("__ELF__");
+ if (ABI == "avrtiny")
+ Builder.defineMacro("__AVR_TINY__", "1");
+
if (!this->CPU.empty()) {
auto It = llvm::find_if(
AVRMcus, [&](const MCUInfo &Info) { return Info.Name == this->CPU; });
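The lookup just above scans AVRMcus with llvm::find_if for the entry matching the -mmcu name. A plain-C++ sketch of the same table search, with an abridged two-entry table:

    #include <algorithm>
    #include <cassert>
    #include <cstring>

    struct MCUInfo {
      const char *Name;
      const char *DefineName;
      int NumFlashBanks; // number of 64 KiB flash segments
      bool IsTiny;       // avrtiny family
    };

    static const MCUInfo Mcus[] = {
        {"atmega328p", "__AVR_ATmega328P__", 1, false},
        {"attiny85", "__AVR_ATtiny85__", 1, false},
    };

    int main() {
      const char *CPU = "atmega328p";
      auto *It =
          std::find_if(std::begin(Mcus), std::end(Mcus),
                       [&](const MCUInfo &I) { return !strcmp(I.Name, CPU); });
      assert(It != std::end(Mcus) &&
             !strcmp(It->DefineName, "__AVR_ATmega328P__"));
    }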
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.cpp
new file mode 100644
index 000000000000..adcffd90ae78
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.cpp
@@ -0,0 +1,314 @@
+//===--- CSKY.cpp - Implement CSKY target feature support -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements CSKY TargetInfo objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CSKY.h"
+
+using namespace clang;
+using namespace clang::targets;
+
+bool CSKYTargetInfo::isValidCPUName(StringRef Name) const {
+ return llvm::CSKY::parseCPUArch(Name) != llvm::CSKY::ArchKind::INVALID;
+}
+
+bool CSKYTargetInfo::setCPU(const std::string &Name) {
+ llvm::CSKY::ArchKind archKind = llvm::CSKY::parseCPUArch(Name);
+ bool isValid = (archKind != llvm::CSKY::ArchKind::INVALID);
+
+ if (isValid) {
+ CPU = Name;
+ Arch = archKind;
+ }
+
+ return isValid;
+}
+
+void CSKYTargetInfo::getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ Builder.defineMacro("__ELF__");
+ Builder.defineMacro("__csky__", "2");
+ Builder.defineMacro("__CSKY__", "2");
+ Builder.defineMacro("__ckcore__", "2");
+ Builder.defineMacro("__CKCORE__", "2");
+
+ Builder.defineMacro("__CSKYABI__", ABI == "abiv2" ? "2" : "1");
+ Builder.defineMacro("__cskyabi__", ABI == "abiv2" ? "2" : "1");
+
+ StringRef ArchName = "ck810";
+ StringRef CPUName = "ck810";
+
+ if (Arch != llvm::CSKY::ArchKind::INVALID) {
+ ArchName = llvm::CSKY::getArchName(Arch);
+ CPUName = CPU;
+ }
+
+ Builder.defineMacro("__" + ArchName.upper() + "__");
+ Builder.defineMacro("__" + ArchName.lower() + "__");
+ Builder.defineMacro("__" + CPUName.upper() + "__");
+ Builder.defineMacro("__" + CPUName.lower() + "__");
+
+ // TODO: Add big-endian (BE) support if/when it is supported.
+ StringRef endian = "__cskyLE__";
+
+ Builder.defineMacro(endian);
+ Builder.defineMacro(endian.upper());
+ Builder.defineMacro(endian.lower());
+
+ if (DSPV2) {
+ StringRef dspv2 = "__CSKY_DSPV2__";
+ Builder.defineMacro(dspv2);
+ Builder.defineMacro(dspv2.lower());
+ }
+
+ if (VDSPV2) {
+ StringRef vdspv2 = "__CSKY_VDSPV2__";
+ Builder.defineMacro(vdspv2);
+ Builder.defineMacro(vdspv2.lower());
+
+ if (HardFloat) {
+ StringRef vdspv2_f = "__CSKY_VDSPV2_F__";
+ Builder.defineMacro(vdspv2_f);
+ Builder.defineMacro(vdspv2_f.lower());
+ }
+ }
+ if (VDSPV1) {
+ StringRef vdspv1_64 = "__CSKY_VDSP64__";
+ StringRef vdspv1_128 = "__CSKY_VDSP128__";
+
+ Builder.defineMacro(vdspv1_64);
+ Builder.defineMacro(vdspv1_64.lower());
+ Builder.defineMacro(vdspv1_128);
+ Builder.defineMacro(vdspv1_128.lower());
+ }
+ if (is3E3R1) {
+ StringRef is3e3r1 = "__CSKY_3E3R1__";
+ Builder.defineMacro(is3e3r1);
+ Builder.defineMacro(is3e3r1.lower());
+ }
+}
+
+bool CSKYTargetInfo::hasFeature(StringRef Feature) const {
+ return llvm::StringSwitch<bool>(Feature)
+ .Case("hard-float", HardFloat)
+ .Case("hard-float-abi", HardFloatABI)
+ .Case("fpuv2_sf", FPUV2_SF)
+ .Case("fpuv2_df", FPUV2_DF)
+ .Case("fpuv3_sf", FPUV3_SF)
+ .Case("fpuv3_df", FPUV3_DF)
+ .Case("vdspv2", VDSPV2)
+ .Case("dspv2", DSPV2)
+ .Case("vdspv1", VDSPV1)
+ .Case("3e3r1", is3E3R1)
+ .Default(false);
+}
+
+bool CSKYTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) {
+ for (const auto &Feature : Features) {
+ if (Feature == "+hard-float")
+ HardFloat = true;
+ if (Feature == "+hard-float-abi")
+ HardFloatABI = true;
+ if (Feature == "+fpuv2_sf")
+ FPUV2_SF = true;
+ if (Feature == "+fpuv2_df")
+ FPUV2_DF = true;
+ if (Feature == "+fpuv3_sf")
+ FPUV3_SF = true;
+ if (Feature == "+fpuv3_df")
+ FPUV3_DF = true;
+ if (Feature == "+vdspv2")
+ VDSPV2 = true;
+ if (Feature == "+dspv2")
+ DSPV2 = true;
+ if (Feature == "+vdspv1")
+ VDSPV1 = true;
+ if (Feature == "+3e3r1")
+ is3E3R1 = true;
+ }
+
+ return true;
+}
+
+ArrayRef<Builtin::Info> CSKYTargetInfo::getTargetBuiltins() const {
+ return ArrayRef<Builtin::Info>();
+}
+
+ArrayRef<const char *> CSKYTargetInfo::getGCCRegNames() const {
+ static const char *const GCCRegNames[] = {
+ // Integer registers
+ "r0",
+ "r1",
+ "r2",
+ "r3",
+ "r4",
+ "r5",
+ "r6",
+ "r7",
+ "r8",
+ "r9",
+ "r10",
+ "r11",
+ "r12",
+ "r13",
+ "r14",
+ "r15",
+ "r16",
+ "r17",
+ "r18",
+ "r19",
+ "r20",
+ "r21",
+ "r22",
+ "r23",
+ "r24",
+ "r25",
+ "r26",
+ "r27",
+ "r28",
+ "r29",
+ "r30",
+ "r31",
+
+ // Floating point registers
+ "fr0",
+ "fr1",
+ "fr2",
+ "fr3",
+ "fr4",
+ "fr5",
+ "fr6",
+ "fr7",
+ "fr8",
+ "fr9",
+ "fr10",
+ "fr11",
+ "fr12",
+ "fr13",
+ "fr14",
+ "fr15",
+ "fr16",
+ "fr17",
+ "fr18",
+ "fr19",
+ "fr20",
+ "fr21",
+ "fr22",
+ "fr23",
+ "fr24",
+ "fr25",
+ "fr26",
+ "fr27",
+ "fr28",
+ "fr29",
+ "fr30",
+ "fr31",
+
+ };
+ return llvm::makeArrayRef(GCCRegNames);
+}
+
+ArrayRef<TargetInfo::GCCRegAlias> CSKYTargetInfo::getGCCRegAliases() const {
+ static const TargetInfo::GCCRegAlias GCCRegAliases[] = {
+ {{"a0"}, "r0"},
+ {{"a1"}, "r1"},
+ {{"a2"}, "r2"},
+ {{"a3"}, "r3"},
+ {{"l0"}, "r4"},
+ {{"l1"}, "r5"},
+ {{"l2"}, "r6"},
+ {{"l3"}, "r7"},
+ {{"l4"}, "r8"},
+ {{"l5"}, "r9"},
+ {{"l6"}, "r10"},
+ {{"l7"}, "r11"},
+ {{"t0"}, "r12"},
+ {{"t1"}, "r13"},
+ {{"sp"}, "r14"},
+ {{"lr"}, "r15"},
+ {{"l8"}, "r16"},
+ {{"l9"}, "r17"},
+ {{"t2"}, "r18"},
+ {{"t3"}, "r19"},
+ {{"t4"}, "r20"},
+ {{"t5"}, "r21"},
+ {{"t6"}, "r22"},
+ {{"t7", "fp"}, "r23"},
+ {{"t8", "top"}, "r24"},
+ {{"t9", "bsp"}, "r25"},
+ {{"r26"}, "r26"},
+ {{"r27"}, "r27"},
+ {{"gb", "rgb", "rdb"}, "r28"},
+ {{"tb", "rtb"}, "r29"},
+ {{"svbr"}, "r30"},
+ {{"tls"}, "r31"},
+
+ {{"vr0"}, "fr0"},
+ {{"vr1"}, "fr1"},
+ {{"vr2"}, "fr2"},
+ {{"vr3"}, "fr3"},
+ {{"vr4"}, "fr4"},
+ {{"vr5"}, "fr5"},
+ {{"vr6"}, "fr6"},
+ {{"vr7"}, "fr7"},
+ {{"vr8"}, "fr8"},
+ {{"vr9"}, "fr9"},
+ {{"vr10"}, "fr10"},
+ {{"vr11"}, "fr11"},
+ {{"vr12"}, "fr12"},
+ {{"vr13"}, "fr13"},
+ {{"vr14"}, "fr14"},
+ {{"vr15"}, "fr15"},
+ {{"vr16"}, "fr16"},
+ {{"vr17"}, "fr17"},
+ {{"vr18"}, "fr18"},
+ {{"vr19"}, "fr19"},
+ {{"vr20"}, "fr20"},
+ {{"vr21"}, "fr21"},
+ {{"vr22"}, "fr22"},
+ {{"vr23"}, "fr23"},
+ {{"vr24"}, "fr24"},
+ {{"vr25"}, "fr25"},
+ {{"vr26"}, "fr26"},
+ {{"vr27"}, "fr27"},
+ {{"vr28"}, "fr28"},
+ {{"vr29"}, "fr29"},
+ {{"vr30"}, "fr30"},
+ {{"vr31"}, "fr31"},
+
+ };
+ return llvm::makeArrayRef(GCCRegAliases);
+}
+
+bool CSKYTargetInfo::validateAsmConstraint(
+ const char *&Name, TargetInfo::ConstraintInfo &Info) const {
+ switch (*Name) {
+ default:
+ return false;
+ case 'a':
+ case 'b':
+ case 'c':
+ case 'y':
+ case 'l':
+ case 'h':
+ case 'w':
+ case 'v': // A floating-point and vector register.
+ case 'z':
+ Info.setAllowsRegister();
+ return true;
+ }
+}
+
+unsigned CSKYTargetInfo::getMinGlobalAlign(uint64_t Size) const {
+ if (Size >= 32)
+ return 32;
+ return 0;
+}
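CSKYTargetInfo::getTargetDefines above emits each architecture and CPU macro in both upper- and lowercase spellings (e.g. __CK810__ and __ck810__). A sketch of that define-both-cases pattern, with a std::set standing in for clang's MacroBuilder:

    #include <algorithm>
    #include <cctype>
    #include <iostream>
    #include <set>
    #include <string>

    static std::set<std::string> Macros; // stand-in for MacroBuilder

    static void defineBothCases(const std::string &Name) {
      std::string Upper = Name, Lower = Name;
      std::transform(Upper.begin(), Upper.end(), Upper.begin(),
                     [](unsigned char c) { return std::toupper(c); });
      std::transform(Lower.begin(), Lower.end(), Lower.begin(),
                     [](unsigned char c) { return std::tolower(c); });
      Macros.insert("__" + Upper + "__");
      Macros.insert("__" + Lower + "__");
    }

    int main() {
      defineBothCases("ck810");
      for (const auto &M : Macros)
        std::cout << M << "\n"; // prints __CK810__ and __ck810__
    }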
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.h b/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.h
new file mode 100644
index 000000000000..7e932e7c86b1
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/CSKY.h
@@ -0,0 +1,107 @@
+//===--- CSKY.h - Declare CSKY target feature support -----------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares CSKY TargetInfo objects.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_BASIC_TARGETS_CSKY_H
+#define LLVM_CLANG_LIB_BASIC_TARGETS_CSKY_H
+
+#include "clang/Basic/MacroBuilder.h"
+#include "clang/Basic/TargetInfo.h"
+#include "llvm/Support/CSKYTargetParser.h"
+
+namespace clang {
+namespace targets {
+
+class LLVM_LIBRARY_VISIBILITY CSKYTargetInfo : public TargetInfo {
+protected:
+ std::string ABI;
+ llvm::CSKY::ArchKind Arch = llvm::CSKY::ArchKind::INVALID;
+ std::string CPU;
+
+ bool HardFloat = false;
+ bool HardFloatABI = false;
+ bool FPUV2_SF = false;
+ bool FPUV2_DF = false;
+ bool FPUV3_SF = false;
+ bool FPUV3_DF = false;
+ bool VDSPV2 = false;
+ bool VDSPV1 = false;
+ bool DSPV2 = false;
+ bool is3E3R1 = false;
+
+public:
+ CSKYTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : TargetInfo(Triple) {
+ NoAsmVariants = true;
+ LongLongAlign = 32;
+ SuitableAlign = 32;
+ DoubleAlign = LongDoubleAlign = 32;
+ SizeType = UnsignedInt;
+ PtrDiffType = SignedInt;
+ IntPtrType = SignedInt;
+ WCharType = SignedInt;
+ WIntType = UnsignedInt;
+
+ UseZeroLengthBitfieldAlignment = true;
+ MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 32;
+ resetDataLayout("e-m:e-S32-p:32:32-i32:32:32-i64:32:32-f32:32:32-f64:32:32-"
+ "v64:32:32-v128:32:32-a:0:32-Fi32-n32");
+
+ setABI("abiv2");
+ }
+
+ StringRef getABI() const override { return ABI; }
+ bool setABI(const std::string &Name) override {
+ if (Name == "abiv2" || Name == "abiv1") {
+ ABI = Name;
+ return true;
+ }
+ return false;
+ }
+
+ bool setCPU(const std::string &Name) override;
+
+ bool isValidCPUName(StringRef Name) const override;
+
+ virtual unsigned getMinGlobalAlign(uint64_t) const override;
+
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override;
+
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return VoidPtrBuiltinVaList;
+ }
+
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const override;
+
+ const char *getClobbers() const override { return ""; }
+
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override;
+ bool hasFeature(StringRef Feature) const override;
+ bool handleTargetFeatures(std::vector<std::string> &Features,
+ DiagnosticsEngine &Diags) override;
+
+ /// Whether the target allows overaligning the ABI-specified preferred alignment.
+ bool allowsLargerPreferedTypeAlignment() const override { return false; }
+
+ bool hasBitIntType() const override { return true; }
+
+protected:
+ ArrayRef<const char *> getGCCRegNames() const override;
+
+ ArrayRef<GCCRegAlias> getGCCRegAliases() const override;
+};
+
+} // namespace targets
+} // namespace clang
+
+#endif // LLVM_CLANG_LIB_BASIC_TARGETS_CSKY_H
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/DirectX.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/DirectX.cpp
new file mode 100644
index 000000000000..0dd27e6e93b3
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/DirectX.cpp
@@ -0,0 +1,22 @@
+//===--- DirectX.cpp - Implement DirectX target feature support -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements DirectX TargetInfo objects.
+//
+//===----------------------------------------------------------------------===//
+
+#include "DirectX.h"
+#include "Targets.h"
+
+using namespace clang;
+using namespace clang::targets;
+
+void DirectXTargetInfo::getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const {
+ DefineStd(Builder, "DIRECTX", Opts);
+}
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/DirectX.h b/contrib/llvm-project/clang/lib/Basic/Targets/DirectX.h
new file mode 100644
index 000000000000..a773090b413f
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/DirectX.h
@@ -0,0 +1,93 @@
+//===--- DirectX.h - Declare DirectX target feature support -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares DXIL TargetInfo objects.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_BASIC_TARGETS_DIRECTX_H
+#define LLVM_CLANG_LIB_BASIC_TARGETS_DIRECTX_H
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TargetOptions.h"
+#include "llvm/ADT/Triple.h"
+#include "llvm/Support/Compiler.h"
+
+namespace clang {
+namespace targets {
+
+static const unsigned DirectXAddrSpaceMap[] = {
+ 0, // Default
+ 1, // opencl_global
+ 3, // opencl_local
+ 2, // opencl_constant
+ 0, // opencl_private
+ 4, // opencl_generic
+ 5, // opencl_global_device
+ 6, // opencl_global_host
+ 0, // cuda_device
+ 0, // cuda_constant
+ 0, // cuda_shared
+ // SYCL address space values in this map are placeholders.
+ 0, // sycl_global
+ 0, // sycl_global_device
+ 0, // sycl_global_host
+ 0, // sycl_local
+ 0, // sycl_private
+ 0, // ptr32_sptr
+ 0, // ptr32_uptr
+ 0 // ptr64
+};
+
+class LLVM_LIBRARY_VISIBILITY DirectXTargetInfo : public TargetInfo {
+public:
+ DirectXTargetInfo(const llvm::Triple &Triple, const TargetOptions &)
+ : TargetInfo(Triple) {
+ TLSSupported = false;
+ VLASupported = false;
+ LongWidth = LongAlign = 64;
+ AddrSpaceMap = &DirectXAddrSpaceMap;
+ UseAddrSpaceMapMangling = true;
+ HasLegalHalfType = true;
+ HasFloat16 = true;
+ NoAsmVariants = true;
+ resetDataLayout("e-m:e-p:32:32-i1:32-i8:8-i16:16-i32:32-i64:64-f16:16-f32:"
+ "32-f64:64-n8:16:32:64");
+ TheCXXABI.set(TargetCXXABI::Microsoft);
+ }
+ bool useFP16ConversionIntrinsics() const override { return false; }
+ void getTargetDefines(const LangOptions &Opts,
+ MacroBuilder &Builder) const override;
+
+ bool hasFeature(StringRef Feature) const override {
+ return Feature == "directx";
+ }
+
+ ArrayRef<Builtin::Info> getTargetBuiltins() const override { return None; }
+
+ const char *getClobbers() const override { return ""; }
+
+ ArrayRef<const char *> getGCCRegNames() const override { return None; }
+
+ bool validateAsmConstraint(const char *&Name,
+ TargetInfo::ConstraintInfo &info) const override {
+ return true;
+ }
+
+ ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
+ return None;
+ }
+
+ BuiltinVaListKind getBuiltinVaListKind() const override {
+ return TargetInfo::VoidPtrBuiltinVaList;
+ }
+};
+
+} // namespace targets
+} // namespace clang
+
+#endif // LLVM_CLANG_LIB_BASIC_TARGETS_DIRECTX_H
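DirectXAddrSpaceMap above is indexed by Clang's language-level address space enumeration: the language address space is the array index and the stored value is the target address space number. A minimal standalone sketch of that index-based lookup, with made-up enum values rather than Clang's real LangAS numbering:

#include <cstdio>

// Sketch of index-based address-space mapping as in DirectXAddrSpaceMap above.
enum class ToyLangAS { Default = 0, OpenCLGlobal, OpenCLLocal, OpenCLConstant };

static const unsigned ToyAddrSpaceMap[] = {
    0, // Default
    1, // opencl_global
    3, // opencl_local
    2, // opencl_constant
};

unsigned mapAddrSpace(ToyLangAS AS) {
  return ToyAddrSpaceMap[static_cast<unsigned>(AS)];
}

int main() {
  std::printf("opencl_local -> %u\n", mapAddrSpace(ToyLangAS::OpenCLLocal));
  return 0;
}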
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp
index 75e82d819900..9dd60adb00fb 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/NVPTX.cpp
@@ -179,7 +179,7 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("__PTX__");
Builder.defineMacro("__NVPTX__");
- if (Opts.CUDAIsDevice) {
+ if (Opts.CUDAIsDevice || Opts.OpenMPIsDevice) {
// Set __CUDA_ARCH__ for the GPU specified.
std::string CUDAArchCode = [this] {
switch (GPU) {
@@ -205,6 +205,7 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
case CudaArch::GFX909:
case CudaArch::GFX90a:
case CudaArch::GFX90c:
+ case CudaArch::GFX940:
case CudaArch::GFX1010:
case CudaArch::GFX1011:
case CudaArch::GFX1012:
@@ -215,6 +216,11 @@ void NVPTXTargetInfo::getTargetDefines(const LangOptions &Opts,
case CudaArch::GFX1033:
case CudaArch::GFX1034:
case CudaArch::GFX1035:
+ case CudaArch::GFX1036:
+ case CudaArch::GFX1100:
+ case CudaArch::GFX1101:
+ case CudaArch::GFX1102:
+ case CudaArch::GFX1103:
case CudaArch::Generic:
case CudaArch::LAST:
break;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.cpp
index f8f12daaa072..f2ed076039a0 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.cpp
@@ -73,19 +73,19 @@ void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
char Str[7];
if (OsVersion.getMajor() < 10) {
Str[0] = '0' + OsVersion.getMajor();
- Str[1] = '0' + (OsVersion.getMinor().getValueOr(0) / 10);
- Str[2] = '0' + (OsVersion.getMinor().getValueOr(0) % 10);
- Str[3] = '0' + (OsVersion.getSubminor().getValueOr(0) / 10);
- Str[4] = '0' + (OsVersion.getSubminor().getValueOr(0) % 10);
+ Str[1] = '0' + (OsVersion.getMinor().value_or(0) / 10);
+ Str[2] = '0' + (OsVersion.getMinor().value_or(0) % 10);
+ Str[3] = '0' + (OsVersion.getSubminor().value_or(0) / 10);
+ Str[4] = '0' + (OsVersion.getSubminor().value_or(0) % 10);
Str[5] = '\0';
} else {
// Handle versions >= 10.
Str[0] = '0' + (OsVersion.getMajor() / 10);
Str[1] = '0' + (OsVersion.getMajor() % 10);
- Str[2] = '0' + (OsVersion.getMinor().getValueOr(0) / 10);
- Str[3] = '0' + (OsVersion.getMinor().getValueOr(0) % 10);
- Str[4] = '0' + (OsVersion.getSubminor().getValueOr(0) / 10);
- Str[5] = '0' + (OsVersion.getSubminor().getValueOr(0) % 10);
+ Str[2] = '0' + (OsVersion.getMinor().value_or(0) / 10);
+ Str[3] = '0' + (OsVersion.getMinor().value_or(0) % 10);
+ Str[4] = '0' + (OsVersion.getSubminor().value_or(0) / 10);
+ Str[5] = '0' + (OsVersion.getSubminor().value_or(0) % 10);
Str[6] = '\0';
}
if (Triple.isTvOS())
@@ -98,12 +98,25 @@ void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
assert(OsVersion < VersionTuple(10) && "Invalid version!");
char Str[6];
Str[0] = '0' + OsVersion.getMajor();
- Str[1] = '0' + (OsVersion.getMinor().getValueOr(0) / 10);
- Str[2] = '0' + (OsVersion.getMinor().getValueOr(0) % 10);
- Str[3] = '0' + (OsVersion.getSubminor().getValueOr(0) / 10);
- Str[4] = '0' + (OsVersion.getSubminor().getValueOr(0) % 10);
+ Str[1] = '0' + (OsVersion.getMinor().value_or(0) / 10);
+ Str[2] = '0' + (OsVersion.getMinor().value_or(0) % 10);
+ Str[3] = '0' + (OsVersion.getSubminor().value_or(0) / 10);
+ Str[4] = '0' + (OsVersion.getSubminor().value_or(0) % 10);
Str[5] = '\0';
Builder.defineMacro("__ENVIRONMENT_WATCH_OS_VERSION_MIN_REQUIRED__", Str);
+ } else if (Triple.isDriverKit()) {
+ assert(OsVersion.getMajor() < 100 &&
+ OsVersion.getMinor().value_or(0) < 100 &&
+ OsVersion.getSubminor().value_or(0) < 100 && "Invalid version!");
+ char Str[7];
+ Str[0] = '0' + (OsVersion.getMajor() / 10);
+ Str[1] = '0' + (OsVersion.getMajor() % 10);
+ Str[2] = '0' + (OsVersion.getMinor().value_or(0) / 10);
+ Str[3] = '0' + (OsVersion.getMinor().value_or(0) % 10);
+ Str[4] = '0' + (OsVersion.getSubminor().value_or(0) / 10);
+ Str[5] = '0' + (OsVersion.getSubminor().value_or(0) % 10);
+ Str[6] = '\0';
+ Builder.defineMacro("__ENVIRONMENT_DRIVERKIT_VERSION_MIN_REQUIRED__", Str);
} else if (Triple.isMacOSX()) {
// Note that the Driver allows versions which aren't representable in the
// define (because we only get a single digit for the minor and micro
@@ -114,17 +127,17 @@ void getDarwinDefines(MacroBuilder &Builder, const LangOptions &Opts,
if (OsVersion < VersionTuple(10, 10)) {
Str[0] = '0' + (OsVersion.getMajor() / 10);
Str[1] = '0' + (OsVersion.getMajor() % 10);
- Str[2] = '0' + std::min(OsVersion.getMinor().getValueOr(0), 9U);
- Str[3] = '0' + std::min(OsVersion.getSubminor().getValueOr(0), 9U);
+ Str[2] = '0' + std::min(OsVersion.getMinor().value_or(0), 9U);
+ Str[3] = '0' + std::min(OsVersion.getSubminor().value_or(0), 9U);
Str[4] = '\0';
} else {
// Handle versions > 10.9.
Str[0] = '0' + (OsVersion.getMajor() / 10);
Str[1] = '0' + (OsVersion.getMajor() % 10);
- Str[2] = '0' + (OsVersion.getMinor().getValueOr(0) / 10);
- Str[3] = '0' + (OsVersion.getMinor().getValueOr(0) % 10);
- Str[4] = '0' + (OsVersion.getSubminor().getValueOr(0) / 10);
- Str[5] = '0' + (OsVersion.getSubminor().getValueOr(0) % 10);
+ Str[2] = '0' + (OsVersion.getMinor().value_or(0) / 10);
+ Str[3] = '0' + (OsVersion.getMinor().value_or(0) % 10);
+ Str[4] = '0' + (OsVersion.getSubminor().value_or(0) / 10);
+ Str[5] = '0' + (OsVersion.getSubminor().value_or(0) % 10);
Str[6] = '\0';
}
Builder.defineMacro("__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__", Str);
@@ -202,6 +215,9 @@ static void addVisualCDefines(const LangOptions &Opts, MacroBuilder &Builder) {
}
}
+ if (Opts.Kernel)
+ Builder.defineMacro("_KERNEL_MODE");
+
Builder.defineMacro("_INTEGRAL_MAX_BITS", "64");
Builder.defineMacro("__STDC_NO_THREADS__");
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h b/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h
index 3c1830d5f8e8..a814f681b146 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/OSTargets.h
@@ -108,6 +108,8 @@ public:
this->TLSSupported = !Triple.isOSVersionLT(2);
else
this->TLSSupported = !Triple.isOSVersionLT(3);
+ } else if (Triple.isDriverKit()) {
+ // No TLS on DriverKit.
}
this->MCountName = "\01mcount";
@@ -539,8 +541,9 @@ public:
}
};
+// Common base class for PS4/PS5 targets.
template <typename Target>
-class LLVM_LIBRARY_VISIBILITY PS4OSTargetInfo : public OSTargetInfo<Target> {
+class LLVM_LIBRARY_VISIBILITY PSOSTargetInfo : public OSTargetInfo<Target> {
protected:
void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
MacroBuilder &Builder) const override {
@@ -550,35 +553,66 @@ protected:
DefineStd(Builder, "unix", Opts);
Builder.defineMacro("__ELF__");
Builder.defineMacro("__SCE__");
- Builder.defineMacro("__ORBIS__");
}
public:
- PS4OSTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ PSOSTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: OSTargetInfo<Target>(Triple, Opts) {
this->WCharType = TargetInfo::UnsignedShort;
- // On PS4, TLS variable cannot be aligned to more than 32 bytes (256 bits).
+ // On PS4/PS5, TLS variables cannot be aligned to more than 32 bytes (256
+ // bits).
this->MaxTLSAlign = 256;
- // On PS4, do not honor explicit bit field alignment,
+ // On PS4/PS5, do not honor explicit bit field alignment,
// as in "__attribute__((aligned(2))) int b : 1;".
this->UseExplicitBitFieldAlignment = false;
- switch (Triple.getArch()) {
- default:
- case llvm::Triple::x86_64:
- this->MCountName = ".mcount";
- this->NewAlign = 256;
- break;
- }
+ this->MCountName = ".mcount";
+ this->NewAlign = 256;
+ this->SuitableAlign = 256;
}
+
TargetInfo::CallingConvCheckResult
checkCallingConvention(CallingConv CC) const override {
return (CC == CC_C) ? TargetInfo::CCCR_OK : TargetInfo::CCCR_Error;
}
};
+// PS4 Target
+template <typename Target>
+class LLVM_LIBRARY_VISIBILITY PS4OSTargetInfo : public PSOSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ // Start with base class defines.
+ PSOSTargetInfo<Target>::getOSDefines(Opts, Triple, Builder);
+
+ Builder.defineMacro("__ORBIS__");
+ }
+
+public:
+ PS4OSTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : PSOSTargetInfo<Target>(Triple, Opts) {}
+};
+
+// PS5 Target
+template <typename Target>
+class LLVM_LIBRARY_VISIBILITY PS5OSTargetInfo : public PSOSTargetInfo<Target> {
+protected:
+ void getOSDefines(const LangOptions &Opts, const llvm::Triple &Triple,
+ MacroBuilder &Builder) const override {
+ // Start with base class defines.
+ PSOSTargetInfo<Target>::getOSDefines(Opts, Triple, Builder);
+
+ Builder.defineMacro("__PROSPERO__");
+ }
+
+public:
+ PS5OSTargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
+ : PSOSTargetInfo<Target>(Triple, Opts) {}
+};
+
// RTEMS Target
template <typename Target>
class LLVM_LIBRARY_VISIBILITY RTEMSTargetInfo : public OSTargetInfo<Target> {
@@ -749,7 +783,9 @@ public:
}
// AIX sets FLT_EVAL_METHOD to 1.
- unsigned getFloatEvalMethod() const override { return 1; }
+ LangOptions::FPEvalMethodKind getFPEvalMethod() const override {
+ return LangOptions::FPEvalMethodKind::FEM_Double;
+ }
bool defaultsToAIXPowerAlignment() const override { return true; }
};
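The refactoring above hoists the shared PS4/PS5 settings into PSOSTargetInfo; each subclass's getOSDefines first delegates to the base class and then adds its own platform macro (__ORBIS__ or __PROSPERO__). A minimal standalone sketch of that layered-defines pattern, with std::vector standing in for MacroBuilder:

#include <iostream>
#include <string>
#include <vector>

// Sketch of the layered getOSDefines pattern used by PSOSTargetInfo above.
struct ToyPSBase {
  virtual ~ToyPSBase() = default;
  virtual void getOSDefines(std::vector<std::string> &Macros) const {
    Macros.push_back("__SCE__"); // common to PS4 and PS5
  }
};

struct ToyPS4 : ToyPSBase {
  void getOSDefines(std::vector<std::string> &Macros) const override {
    ToyPSBase::getOSDefines(Macros); // start with base class defines
    Macros.push_back("__ORBIS__");
  }
};

struct ToyPS5 : ToyPSBase {
  void getOSDefines(std::vector<std::string> &Macros) const override {
    ToyPSBase::getOSDefines(Macros);
    Macros.push_back("__PROSPERO__");
  }
};

int main() {
  std::vector<std::string> Macros;
  ToyPS5().getOSDefines(Macros);
  for (const auto &M : Macros)
    std::cout << M << '\n'; // prints __SCE__ then __PROSPERO__
  return 0;
}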
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp
index 1eb0317af60b..9120808e298d 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.cpp
@@ -36,6 +36,8 @@ bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasAltivec = true;
} else if (Feature == "+vsx") {
HasVSX = true;
+ } else if (Feature == "+crbits") {
+ UseCRBits = true;
} else if (Feature == "+bpermd") {
HasBPERMD = true;
} else if (Feature == "+extdiv") {
@@ -81,6 +83,8 @@ bool PPCTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
IsISA3_0 = true;
} else if (Feature == "+isa-v31-instructions") {
IsISA3_1 = true;
+ } else if (Feature == "+quadword-atomics") {
+ HasQuadwordAtomics = true;
}
// TODO: Finish this list and add an assert that we've handled them
// all.
@@ -206,6 +210,7 @@ static void defineXLCompatMacros(MacroBuilder &Builder) {
Builder.defineMacro("__dcbf", "__builtin_dcbf");
Builder.defineMacro("__fmadd", "__builtin_fma");
Builder.defineMacro("__fmadds", "__builtin_fmaf");
+ Builder.defineMacro("__abs", "__builtin_abs");
Builder.defineMacro("__labs", "__builtin_labs");
Builder.defineMacro("__llabs", "__builtin_llabs");
Builder.defineMacro("__popcnt4", "__builtin_popcount");
@@ -247,6 +252,14 @@ static void defineXLCompatMacros(MacroBuilder &Builder) {
Builder.defineMacro("__test_data_class", "__builtin_ppc_test_data_class");
Builder.defineMacro("__swdiv", "__builtin_ppc_swdiv");
Builder.defineMacro("__swdivs", "__builtin_ppc_swdivs");
+ Builder.defineMacro("__fnabs", "__builtin_ppc_fnabs");
+ Builder.defineMacro("__fnabss", "__builtin_ppc_fnabss");
+ Builder.defineMacro("__builtin_maxfe", "__builtin_ppc_maxfe");
+ Builder.defineMacro("__builtin_maxfl", "__builtin_ppc_maxfl");
+ Builder.defineMacro("__builtin_maxfs", "__builtin_ppc_maxfs");
+ Builder.defineMacro("__builtin_minfe", "__builtin_ppc_minfe");
+ Builder.defineMacro("__builtin_minfl", "__builtin_ppc_minfl");
+ Builder.defineMacro("__builtin_minfs", "__builtin_ppc_minfs");
}
/// PPCTargetInfo::getTargetDefines - Return a set of the PowerPC-specific
@@ -506,6 +519,11 @@ bool PPCTargetInfo::initFeatureMap(
.Case("pwr9", true)
.Case("pwr8", true)
.Default(false);
+ Features["crbits"] = llvm::StringSwitch<bool>(CPU)
+ .Case("ppc64le", true)
+ .Case("pwr9", true)
+ .Case("pwr8", true)
+ .Default(false);
Features["vsx"] = llvm::StringSwitch<bool>(CPU)
.Case("ppc64le", true)
.Case("pwr9", true)
@@ -533,6 +551,7 @@ bool PPCTargetInfo::initFeatureMap(
.Case("pwr9", true)
.Case("pwr8", true)
.Case("pwr7", true)
+ .Case("a2", true)
.Default(false);
Features["isa-v207-instructions"] = llvm::StringSwitch<bool>(CPU)
@@ -544,6 +563,12 @@ bool PPCTargetInfo::initFeatureMap(
Features["isa-v30-instructions"] =
llvm::StringSwitch<bool>(CPU).Case("pwr9", true).Default(false);
+ Features["quadword-atomics"] =
+ getTriple().isArch64Bit() && llvm::StringSwitch<bool>(CPU)
+ .Case("pwr9", true)
+ .Case("pwr8", true)
+ .Default(false);
+
// Power10 includes all the same features as Power9 plus any features specific
// to the Power10 core.
if (CPU == "pwr10" || CPU == "power10") {
@@ -569,12 +594,12 @@ bool PPCTargetInfo::initFeatureMap(
}
if (!(ArchDefs & ArchDefinePwr10)) {
- if (llvm::find(FeaturesVec, "+mma") != FeaturesVec.end()) {
+ if (llvm::is_contained(FeaturesVec, "+mma")) {
// MMA operations are not available pre-Power10.
Diags.Report(diag::err_opt_not_valid_with_opt) << "-mmma" << CPU;
return false;
}
- if (llvm::find(FeaturesVec, "+pcrel") != FeaturesVec.end()) {
+ if (llvm::is_contained(FeaturesVec, "+pcrel")) {
// PC-Relative instructions are not available pre-Power10,
// and these instructions also require prefixed instructions support.
Diags.Report(diag::err_opt_not_valid_without_opt)
@@ -582,13 +607,13 @@ bool PPCTargetInfo::initFeatureMap(
<< "-mcpu=pwr10 -mprefixed";
return false;
}
- if (llvm::find(FeaturesVec, "+prefixed") != FeaturesVec.end()) {
+ if (llvm::is_contained(FeaturesVec, "+prefixed")) {
// Prefixed instructions are not available pre-Power10.
Diags.Report(diag::err_opt_not_valid_without_opt) << "-mprefixed"
<< "-mcpu=pwr10";
return false;
}
- if (llvm::find(FeaturesVec, "+paired-vector-memops") != FeaturesVec.end()) {
+ if (llvm::is_contained(FeaturesVec, "+paired-vector-memops")) {
// Paired vector memops are not available pre-Power10.
Diags.Report(diag::err_opt_not_valid_without_opt)
<< "-mpaired-vector-memops"
@@ -634,6 +659,7 @@ bool PPCTargetInfo::hasFeature(StringRef Feature) const {
.Case("powerpc", true)
.Case("altivec", HasAltivec)
.Case("vsx", HasVSX)
+ .Case("crbits", UseCRBits)
.Case("power8-vector", HasP8Vector)
.Case("crypto", HasP8Crypto)
.Case("direct-move", HasDirectMove)
@@ -654,6 +680,7 @@ bool PPCTargetInfo::hasFeature(StringRef Feature) const {
.Case("isa-v207-instructions", IsISA2_07)
.Case("isa-v30-instructions", IsISA3_0)
.Case("isa-v31-instructions", IsISA3_1)
+ .Case("quadword-atomics", HasQuadwordAtomics)
.Default(false);
}
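initFeatureMap above derives default feature bits from the CPU name via llvm::StringSwitch, and the new quadword-atomics feature is additionally gated on a 64-bit triple. A standalone sketch of that decision (a std::set stands in for StringSwitch; the real code also picks it up for pwr10 by inheriting the Power9 feature set):

#include <cassert>
#include <set>
#include <string>

// Sketch of the CPU-name -> default-feature gating implemented above.
bool defaultHasQuadwordAtomics(const std::string &CPU, bool Is64Bit) {
  static const std::set<std::string> Capable = {"pwr8", "pwr9", "pwr10"};
  return Is64Bit && Capable.count(CPU) != 0; // 64-bit only, pwr8 and up
}

int main() {
  assert(defaultHasQuadwordAtomics("pwr9", /*Is64Bit=*/true));
  assert(!defaultHasQuadwordAtomics("pwr9", /*Is64Bit=*/false));
  assert(!defaultHasQuadwordAtomics("pwr7", /*Is64Bit=*/true));
  return 0;
}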
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h
index ac52eb219f54..8148762f446b 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/PPC.h
@@ -62,6 +62,7 @@ class LLVM_LIBRARY_VISIBILITY PPCTargetInfo : public TargetInfo {
bool HasROPProtect = false;
bool HasPrivileged = false;
bool HasVSX = false;
+ bool UseCRBits = false;
bool HasP8Vector = false;
bool HasP8Crypto = false;
bool HasDirectMove = false;
@@ -78,6 +79,7 @@ class LLVM_LIBRARY_VISIBILITY PPCTargetInfo : public TargetInfo {
bool IsISA2_07 = false;
bool IsISA3_0 = false;
bool IsISA3_1 = false;
+ bool HasQuadwordAtomics = false;
protected:
std::string ABI;
@@ -439,8 +441,18 @@ public:
DataLayout += "-S128-v256:256:256-v512:512:512";
resetDataLayout(DataLayout);
- // PPC64 supports atomics up to 8 bytes.
- MaxAtomicPromoteWidth = MaxAtomicInlineWidth = 64;
+ // Newer PPC64 instruction sets support atomics up to 16 bytes.
+ MaxAtomicPromoteWidth = 128;
+ // Baseline PPC64 supports inlining atomics up to 8 bytes.
+ MaxAtomicInlineWidth = 64;
+ }
+
+ void setMaxAtomicWidth() override {
+ // For power8 and up, the backend is able to inline 16-byte lock-free
+ // atomic code.
+ // TODO: We should allow AIX to inline quadword atomics in the future.
+ if (!getTriple().isOSAIX() && hasFeature("quadword-atomics"))
+ MaxAtomicInlineWidth = 128;
}
BuiltinVaListKind getBuiltinVaListKind() const override {
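setMaxAtomicWidth above widens MaxAtomicInlineWidth to 128 only when the quadword-atomics feature is present and the target is not AIX; MaxAtomicPromoteWidth is raised to 128 unconditionally. A standalone sketch of the inline-width decision:

#include <cassert>

// Sketch of the setMaxAtomicWidth decision above: inline 16-byte atomics
// only off AIX and only when quadword-atomics is available.
unsigned maxAtomicInlineWidth(bool IsAIX, bool HasQuadwordAtomics) {
  if (!IsAIX && HasQuadwordAtomics)
    return 128;
  return 64; // baseline PPC64
}

int main() {
  assert(maxAtomicInlineWidth(false, true) == 128);
  assert(maxAtomicInlineWidth(true, true) == 64); // AIX not yet allowed
  return 0;
}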
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp
index 0680cad5b07c..32dd2bad2c5c 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.cpp
@@ -188,7 +188,7 @@ void RISCVTargetInfo::getTargetDefines(const LangOptions &Opts,
if (ISAInfo->hasExtension("c"))
Builder.defineMacro("__riscv_compressed");
- if (ISAInfo->hasExtension("zve32x") || ISAInfo->hasExtension("v"))
+ if (ISAInfo->hasExtension("zve32x"))
Builder.defineMacro("__riscv_vector");
}
@@ -232,8 +232,14 @@ bool RISCVTargetInfo::initFeatureMap(
return false;
}
- return TargetInfo::initFeatureMap(Features, Diags, CPU,
- (*ParseResult)->toFeatureVector());
+ // RISCVISAInfo computes the ISA features implied by the parsed arch string.
+ std::vector<std::string> ImpliedFeatures = (*ParseResult)->toFeatureVector();
+ // Add back non-ISA features like `relax` and `save-restore`.
+ for (const std::string &Feature : FeaturesVec)
+ if (!llvm::is_contained(ImpliedFeatures, Feature))
+ ImpliedFeatures.push_back(Feature);
+
+ return TargetInfo::initFeatureMap(Features, Diags, CPU, ImpliedFeatures);
}
/// Return true if the target has this feature; keep in sync with handleTargetFeatures.
@@ -245,7 +251,7 @@ bool RISCVTargetInfo::hasFeature(StringRef Feature) const {
.Case("riscv64", Is64Bit)
.Case("64bit", Is64Bit)
.Default(None);
- if (Result.hasValue())
+ if (Result)
return Result.getValue();
if (ISAInfo->isSupportedExtensionFeature(Feature))
@@ -272,7 +278,7 @@ bool RISCVTargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
}
if (ABI.empty())
- ABI = llvm::RISCV::computeDefaultABIFromArch(*ISAInfo).str();
+ ABI = ISAInfo->computeDefaultABI().str();
return true;
}
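The initFeatureMap change above merges two sources: the features implied by the parsed ISA string and any explicitly requested non-ISA features (such as "+relax") that RISCVISAInfo does not model, without duplicating entries. A standalone sketch of that merge:

#include <algorithm>
#include <cassert>
#include <string>
#include <vector>

// Sketch of the RISC-V feature merge above: start from ISA-implied
// features, then append requested non-ISA features not already present.
std::vector<std::string>
mergeFeatures(std::vector<std::string> Implied,
              const std::vector<std::string> &Requested) {
  for (const std::string &F : Requested)
    if (std::find(Implied.begin(), Implied.end(), F) == Implied.end())
      Implied.push_back(F);
  return Implied;
}

int main() {
  auto Merged = mergeFeatures({"+m", "+a"}, {"+relax", "+a"});
  assert(Merged.size() == 3); // "+a" is not duplicated
  return 0;
}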
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h
index 5331ed4a50ae..7817e6e81e26 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/RISCV.h
@@ -96,6 +96,10 @@ public:
DiagnosticsEngine &Diags) override;
bool hasBitIntType() const override { return true; }
+
+ bool useFP16ConversionIntrinsics() const override {
+ return false;
+ }
};
class LLVM_LIBRARY_VISIBILITY RISCV32TargetInfo : public RISCVTargetInfo {
public:
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h b/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h
index a40d4b3ca27e..08c49f018ac7 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/SPIR.h
@@ -144,16 +144,16 @@ public:
// FIXME: SYCL specification considers unannotated pointers and references
// to be pointing to the generic address space. See section 5.9.3 of
// SYCL 2020 specification.
- // Currently, there is no way of representing SYCL's and HIP's default
+ // Currently, there is no way of representing SYCL's and HIP/CUDA's default
// address space language semantic along with the semantics of embedded C's
// default address space in the same address space map. Hence the map needs
// to be reset to allow mapping to the desired value of 'Default' entry for
- // SYCL and HIP.
+ // SYCL and HIP/CUDA.
setAddressSpaceMap(
/*DefaultIsGeneric=*/Opts.SYCLIsDevice ||
- // The address mapping from HIP language for device code is only defined
- // for SPIR-V.
- (getTriple().isSPIRV() && Opts.HIP && Opts.CUDAIsDevice));
+ // The address mapping from HIP/CUDA language for device code is only
+ // defined for SPIR-V.
+ (getTriple().isSPIRV() && Opts.CUDAIsDevice));
}
void setSupportedOpenCLOpts() override {
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.cpp
index e3e0da21f8d5..84874b58ba68 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.cpp
@@ -59,6 +59,17 @@ bool SystemZTargetInfo::validateAsmConstraint(
default:
return false;
+ case 'Z':
+ switch (Name[1]) {
+ default:
+ return false;
+ case 'Q': // Address with base and unsigned 12-bit displacement
+ case 'R': // Likewise, plus an index
+ case 'S': // Address with base and signed 20-bit displacement
+ case 'T': // Likewise, plus an index
+ break;
+ }
+ LLVM_FALLTHROUGH;
case 'a': // Address register
case 'd': // Data register (equivalent to 'r')
case 'f': // Floating-point register
@@ -93,7 +104,7 @@ static constexpr ISANameRevision ISARevisions[] = {
{{"arch11"}, 11}, {{"z13"}, 11},
{{"arch12"}, 12}, {{"z14"}, 12},
{{"arch13"}, 13}, {{"z15"}, 13},
- {{"arch14"}, 14}
+ {{"arch14"}, 14}, {{"z16"}, 14},
};
int SystemZTargetInfo::getISARevision(StringRef Name) const {
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h b/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h
index 92cefeea5d26..d12045c756c1 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/SystemZ.h
@@ -82,6 +82,30 @@ public:
bool validateAsmConstraint(const char *&Name,
TargetInfo::ConstraintInfo &info) const override;
+ std::string convertConstraint(const char *&Constraint) const override {
+ switch (Constraint[0]) {
+ case 'p': // Keep 'p' constraint.
+ return std::string("p");
+ case 'Z':
+ switch (Constraint[1]) {
+ case 'Q': // Address with base and unsigned 12-bit displacement
+ case 'R': // Likewise, plus an index
+ case 'S': // Address with base and signed 20-bit displacement
+ case 'T': // Likewise, plus an index
+ // "^" hints llvm that this is a 2 letter constraint.
+ // "Constraint++" is used to promote the string iterator
+ // to the next constraint.
+ return std::string("^") + std::string(Constraint++, 2);
+ default:
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return TargetInfo::convertConstraint(Constraint);
+ }
+
const char *getClobbers() const override {
// FIXME: Is this really right?
return "";
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/VE.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/VE.cpp
index 22223654e8ad..4d66c98edc92 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/VE.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/VE.cpp
@@ -18,6 +18,12 @@
using namespace clang;
using namespace clang::targets;
+const Builtin::Info VETargetInfo::BuiltinInfo[] = {
+#define BUILTIN(ID, TYPE, ATTRS) \
+ {#ID, TYPE, ATTRS, nullptr, ALL_LANGUAGES, nullptr},
+#include "clang/Basic/BuiltinsVE.def"
+};
+
void VETargetInfo::getTargetDefines(const LangOptions &Opts,
MacroBuilder &Builder) const {
Builder.defineMacro("_LP64", "1");
@@ -35,5 +41,6 @@ void VETargetInfo::getTargetDefines(const LangOptions &Opts,
}
ArrayRef<Builtin::Info> VETargetInfo::getTargetBuiltins() const {
- return ArrayRef<Builtin::Info>();
+ return llvm::makeArrayRef(BuiltinInfo,
+ clang::VE::LastTSBuiltin - Builtin::FirstTSBuiltin);
}
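The VE change above populates BuiltinInfo by expanding BuiltinsVE.def with an X macro: each BUILTIN(ID, TYPE, ATTRS) line in the .def file becomes one table record. A standalone sketch of the X-macro table technique, with a made-up two-entry list in place of the real .def file:

#include <cstdio>

// Sketch of the X-macro table technique used for VETargetInfo::BuiltinInfo
// above. The builtin names and type strings here are invented.
struct ToyBuiltinInfo {
  const char *Name;
  const char *Type;
};

#define TOY_BUILTIN_LIST(BUILTIN)                                             \
  BUILTIN(vld, "v*")                                                          \
  BUILTIN(vst, "v*")

static const ToyBuiltinInfo BuiltinInfo[] = {
#define BUILTIN(ID, TYPE) {#ID, TYPE},
    TOY_BUILTIN_LIST(BUILTIN)
#undef BUILTIN
};

int main() {
  for (const auto &B : BuiltinInfo)
    std::printf("%s : %s\n", B.Name, B.Type);
  return 0;
}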
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.cpp
index 2309997eb77b..b3b6c2be5c13 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.cpp
@@ -56,6 +56,7 @@ bool WebAssemblyTargetInfo::hasFeature(StringRef Feature) const {
.Case("multivalue", HasMultivalue)
.Case("tail-call", HasTailCall)
.Case("reference-types", HasReferenceTypes)
+ .Case("extended-const", HasExtendedConst)
.Default(false);
}
@@ -93,6 +94,8 @@ void WebAssemblyTargetInfo::getTargetDefines(const LangOptions &Opts,
Builder.defineMacro("__wasm_tail_call__");
if (HasReferenceTypes)
Builder.defineMacro("__wasm_reference_types__");
+ if (HasExtendedConst)
+ Builder.defineMacro("__wasm_extended_const__");
}
void WebAssemblyTargetInfo::setSIMDLevel(llvm::StringMap<bool> &Features,
@@ -240,6 +243,14 @@ bool WebAssemblyTargetInfo::handleTargetFeatures(
HasReferenceTypes = false;
continue;
}
+ if (Feature == "+extended-const") {
+ HasExtendedConst = true;
+ continue;
+ }
+ if (Feature == "-extended-const") {
+ HasExtendedConst = false;
+ continue;
+ }
Diags.Report(diag::err_opt_not_valid_with_opt)
<< Feature << "-target-feature";
@@ -255,9 +266,11 @@ ArrayRef<Builtin::Info> WebAssemblyTargetInfo::getTargetBuiltins() const {
void WebAssemblyTargetInfo::adjust(DiagnosticsEngine &Diags,
LangOptions &Opts) {
- // If the Atomics feature isn't available, turn off POSIXThreads and
- // ThreadModel, so that we don't predefine _REENTRANT or __STDCPP_THREADS__.
- if (!HasAtomics) {
+ TargetInfo::adjust(Diags, Opts);
+ // Turn off POSIXThreads and ThreadModel so that we don't predefine _REENTRANT
+ // or __STDCPP_THREADS__ if we will eventually end up stripping atomics
+ // because they are unsupported.
+ if (!HasAtomics || !HasBulkMemory) {
Opts.POSIXThreads = false;
Opts.setThreadModel(LangOptions::ThreadModelKind::Single);
Opts.ThreadsafeStatics = false;
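handleTargetFeatures above consumes "+feature"/"-feature" strings in order, toggling the matching flag and diagnosing anything unrecognized. A standalone sketch of that loop for the new extended-const flag (returning false where the real code reports a diagnostic):

#include <cassert>
#include <string>
#include <vector>

// Sketch of the "+feature"/"-feature" toggling loop used above.
bool handleFeatures(const std::vector<std::string> &Features,
                    bool &HasExtendedConst) {
  for (const std::string &F : Features) {
    if (F == "+extended-const") { HasExtendedConst = true; continue; }
    if (F == "-extended-const") { HasExtendedConst = false; continue; }
    return false; // unknown feature: the real code emits a diagnostic
  }
  return true;
}

int main() {
  bool HasExtendedConst = false;
  assert(handleFeatures({"+extended-const"}, HasExtendedConst));
  assert(HasExtendedConst);
  return 0;
}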
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h b/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h
index 075486990558..63418869d10a 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/WebAssembly.h
@@ -39,6 +39,7 @@ class LLVM_LIBRARY_VISIBILITY WebAssemblyTargetInfo : public TargetInfo {
bool HasMultivalue = false;
bool HasTailCall = false;
bool HasReferenceTypes = false;
+ bool HasExtendedConst = false;
std::string ABI;
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp b/contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp
index 5c4bd364b06a..06988830eaed 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/X86.cpp
@@ -239,7 +239,6 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
HasAVX512ER = true;
} else if (Feature == "+avx512fp16") {
HasAVX512FP16 = true;
- HasFloat16 = true;
} else if (Feature == "+avx512pf") {
HasAVX512PF = true;
} else if (Feature == "+avx512dq") {
@@ -355,6 +354,8 @@ bool X86TargetInfo::handleTargetFeatures(std::vector<std::string> &Features,
.Default(NoSSE);
SSELevel = std::max(SSELevel, Level);
+ HasFloat16 = SSELevel >= SSE2;
+
MMX3DNowEnum ThreeDNowLevel = llvm::StringSwitch<MMX3DNowEnum>(Feature)
.Case("+3dnowa", AMD3DNowAthlon)
.Case("+3dnow", AMD3DNow)
@@ -1095,22 +1096,22 @@ unsigned X86TargetInfo::multiVersionSortPriority(StringRef Name) const {
bool X86TargetInfo::validateCPUSpecificCPUDispatch(StringRef Name) const {
return llvm::StringSwitch<bool>(Name)
-#define CPU_SPECIFIC(NAME, MANGLING, FEATURES) .Case(NAME, true)
-#define CPU_SPECIFIC_ALIAS(NEW_NAME, NAME) .Case(NEW_NAME, true)
+#define CPU_SPECIFIC(NAME, TUNE_NAME, MANGLING, FEATURES) .Case(NAME, true)
+#define CPU_SPECIFIC_ALIAS(NEW_NAME, TUNE_NAME, NAME) .Case(NEW_NAME, true)
#include "llvm/Support/X86TargetParser.def"
.Default(false);
}
static StringRef CPUSpecificCPUDispatchNameDealias(StringRef Name) {
return llvm::StringSwitch<StringRef>(Name)
-#define CPU_SPECIFIC_ALIAS(NEW_NAME, NAME) .Case(NEW_NAME, NAME)
+#define CPU_SPECIFIC_ALIAS(NEW_NAME, TUNE_NAME, NAME) .Case(NEW_NAME, NAME)
#include "llvm/Support/X86TargetParser.def"
.Default(Name);
}
char X86TargetInfo::CPUSpecificManglingCharacter(StringRef Name) const {
return llvm::StringSwitch<char>(CPUSpecificCPUDispatchNameDealias(Name))
-#define CPU_SPECIFIC(NAME, MANGLING, FEATURES) .Case(NAME, MANGLING)
+#define CPU_SPECIFIC(NAME, TUNE_NAME, MANGLING, FEATURES) .Case(NAME, MANGLING)
#include "llvm/Support/X86TargetParser.def"
.Default(0);
}
@@ -1119,12 +1120,20 @@ void X86TargetInfo::getCPUSpecificCPUDispatchFeatures(
StringRef Name, llvm::SmallVectorImpl<StringRef> &Features) const {
StringRef WholeList =
llvm::StringSwitch<StringRef>(CPUSpecificCPUDispatchNameDealias(Name))
-#define CPU_SPECIFIC(NAME, MANGLING, FEATURES) .Case(NAME, FEATURES)
+#define CPU_SPECIFIC(NAME, TUNE_NAME, MANGLING, FEATURES) .Case(NAME, FEATURES)
#include "llvm/Support/X86TargetParser.def"
.Default("");
WholeList.split(Features, ',', /*MaxSplit=*/-1, /*KeepEmpty=*/false);
}
+StringRef X86TargetInfo::getCPUSpecificTuneName(StringRef Name) const {
+ return llvm::StringSwitch<StringRef>(Name)
+#define CPU_SPECIFIC(NAME, TUNE_NAME, MANGLING, FEATURES) .Case(NAME, TUNE_NAME)
+#define CPU_SPECIFIC_ALIAS(NEW_NAME, TUNE_NAME, NAME) .Case(NEW_NAME, TUNE_NAME)
+#include "llvm/Support/X86TargetParser.def"
+ .Default("");
+}
+
// We can't use a generic validation scheme for the cpus accepted here
// versus subtarget cpus accepted in the target attribute because the
// variables initialized by the runtime only support the below currently
@@ -1482,8 +1491,8 @@ std::string X86TargetInfo::convertConstraint(const char *&Constraint) const {
return std::string("{si}");
case 'D':
return std::string("{di}");
- case 'p': // address
- return std::string("im");
+ case 'p': // Keep 'p' constraint (address).
+ return std::string("p");
case 't': // top of floating point stack.
return std::string("{st}");
case 'u': // second from top of floating point stack.
diff --git a/contrib/llvm-project/clang/lib/Basic/Targets/X86.h b/contrib/llvm-project/clang/lib/Basic/Targets/X86.h
index b34a16bb5f5b..e582a2932b4f 100644
--- a/contrib/llvm-project/clang/lib/Basic/Targets/X86.h
+++ b/contrib/llvm-project/clang/lib/Basic/Targets/X86.h
@@ -14,6 +14,7 @@
#define LLVM_CLANG_LIB_BASIC_TARGETS_X86_H
#include "OSTargets.h"
+#include "clang/Basic/BitmaskEnum.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/TargetOptions.h"
#include "llvm/ADT/Triple.h"
@@ -168,11 +169,15 @@ public:
return LongDoubleFormat == &llvm::APFloat::IEEEquad() ? "g" : "e";
}
- unsigned getFloatEvalMethod() const override {
+ LangOptions::FPEvalMethodKind getFPEvalMethod() const override {
// X87 evaluates with 80 bits "long double" precision.
- return SSELevel == NoSSE ? 2 : 0;
+ return SSELevel == NoSSE ? LangOptions::FPEvalMethodKind::FEM_Extended
+ : LangOptions::FPEvalMethodKind::FEM_Source;
}
+ // EvalMethod `source` is not supported for targets with the `NoSSE` feature.
+ bool supportSourceEvalMethod() const override { return SSELevel > NoSSE; }
+
ArrayRef<const char *> getGCCRegNames() const override;
ArrayRef<TargetInfo::GCCRegAlias> getGCCRegAliases() const override {
@@ -197,6 +202,8 @@ public:
StringRef Name,
llvm::SmallVectorImpl<StringRef> &Features) const override;
+ StringRef getCPUSpecificTuneName(StringRef Name) const override;
+
Optional<unsigned> getCPUCacheLineSize() const override;
bool validateAsmConstraint(const char *&Name,
@@ -416,9 +423,9 @@ public:
RegParmMax = 3;
// Use fpret for all types.
- RealTypeUsesObjCFPRet =
- ((1 << (int)FloatModeKind::Float) | (1 << (int)FloatModeKind::Double) |
- (1 << (int)FloatModeKind::LongDouble));
+ RealTypeUsesObjCFPRetMask =
+ (int)(FloatModeKind::Float | FloatModeKind::Double |
+ FloatModeKind::LongDouble);
// x86-32 has atomics up to 8 bytes
MaxAtomicPromoteWidth = 64;
@@ -475,13 +482,13 @@ public:
NetBSDI386TargetInfo(const llvm::Triple &Triple, const TargetOptions &Opts)
: NetBSDTargetInfo<X86_32TargetInfo>(Triple, Opts) {}
- unsigned getFloatEvalMethod() const override {
+ LangOptions::FPEvalMethodKind getFPEvalMethod() const override {
VersionTuple OsVersion = getTriple().getOSVersion();
// New NetBSD uses the default rounding mode.
if (OsVersion >= VersionTuple(6, 99, 26) || OsVersion.getMajor() == 0)
- return X86_32TargetInfo::getFloatEvalMethod();
+ return X86_32TargetInfo::getFPEvalMethod();
// NetBSD before 6.99.26 defaults to "double" rounding.
- return 1;
+ return LangOptions::FPEvalMethodKind::FEM_Double;
}
};
@@ -697,7 +704,7 @@ public:
"64-i64:64-f80:128-n8:16:32:64-S128");
// Use fpret only for long double.
- RealTypeUsesObjCFPRet = (1 << (int)FloatModeKind::LongDouble);
+ RealTypeUsesObjCFPRetMask = (int)FloatModeKind::LongDouble;
// Use fp2ret for _Complex long double.
ComplexLongDoubleUsesFP2Ret = true;
diff --git a/contrib/llvm-project/clang/lib/Basic/TokenKinds.cpp b/contrib/llvm-project/clang/lib/Basic/TokenKinds.cpp
index d55e176c72c4..c300175ce90b 100644
--- a/contrib/llvm-project/clang/lib/Basic/TokenKinds.cpp
+++ b/contrib/llvm-project/clang/lib/Basic/TokenKinds.cpp
@@ -46,6 +46,15 @@ const char *tok::getKeywordSpelling(TokenKind Kind) {
return nullptr;
}
+const char *tok::getPPKeywordSpelling(tok::PPKeywordKind Kind) {
+ switch (Kind) {
+#define PPKEYWORD(x) case tok::pp_##x: return #x;
+#include "clang/Basic/TokenKinds.def"
+ default: break;
+ }
+ return nullptr;
+}
+
bool tok::isAnnotation(TokenKind Kind) {
switch (Kind) {
#define ANNOTATION(X) case annot_ ## X: return true;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h b/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h
index 0d12183055e1..6214148adab9 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/ABIInfo.h
@@ -100,6 +100,7 @@ namespace swiftcall {
virtual bool isHomogeneousAggregateSmallEnough(const Type *Base,
uint64_t Members) const;
+ virtual bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const;
bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
uint64_t &Members) const;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/Address.h b/contrib/llvm-project/clang/lib/CodeGen/Address.h
index 3ac0f4f0d7e5..bddeac1d6dcb 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/Address.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/Address.h
@@ -87,11 +87,6 @@ public:
"Incorrect pointer element type");
}
- // Deprecated: Use constructor with explicit element type instead.
- Address(llvm::Value *Pointer, CharUnits Alignment)
- : Address(Pointer, Pointer->getType()->getPointerElementType(),
- Alignment) {}
-
static Address invalid() { return Address(nullptr); }
bool isValid() const { return A.getPointer() != nullptr; }
diff --git a/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp b/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp
index a4d330c0ba93..eb40e446057f 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/BackendUtil.cpp
@@ -39,6 +39,7 @@
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/SubtargetFeature.h"
#include "llvm/MC/TargetRegistry.h"
+#include "llvm/Object/OffloadBinary.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Passes/PassPlugin.h"
#include "llvm/Passes/StandardInstrumentations.h"
@@ -52,7 +53,6 @@
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
-#include "llvm/Transforms/Coroutines.h"
#include "llvm/Transforms/Coroutines/CoroCleanup.h"
#include "llvm/Transforms/Coroutines/CoroEarly.h"
#include "llvm/Transforms/Coroutines/CoroElide.h"
@@ -60,7 +60,6 @@
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/IPO/AlwaysInliner.h"
#include "llvm/Transforms/IPO/LowerTypeTests.h"
-#include "llvm/Transforms/IPO/PassManagerBuilder.h"
#include "llvm/Transforms/IPO/ThinLTOBitcodeWriter.h"
#include "llvm/Transforms/InstCombine/InstCombine.h"
#include "llvm/Transforms/Instrumentation.h"
@@ -118,6 +117,8 @@ class EmitAssemblyHelper {
std::unique_ptr<raw_pwrite_stream> OS;
+ Triple TargetTriple;
+
TargetIRAnalysis getTargetIRAnalysis() const {
if (TM)
return TM->getTargetIRAnalysis();
@@ -125,8 +126,6 @@ class EmitAssemblyHelper {
return TargetIRAnalysis();
}
- void CreatePasses(legacy::PassManager &MPM, legacy::FunctionPassManager &FPM);
-
/// Generates the TargetMachine.
/// Leaves TM unchanged if it is unable to create the target machine.
/// Some of our clang tests specify triples which are not built
@@ -162,6 +161,16 @@ class EmitAssemblyHelper {
std::unique_ptr<raw_pwrite_stream> &OS,
std::unique_ptr<llvm::ToolOutputFile> &DwoOS);
+ /// Check whether we should emit a module summary for regular LTO.
+ /// The module summary should be emitted by default for regular LTO
+ /// except for ld64 targets.
+ ///
+ /// \return True if the module summary should be emitted.
+ bool shouldEmitRegularLTOSummary() const {
+ return CodeGenOpts.PrepareForLTO && !CodeGenOpts.DisableLLVMPasses &&
+ TargetTriple.getVendor() != llvm::Triple::Apple;
+ }
+
public:
EmitAssemblyHelper(DiagnosticsEngine &_Diags,
const HeaderSearchOptions &HeaderSearchOpts,
@@ -170,7 +179,8 @@ public:
const LangOptions &LOpts, Module *M)
: Diags(_Diags), HSOpts(HeaderSearchOpts), CodeGenOpts(CGOpts),
TargetOpts(TOpts), LangOpts(LOpts), TheModule(M),
- CodeGenerationTime("codegen", "Code Generation Time") {}
+ CodeGenerationTime("codegen", "Code Generation Time"),
+ TargetTriple(TheModule->getTargetTriple()) {}
~EmitAssemblyHelper() {
if (CodeGenOpts.DisableFree)
@@ -179,60 +189,10 @@ public:
std::unique_ptr<TargetMachine> TM;
- // Emit output using the legacy pass manager for the optimization pipeline.
- // This will be removed soon when using the legacy pass manager for the
- // optimization pipeline is no longer supported.
- void EmitAssemblyWithLegacyPassManager(BackendAction Action,
- std::unique_ptr<raw_pwrite_stream> OS);
-
- // Emit output using the new pass manager for the optimization pipeline. This
- // is the default.
+ // Emit output using the new pass manager for the optimization pipeline.
void EmitAssembly(BackendAction Action,
std::unique_ptr<raw_pwrite_stream> OS);
};
-
-// We need this wrapper to access LangOpts and CGOpts from extension functions
-// that we add to the PassManagerBuilder.
-class PassManagerBuilderWrapper : public PassManagerBuilder {
-public:
- PassManagerBuilderWrapper(const Triple &TargetTriple,
- const CodeGenOptions &CGOpts,
- const LangOptions &LangOpts)
- : TargetTriple(TargetTriple), CGOpts(CGOpts), LangOpts(LangOpts) {}
- const Triple &getTargetTriple() const { return TargetTriple; }
- const CodeGenOptions &getCGOpts() const { return CGOpts; }
- const LangOptions &getLangOpts() const { return LangOpts; }
-
-private:
- const Triple &TargetTriple;
- const CodeGenOptions &CGOpts;
- const LangOptions &LangOpts;
-};
-}
-
-static void addObjCARCAPElimPass(const PassManagerBuilder &Builder, PassManagerBase &PM) {
- if (Builder.OptLevel > 0)
- PM.add(createObjCARCAPElimPass());
-}
-
-static void addObjCARCExpandPass(const PassManagerBuilder &Builder, PassManagerBase &PM) {
- if (Builder.OptLevel > 0)
- PM.add(createObjCARCExpandPass());
-}
-
-static void addObjCARCOptPass(const PassManagerBuilder &Builder, PassManagerBase &PM) {
- if (Builder.OptLevel > 0)
- PM.add(createObjCARCOptPass());
-}
-
-static void addAddDiscriminatorsPass(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- PM.add(createAddDiscriminatorsPass());
-}
-
-static void addBoundsCheckingPass(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- PM.add(createBoundsCheckingLegacyPass());
}
static SanitizerCoverageOptions
@@ -258,17 +218,6 @@ getSancovOptsFromCGOpts(const CodeGenOptions &CGOpts) {
return Opts;
}
-static void addSanitizerCoveragePass(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- const PassManagerBuilderWrapper &BuilderWrapper =
- static_cast<const PassManagerBuilderWrapper &>(Builder);
- const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
- auto Opts = getSancovOptsFromCGOpts(CGOpts);
- PM.add(createModuleSanitizerCoverageLegacyPassPass(
- Opts, CGOpts.SanitizeCoverageAllowlistFiles,
- CGOpts.SanitizeCoverageIgnorelistFiles));
-}
-
// Check if ASan should use GC-friendly instrumentation for globals.
// First of all, there is no point if -fdata-sections is off (expect for MachO,
// where this is not a factor). Also, on ELF this feature requires an assembler
@@ -281,134 +230,20 @@ static bool asanUseGlobalsGC(const Triple &T, const CodeGenOptions &CGOpts) {
case Triple::COFF:
return true;
case Triple::ELF:
- return CGOpts.DataSections && !CGOpts.DisableIntegratedAS;
+ return !CGOpts.DisableIntegratedAS;
case Triple::GOFF:
llvm::report_fatal_error("ASan not implemented for GOFF");
case Triple::XCOFF:
llvm::report_fatal_error("ASan not implemented for XCOFF.");
case Triple::Wasm:
+ case Triple::DXContainer:
+ case Triple::SPIRV:
case Triple::UnknownObjectFormat:
break;
}
return false;
}
-static void addMemProfilerPasses(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- PM.add(createMemProfilerFunctionPass());
- PM.add(createModuleMemProfilerLegacyPassPass());
-}
-
-static void addAddressSanitizerPasses(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- const PassManagerBuilderWrapper &BuilderWrapper =
- static_cast<const PassManagerBuilderWrapper&>(Builder);
- const Triple &T = BuilderWrapper.getTargetTriple();
- const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
- bool Recover = CGOpts.SanitizeRecover.has(SanitizerKind::Address);
- bool UseAfterScope = CGOpts.SanitizeAddressUseAfterScope;
- bool UseOdrIndicator = CGOpts.SanitizeAddressUseOdrIndicator;
- bool UseGlobalsGC = asanUseGlobalsGC(T, CGOpts);
- llvm::AsanDtorKind DestructorKind = CGOpts.getSanitizeAddressDtor();
- llvm::AsanDetectStackUseAfterReturnMode UseAfterReturn =
- CGOpts.getSanitizeAddressUseAfterReturn();
- PM.add(createAddressSanitizerFunctionPass(/*CompileKernel*/ false, Recover,
- UseAfterScope, UseAfterReturn));
- PM.add(createModuleAddressSanitizerLegacyPassPass(
- /*CompileKernel*/ false, Recover, UseGlobalsGC, UseOdrIndicator,
- DestructorKind));
-}
-
-static void addKernelAddressSanitizerPasses(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- PM.add(createAddressSanitizerFunctionPass(
- /*CompileKernel*/ true, /*Recover*/ true, /*UseAfterScope*/ false,
- /*UseAfterReturn*/ llvm::AsanDetectStackUseAfterReturnMode::Never));
- PM.add(createModuleAddressSanitizerLegacyPassPass(
- /*CompileKernel*/ true, /*Recover*/ true, /*UseGlobalsGC*/ true,
- /*UseOdrIndicator*/ false));
-}
-
-static void addHWAddressSanitizerPasses(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- const PassManagerBuilderWrapper &BuilderWrapper =
- static_cast<const PassManagerBuilderWrapper &>(Builder);
- const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
- bool Recover = CGOpts.SanitizeRecover.has(SanitizerKind::HWAddress);
- PM.add(createHWAddressSanitizerLegacyPassPass(
- /*CompileKernel*/ false, Recover,
- /*DisableOptimization*/ CGOpts.OptimizationLevel == 0));
-}
-
-static void addKernelHWAddressSanitizerPasses(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- const PassManagerBuilderWrapper &BuilderWrapper =
- static_cast<const PassManagerBuilderWrapper &>(Builder);
- const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
- PM.add(createHWAddressSanitizerLegacyPassPass(
- /*CompileKernel*/ true, /*Recover*/ true,
- /*DisableOptimization*/ CGOpts.OptimizationLevel == 0));
-}
-
-static void addGeneralOptsForMemorySanitizer(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM,
- bool CompileKernel) {
- const PassManagerBuilderWrapper &BuilderWrapper =
- static_cast<const PassManagerBuilderWrapper&>(Builder);
- const CodeGenOptions &CGOpts = BuilderWrapper.getCGOpts();
- int TrackOrigins = CGOpts.SanitizeMemoryTrackOrigins;
- bool Recover = CGOpts.SanitizeRecover.has(SanitizerKind::Memory);
- PM.add(createMemorySanitizerLegacyPassPass(
- MemorySanitizerOptions{TrackOrigins, Recover, CompileKernel,
- CGOpts.SanitizeMemoryParamRetval != 0}));
-
- // MemorySanitizer inserts complex instrumentation that mostly follows
- // the logic of the original code, but operates on "shadow" values.
- // It can benefit from re-running some general purpose optimization passes.
- if (Builder.OptLevel > 0) {
- PM.add(createEarlyCSEPass());
- PM.add(createReassociatePass());
- PM.add(createLICMPass());
- PM.add(createGVNPass());
- PM.add(createInstructionCombiningPass());
- PM.add(createDeadStoreEliminationPass());
- }
-}
-
-static void addMemorySanitizerPass(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- addGeneralOptsForMemorySanitizer(Builder, PM, /*CompileKernel*/ false);
-}
-
-static void addKernelMemorySanitizerPass(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- addGeneralOptsForMemorySanitizer(Builder, PM, /*CompileKernel*/ true);
-}
-
-static void addThreadSanitizerPass(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- PM.add(createThreadSanitizerLegacyPassPass());
-}
-
-static void addDataFlowSanitizerPass(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- const PassManagerBuilderWrapper &BuilderWrapper =
- static_cast<const PassManagerBuilderWrapper&>(Builder);
- const LangOptions &LangOpts = BuilderWrapper.getLangOpts();
- PM.add(createDataFlowSanitizerLegacyPassPass(LangOpts.NoSanitizeFiles));
-}
-
-static void addEntryExitInstrumentationPass(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- PM.add(createEntryExitInstrumenterPass());
-}
-
-static void
-addPostInlineEntryExitInstrumentationPass(const PassManagerBuilder &Builder,
- legacy::PassManagerBase &PM) {
- PM.add(createPostInlineEntryExitInstrumenterPass());
-}
-
static TargetLibraryInfoImpl *createTLII(llvm::Triple &TargetTriple,
const CodeGenOptions &CodeGenOpts) {
TargetLibraryInfoImpl *TLII = new TargetLibraryInfoImpl(TargetTriple);
@@ -443,17 +278,6 @@ static TargetLibraryInfoImpl *createTLII(llvm::Triple &TargetTriple,
return TLII;
}
-static void addSymbolRewriterPass(const CodeGenOptions &Opts,
- legacy::PassManager *MPM) {
- llvm::SymbolRewriter::RewriteDescriptorList DL;
-
- llvm::SymbolRewriter::RewriteMapParser MapParser;
- for (const auto &MapFile : Opts.RewriteMapFiles)
- MapParser.parse(MapFile, &DL);
-
- MPM->add(createRewriteSymbolsPass(DL));
-}
-
static CodeGenOpt::Level getCGOptLevel(const CodeGenOptions &CodeGenOpts) {
switch (CodeGenOpts.OptimizationLevel) {
default:
@@ -546,6 +370,8 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
Options.BinutilsVersion =
llvm::TargetMachine::parseBinutilsVersion(CodeGenOpts.BinutilsVersion);
Options.UseInitArray = CodeGenOpts.UseInitArray;
+ Options.LowerGlobalDtorsViaCxaAtExit =
+ CodeGenOpts.RegisterGlobalDtorsWithAtExit;
Options.DisableIntegratedAS = CodeGenOpts.DisableIntegratedAS;
Options.CompressDebugSections = CodeGenOpts.getCompressDebugSections();
Options.RelaxELFRelocations = CodeGenOpts.RelaxELFRelocations;
@@ -606,6 +432,10 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
Options.EnableAIXExtendedAltivecABI = CodeGenOpts.EnableAIXExtendedAltivecABI;
Options.XRayOmitFunctionIndex = CodeGenOpts.XRayOmitFunctionIndex;
Options.LoopAlignment = CodeGenOpts.LoopAlignment;
+ Options.DebugStrictDwarf = CodeGenOpts.DebugStrictDwarf;
+ Options.ObjectFilenameForDebug = CodeGenOpts.ObjectFilenameForDebug;
+ Options.Hotpatch = CodeGenOpts.HotPatch;
+ Options.JMCInstrument = CodeGenOpts.JMCInstrument;
switch (CodeGenOpts.getSwiftAsyncFramePointer()) {
case CodeGenOptions::SwiftAsyncFramePointerKind::Auto:
@@ -623,9 +453,13 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
}
Options.MCOptions.SplitDwarfFile = CodeGenOpts.SplitDwarfFile;
+ Options.MCOptions.EmitDwarfUnwind = CodeGenOpts.getEmitDwarfUnwind();
Options.MCOptions.MCRelaxAll = CodeGenOpts.RelaxAll;
Options.MCOptions.MCSaveTempLabels = CodeGenOpts.SaveTempLabels;
- Options.MCOptions.MCUseDwarfDirectory = !CodeGenOpts.NoDwarfDirectoryAsm;
+ Options.MCOptions.MCUseDwarfDirectory =
+ CodeGenOpts.NoDwarfDirectoryAsm
+ ? llvm::MCTargetOptions::DisableDwarfDirectory
+ : llvm::MCTargetOptions::EnableDwarfDirectory;
Options.MCOptions.MCNoExecStack = CodeGenOpts.NoExecStack;
Options.MCOptions.MCIncrementalLinkerCompatible =
CodeGenOpts.IncrementalLinkerCompatible;
@@ -644,9 +478,7 @@ static bool initTargetOptions(DiagnosticsEngine &Diags,
Entry.IgnoreSysRoot ? Entry.Path : HSOpts.Sysroot + Entry.Path);
Options.MCOptions.Argv0 = CodeGenOpts.Argv0;
Options.MCOptions.CommandLineArgs = CodeGenOpts.CommandLineArgs;
- Options.DebugStrictDwarf = CodeGenOpts.DebugStrictDwarf;
- Options.ObjectFilenameForDebug = CodeGenOpts.ObjectFilenameForDebug;
- Options.Hotpatch = CodeGenOpts.HotPatch;
+ Options.MisExpect = CodeGenOpts.MisExpect;
return true;
}
@@ -680,233 +512,6 @@ getInstrProfOptions(const CodeGenOptions &CodeGenOpts,
return Options;
}
-void EmitAssemblyHelper::CreatePasses(legacy::PassManager &MPM,
- legacy::FunctionPassManager &FPM) {
- // Handle disabling of all LLVM passes, where we want to preserve the
- // internal module before any optimization.
- if (CodeGenOpts.DisableLLVMPasses)
- return;
-
- // Figure out TargetLibraryInfo. This needs to be added to MPM and FPM
- // manually (and not via PMBuilder), since some passes (eg. InstrProfiling)
- // are inserted before PMBuilder ones - they'd get the default-constructed
- // TLI with an unknown target otherwise.
- Triple TargetTriple(TheModule->getTargetTriple());
- std::unique_ptr<TargetLibraryInfoImpl> TLII(
- createTLII(TargetTriple, CodeGenOpts));
-
- // If we reached here with a non-empty index file name, then the index file
- // was empty and we are not performing ThinLTO backend compilation (used in
- // testing in a distributed build environment). Drop any the type test
- // assume sequences inserted for whole program vtables so that codegen doesn't
- // complain.
- if (!CodeGenOpts.ThinLTOIndexFile.empty())
- MPM.add(createLowerTypeTestsPass(/*ExportSummary=*/nullptr,
- /*ImportSummary=*/nullptr,
- /*DropTypeTests=*/true));
-
- PassManagerBuilderWrapper PMBuilder(TargetTriple, CodeGenOpts, LangOpts);
-
- // At O0 and O1 we only run the always inliner which is more efficient. At
- // higher optimization levels we run the normal inliner.
- if (CodeGenOpts.OptimizationLevel <= 1) {
- bool InsertLifetimeIntrinsics = ((CodeGenOpts.OptimizationLevel != 0 &&
- !CodeGenOpts.DisableLifetimeMarkers) ||
- LangOpts.Coroutines);
- PMBuilder.Inliner = createAlwaysInlinerLegacyPass(InsertLifetimeIntrinsics);
- } else {
- // We do not want to inline hot callsites for SamplePGO module-summary build
- // because profile annotation will happen again in ThinLTO backend, and we
- // want the IR of the hot path to match the profile.
- PMBuilder.Inliner = createFunctionInliningPass(
- CodeGenOpts.OptimizationLevel, CodeGenOpts.OptimizeSize,
- (!CodeGenOpts.SampleProfileFile.empty() &&
- CodeGenOpts.PrepareForThinLTO));
- }
-
- PMBuilder.OptLevel = CodeGenOpts.OptimizationLevel;
- PMBuilder.SizeLevel = CodeGenOpts.OptimizeSize;
- PMBuilder.SLPVectorize = CodeGenOpts.VectorizeSLP;
- PMBuilder.LoopVectorize = CodeGenOpts.VectorizeLoop;
- // Only enable CGProfilePass when using integrated assembler, since
- // non-integrated assemblers don't recognize .cgprofile section.
- PMBuilder.CallGraphProfile = !CodeGenOpts.DisableIntegratedAS;
-
- PMBuilder.DisableUnrollLoops = !CodeGenOpts.UnrollLoops;
- // Loop interleaving in the loop vectorizer has historically been set to be
- // enabled when loop unrolling is enabled.
- PMBuilder.LoopsInterleaved = CodeGenOpts.UnrollLoops;
- PMBuilder.MergeFunctions = CodeGenOpts.MergeFunctions;
- PMBuilder.PrepareForThinLTO = CodeGenOpts.PrepareForThinLTO;
- PMBuilder.PrepareForLTO = CodeGenOpts.PrepareForLTO;
- PMBuilder.RerollLoops = CodeGenOpts.RerollLoops;
-
- MPM.add(new TargetLibraryInfoWrapperPass(*TLII));
-
- if (TM)
- TM->adjustPassManager(PMBuilder);
-
- if (CodeGenOpts.DebugInfoForProfiling ||
- !CodeGenOpts.SampleProfileFile.empty())
- PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
- addAddDiscriminatorsPass);
-
- // In ObjC ARC mode, add the main ARC optimization passes.
- if (LangOpts.ObjCAutoRefCount) {
- PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
- addObjCARCExpandPass);
- PMBuilder.addExtension(PassManagerBuilder::EP_ModuleOptimizerEarly,
- addObjCARCAPElimPass);
- PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
- addObjCARCOptPass);
- }
-
- if (LangOpts.Coroutines)
- addCoroutinePassesToExtensionPoints(PMBuilder);
-
- if (!CodeGenOpts.MemoryProfileOutput.empty()) {
- PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
- addMemProfilerPasses);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addMemProfilerPasses);
- }
-
- if (LangOpts.Sanitize.has(SanitizerKind::LocalBounds)) {
- PMBuilder.addExtension(PassManagerBuilder::EP_ScalarOptimizerLate,
- addBoundsCheckingPass);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addBoundsCheckingPass);
- }
-
- if (CodeGenOpts.hasSanitizeCoverage()) {
- PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
- addSanitizerCoveragePass);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addSanitizerCoveragePass);
- }
-
- if (LangOpts.Sanitize.has(SanitizerKind::Address)) {
- PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
- addAddressSanitizerPasses);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addAddressSanitizerPasses);
- }
-
- if (LangOpts.Sanitize.has(SanitizerKind::KernelAddress)) {
- PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
- addKernelAddressSanitizerPasses);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addKernelAddressSanitizerPasses);
- }
-
- if (LangOpts.Sanitize.has(SanitizerKind::HWAddress)) {
- PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
- addHWAddressSanitizerPasses);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addHWAddressSanitizerPasses);
- }
-
- if (LangOpts.Sanitize.has(SanitizerKind::KernelHWAddress)) {
- PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
- addKernelHWAddressSanitizerPasses);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addKernelHWAddressSanitizerPasses);
- }
-
- if (LangOpts.Sanitize.has(SanitizerKind::Memory)) {
- PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
- addMemorySanitizerPass);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addMemorySanitizerPass);
- }
-
- if (LangOpts.Sanitize.has(SanitizerKind::KernelMemory)) {
- PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
- addKernelMemorySanitizerPass);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addKernelMemorySanitizerPass);
- }
-
- if (LangOpts.Sanitize.has(SanitizerKind::Thread)) {
- PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
- addThreadSanitizerPass);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addThreadSanitizerPass);
- }
-
- if (LangOpts.Sanitize.has(SanitizerKind::DataFlow)) {
- PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
- addDataFlowSanitizerPass);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addDataFlowSanitizerPass);
- }
-
- if (CodeGenOpts.InstrumentFunctions ||
- CodeGenOpts.InstrumentFunctionEntryBare ||
- CodeGenOpts.InstrumentFunctionsAfterInlining ||
- CodeGenOpts.InstrumentForProfiling) {
- PMBuilder.addExtension(PassManagerBuilder::EP_EarlyAsPossible,
- addEntryExitInstrumentationPass);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addEntryExitInstrumentationPass);
- PMBuilder.addExtension(PassManagerBuilder::EP_OptimizerLast,
- addPostInlineEntryExitInstrumentationPass);
- PMBuilder.addExtension(PassManagerBuilder::EP_EnabledOnOptLevel0,
- addPostInlineEntryExitInstrumentationPass);
- }
-
- // Set up the per-function pass manager.
- FPM.add(new TargetLibraryInfoWrapperPass(*TLII));
- if (CodeGenOpts.VerifyModule)
- FPM.add(createVerifierPass());
-
- // Set up the per-module pass manager.
- if (!CodeGenOpts.RewriteMapFiles.empty())
- addSymbolRewriterPass(CodeGenOpts, &MPM);
-
- if (Optional<GCOVOptions> Options = getGCOVOptions(CodeGenOpts, LangOpts)) {
- MPM.add(createGCOVProfilerPass(*Options));
- if (CodeGenOpts.getDebugInfo() == codegenoptions::NoDebugInfo)
- MPM.add(createStripSymbolsPass(true));
- }
-
- if (Optional<InstrProfOptions> Options =
- getInstrProfOptions(CodeGenOpts, LangOpts))
- MPM.add(createInstrProfilingLegacyPass(*Options, false));
-
- bool hasIRInstr = false;
- if (CodeGenOpts.hasProfileIRInstr()) {
- PMBuilder.EnablePGOInstrGen = true;
- hasIRInstr = true;
- }
- if (CodeGenOpts.hasProfileCSIRInstr()) {
- assert(!CodeGenOpts.hasProfileCSIRUse() &&
- "Cannot have both CSProfileUse pass and CSProfileGen pass at the "
- "same time");
- assert(!hasIRInstr &&
- "Cannot have both ProfileGen pass and CSProfileGen pass at the "
- "same time");
- PMBuilder.EnablePGOCSInstrGen = true;
- hasIRInstr = true;
- }
- if (hasIRInstr) {
- if (!CodeGenOpts.InstrProfileOutput.empty())
- PMBuilder.PGOInstrGen = CodeGenOpts.InstrProfileOutput;
- else
- PMBuilder.PGOInstrGen = getDefaultProfileGenName();
- }
- if (CodeGenOpts.hasProfileIRUse()) {
- PMBuilder.PGOInstrUse = CodeGenOpts.ProfileInstrumentUsePath;
- PMBuilder.EnablePGOCSInstrUse = CodeGenOpts.hasProfileCSIRUse();
- }
-
- if (!CodeGenOpts.SampleProfileFile.empty())
- PMBuilder.PGOSampleUse = CodeGenOpts.SampleProfileFile;
-
- PMBuilder.populateFunctionPassManager(FPM);
- PMBuilder.populateModulePassManager(MPM);
-}
-
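Editor's note: the block removed above spliced sanitizers, instrumentation, and PGO into the legacy pipeline through PassManagerBuilder extension points (EP_EarlyAsPossible, EP_ScalarOptimizerLate, EP_OptimizerLast, EP_EnabledOnOptLevel0). Under the new pass manager the same wiring is done with PassBuilder callbacks; a minimal sketch of the TSan case, using the standard LLVM 14/15 APIs rather than the literal code this patch keeps:

    #include "llvm/Passes/PassBuilder.h"
    #include "llvm/Transforms/Instrumentation/ThreadSanitizer.h"
    using namespace llvm;

    static void registerTSan(PassBuilder &PB) {
      // One callback replaces both the EP_OptimizerLast and the
      // EP_EnabledOnOptLevel0 registrations: it fires at the end of the
      // pipeline at every optimization level, including -O0.
      PB.registerOptimizerLastEPCallback(
          [](ModulePassManager &MPM, OptimizationLevel Level) {
            MPM.addPass(ModuleThreadSanitizerPass());
            MPM.addPass(
                createModuleToFunctionPassAdaptor(ThreadSanitizerPass()));
          });
    }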
static void setCommandLineOpts(const CodeGenOptions &CodeGenOpts) {
SmallVector<const char *, 16> BackendArgs;
BackendArgs.push_back("clang"); // Fake program name.
@@ -961,7 +566,6 @@ bool EmitAssemblyHelper::AddEmitPasses(legacy::PassManager &CodeGenPasses,
raw_pwrite_stream &OS,
raw_pwrite_stream *DwoOS) {
// Add LibraryInfo.
- llvm::Triple TargetTriple(TheModule->getTargetTriple());
std::unique_ptr<TargetLibraryInfoImpl> TLII(
createTLII(TargetTriple, CodeGenOpts));
CodeGenPasses.add(new TargetLibraryInfoWrapperPass(*TLII));
@@ -985,139 +589,6 @@ bool EmitAssemblyHelper::AddEmitPasses(legacy::PassManager &CodeGenPasses,
return true;
}

-void EmitAssemblyHelper::EmitAssemblyWithLegacyPassManager(
- BackendAction Action, std::unique_ptr<raw_pwrite_stream> OS) {
- TimeRegion Region(CodeGenOpts.TimePasses ? &CodeGenerationTime : nullptr);
-
- setCommandLineOpts(CodeGenOpts);
-
- bool UsesCodeGen = actionRequiresCodeGen(Action);
- CreateTargetMachine(UsesCodeGen);
-
- if (UsesCodeGen && !TM)
- return;
- if (TM)
- TheModule->setDataLayout(TM->createDataLayout());
-
- DebugifyCustomPassManager PerModulePasses;
- DebugInfoPerPassMap DIPreservationMap;
- if (CodeGenOpts.EnableDIPreservationVerify) {
- PerModulePasses.setDebugifyMode(DebugifyMode::OriginalDebugInfo);
- PerModulePasses.setDIPreservationMap(DIPreservationMap);
-
- if (!CodeGenOpts.DIBugsReportFilePath.empty())
- PerModulePasses.setOrigDIVerifyBugsReportFilePath(
- CodeGenOpts.DIBugsReportFilePath);
- }
- PerModulePasses.add(
- createTargetTransformInfoWrapperPass(getTargetIRAnalysis()));
-
- legacy::FunctionPassManager PerFunctionPasses(TheModule);
- PerFunctionPasses.add(
- createTargetTransformInfoWrapperPass(getTargetIRAnalysis()));
-
- CreatePasses(PerModulePasses, PerFunctionPasses);
-
- // Add a verifier pass if requested. We don't have to do this if the action
- // requires code generation because there will already be a verifier pass in
- // the code-generation pipeline.
- if (!UsesCodeGen && CodeGenOpts.VerifyModule)
- PerModulePasses.add(createVerifierPass());
-
- legacy::PassManager CodeGenPasses;
- CodeGenPasses.add(
- createTargetTransformInfoWrapperPass(getTargetIRAnalysis()));
-
- std::unique_ptr<llvm::ToolOutputFile> ThinLinkOS, DwoOS;
-
- switch (Action) {
- case Backend_EmitNothing:
- break;
-
- case Backend_EmitBC:
- if (CodeGenOpts.PrepareForThinLTO && !CodeGenOpts.DisableLLVMPasses) {
- if (!CodeGenOpts.ThinLinkBitcodeFile.empty()) {
- ThinLinkOS = openOutputFile(CodeGenOpts.ThinLinkBitcodeFile);
- if (!ThinLinkOS)
- return;
- }
- if (!TheModule->getModuleFlag("EnableSplitLTOUnit"))
- TheModule->addModuleFlag(Module::Error, "EnableSplitLTOUnit",
- CodeGenOpts.EnableSplitLTOUnit);
- PerModulePasses.add(createWriteThinLTOBitcodePass(
- *OS, ThinLinkOS ? &ThinLinkOS->os() : nullptr));
- } else {
- // Emit a module summary by default for Regular LTO except for ld64
- // targets
- bool EmitLTOSummary =
- (CodeGenOpts.PrepareForLTO &&
- !CodeGenOpts.DisableLLVMPasses &&
- llvm::Triple(TheModule->getTargetTriple()).getVendor() !=
- llvm::Triple::Apple);
- if (EmitLTOSummary) {
- if (!TheModule->getModuleFlag("ThinLTO"))
- TheModule->addModuleFlag(Module::Error, "ThinLTO", uint32_t(0));
- if (!TheModule->getModuleFlag("EnableSplitLTOUnit"))
- TheModule->addModuleFlag(Module::Error, "EnableSplitLTOUnit",
- uint32_t(1));
- }
-
- PerModulePasses.add(createBitcodeWriterPass(
- *OS, CodeGenOpts.EmitLLVMUseLists, EmitLTOSummary));
- }
- break;
-
- case Backend_EmitLL:
- PerModulePasses.add(
- createPrintModulePass(*OS, "", CodeGenOpts.EmitLLVMUseLists));
- break;
-
- default:
- if (!CodeGenOpts.SplitDwarfOutput.empty()) {
- DwoOS = openOutputFile(CodeGenOpts.SplitDwarfOutput);
- if (!DwoOS)
- return;
- }
- if (!AddEmitPasses(CodeGenPasses, Action, *OS,
- DwoOS ? &DwoOS->os() : nullptr))
- return;
- }
-
- // Before executing passes, print the final values of the LLVM options.
- cl::PrintOptionValues();
-
- // Run passes. For now we do all passes at once, but eventually we
- // would like to have the option of streaming code generation.
-
- {
- PrettyStackTraceString CrashInfo("Per-function optimization");
- llvm::TimeTraceScope TimeScope("PerFunctionPasses");
-
- PerFunctionPasses.doInitialization();
- for (Function &F : *TheModule)
- if (!F.isDeclaration())
- PerFunctionPasses.run(F);
- PerFunctionPasses.doFinalization();
- }
-
- {
- PrettyStackTraceString CrashInfo("Per-module optimization passes");
- llvm::TimeTraceScope TimeScope("PerModulePasses");
- PerModulePasses.run(*TheModule);
- }
-
- {
- PrettyStackTraceString CrashInfo("Code generation");
- llvm::TimeTraceScope TimeScope("CodeGenPasses");
- CodeGenPasses.run(*TheModule);
- }
-
- if (ThinLinkOS)
- ThinLinkOS->keep();
- if (DwoOS)
- DwoOS->keep();
-}
-
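Editor's note: the deleted EmitAssemblyWithLegacyPassManager drove three separate legacy managers (per-function, per-module, codegen) in sequence. Its surviving counterpart, EmitAssembly via RunOptimizationPipeline, reduces to the standard PassBuilder setup. A simplified sketch, with "using namespace llvm" in effect as it is in this file, and TheModule/TM standing for the members used above:

    LoopAnalysisManager LAM;
    FunctionAnalysisManager FAM;
    CGSCCAnalysisManager CGAM;
    ModuleAnalysisManager MAM;
    PassBuilder PB(TM);                  // TM may be null, e.g. for -emit-llvm

    PB.registerModuleAnalyses(MAM);
    PB.registerCGSCCAnalyses(CGAM);
    PB.registerFunctionAnalyses(FAM);
    PB.registerLoopAnalyses(LAM);
    PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);

    ModulePassManager MPM =
        PB.buildPerModuleDefaultPipeline(OptimizationLevel::O2);
    // One run replaces the per-function and per-module legacy managers;
    // machine-code emission still goes through a small legacy manager in
    // RunCodegenPipeline.
    MPM.run(*TheModule, MAM);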
static OptimizationLevel mapToLevel(const CodeGenOptions &Opts) {
switch (Opts.OptimizationLevel) {
default:
@@ -1205,7 +676,6 @@ static void addSanitizers(const Triple &TargetTriple,
Opts.Recover = CodeGenOpts.SanitizeRecover.has(Mask);
Opts.UseAfterScope = CodeGenOpts.SanitizeAddressUseAfterScope;
Opts.UseAfterReturn = CodeGenOpts.getSanitizeAddressUseAfterReturn();
- MPM.addPass(RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
MPM.addPass(ModuleAddressSanitizerPass(
Opts, UseGlobalGC, UseOdrIndicator, DestructorKind));
}
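Editor's note: dropping the RequireAnalysisPass line tracks an upstream change in which ModuleAddressSanitizerPass obtains the globals metadata it needs on its own, so pre-seeding ASanGlobalsMetadataAnalysis became redundant. That reading is an inference from this hunk, not something the patch states:

    // before: force the analysis to run ahead of the pass
    MPM.addPass(RequireAnalysisPass<ASanGlobalsMetadataAnalysis, Module>());
    MPM.addPass(ModuleAddressSanitizerPass(/* options */));
    // after: adding the pass alone suffices
    MPM.addPass(ModuleAddressSanitizerPass(
        Opts, UseGlobalGC, UseOdrIndicator, DestructorKind));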
@@ -1270,7 +740,7 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
assert(!CodeGenOpts.hasProfileCSIRUse() &&
"Cannot have both CSProfileUse pass and CSProfileGen pass at "
"the same time");
- if (PGOOpt.hasValue()) {
+ if (PGOOpt) {
assert(PGOOpt->Action != PGOOptions::IRInstr &&
PGOOpt->Action != PGOOptions::SampleUse &&
"Cannot run CSProfileGen pass with ProfileGen or SampleUse "
@@ -1334,7 +804,6 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
// Register the target library analysis directly and give it a customized
// preset TLI.
- Triple TargetTriple(TheModule->getTargetTriple());
std::unique_ptr<TargetLibraryInfoImpl> TLII(
createTLII(TargetTriple, CodeGenOpts));
FAM.registerPass([&] { return TargetLibraryAnalysis(*TLII); });
@@ -1468,10 +937,7 @@ void EmitAssemblyHelper::RunOptimizationPipeline(
} else {
// Emit a module summary by default for Regular LTO except for ld64
// targets
- bool EmitLTOSummary =
- (CodeGenOpts.PrepareForLTO && !CodeGenOpts.DisableLLVMPasses &&
- llvm::Triple(TheModule->getTargetTriple()).getVendor() !=
- llvm::Triple::Apple);
+ bool EmitLTOSummary = shouldEmitRegularLTOSummary();
if (EmitLTOSummary) {
if (!TheModule->getModuleFlag("ThinLTO"))
TheModule->addModuleFlag(Module::Error, "ThinLTO", uint32_t(0));
@@ -1536,14 +1002,6 @@ void EmitAssemblyHelper::RunCodegenPipeline(
}
}

-/// A clean version of `EmitAssembly` that uses the new pass manager.
-///
-/// Not all features are currently supported in this system, but where
-/// necessary it falls back to the legacy pass manager to at least provide
-/// basic functionality.
-///
-/// This API is planned to have its functionality finished and then to replace
-/// `EmitAssembly` at some point in the future when the default switches.
void EmitAssemblyHelper::EmitAssembly(BackendAction Action,
std::unique_ptr<raw_pwrite_stream> OS) {
TimeRegion Region(CodeGenOpts.TimePasses ? &CodeGenerationTime : nullptr);
@@ -1631,7 +1089,6 @@ static void runThinLTOBackend(
}
Conf.ProfileRemapping = std::move(ProfileRemapping);
- Conf.UseNewPM = !CGOpts.LegacyPassManager;
Conf.DebugPassManager = CGOpts.DebugPassManager;
Conf.RemarksWithHotness = CGOpts.DiagnosticsWithHotness;
Conf.RemarksFilename = CGOpts.OptRecordFile;
@@ -1721,11 +1178,7 @@ void clang::EmitBackendOutput(DiagnosticsEngine &Diags,
}
EmitAssemblyHelper AsmHelper(Diags, HeaderOpts, CGOpts, TOpts, LOpts, M);
-
- if (CGOpts.LegacyPassManager)
- AsmHelper.EmitAssemblyWithLegacyPassManager(Action, std::move(OS));
- else
- AsmHelper.EmitAssembly(Action, std::move(OS));
+ AsmHelper.EmitAssembly(Action, std::move(OS));
// Verify clang's TargetInfo DataLayout against the LLVM TargetMachine's
// DataLayout.
@@ -1758,24 +1211,16 @@ void clang::EmbedObject(llvm::Module *M, const CodeGenOptions &CGOpts,
return;
for (StringRef OffloadObject : CGOpts.OffloadObjects) {
- if (OffloadObject.count(',') != 1) {
- Diags.Report(Diags.getCustomDiagID(
- DiagnosticsEngine::Error, "Invalid string pair for embedding '%0'"))
- << OffloadObject;
- return;
- }
- auto FilenameAndSection = OffloadObject.split(',');
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> ObjectOrErr =
- llvm::MemoryBuffer::getFileOrSTDIN(std::get<0>(FilenameAndSection));
+ llvm::MemoryBuffer::getFileOrSTDIN(OffloadObject);
if (std::error_code EC = ObjectOrErr.getError()) {
auto DiagID = Diags.getCustomDiagID(DiagnosticsEngine::Error,
"could not open '%0' for embedding");
- Diags.Report(DiagID) << std::get<0>(FilenameAndSection);
+ Diags.Report(DiagID) << OffloadObject;
return;
}
- SmallString<128> SectionName(
- {".llvm.offloading.", std::get<1>(FilenameAndSection)});
- llvm::embedBufferInModule(*M, **ObjectOrErr, SectionName);
+ llvm::embedBufferInModule(*M, **ObjectOrErr, ".llvm.offloading",
+ Align(object::OffloadBinary::getAlignment()));
}
}
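Editor's note: EmbedObject previously parsed each embedded-object argument as a "file,section" pair and placed the buffer in a per-file ".llvm.offloading.<section>" section; after this hunk the argument is just a file name, and every object lands in the single ".llvm.offloading" section, aligned for the OffloadBinary reader. A hedged sketch of the new call, with Buf standing for the MemoryBuffer loaded above:

    llvm::embedBufferInModule(
        *M, Buf->getMemBufferRef(), ".llvm.offloading",
        llvm::Align(llvm::object::OffloadBinary::getAlignment()));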
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
index 10569ae2c3f9..dee0cb64be97 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGAtomic.cpp
@@ -86,15 +86,14 @@ namespace {
lvalue.getAlignment();
VoidPtrAddr = CGF.Builder.CreateConstGEP1_64(
CGF.Int8Ty, VoidPtrAddr, OffsetInChars.getQuantity());
+ llvm::Type *IntTy = CGF.Builder.getIntNTy(AtomicSizeInBits);
auto Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- VoidPtrAddr,
- CGF.Builder.getIntNTy(AtomicSizeInBits)->getPointerTo(),
- "atomic_bitfield_base");
+ VoidPtrAddr, IntTy->getPointerTo(), "atomic_bitfield_base");
BFI = OrigBFI;
BFI.Offset = Offset;
BFI.StorageSize = AtomicSizeInBits;
BFI.StorageOffset += OffsetInChars;
- LVal = LValue::MakeBitfield(Address(Addr, lvalue.getAlignment()),
+ LVal = LValue::MakeBitfield(Address(Addr, IntTy, lvalue.getAlignment()),
BFI, lvalue.getType(), lvalue.getBaseInfo(),
lvalue.getTBAAInfo());
AtomicTy = C.getIntTypeForBitwidth(AtomicSizeInBits, OrigBFI.IsSigned);
@@ -149,7 +148,16 @@ namespace {
return LVal.getExtVectorPointer();
}
Address getAtomicAddress() const {
- return Address(getAtomicPointer(), getAtomicAlignment());
+ llvm::Type *ElTy;
+ if (LVal.isSimple())
+ ElTy = LVal.getAddress(CGF).getElementType();
+ else if (LVal.isBitField())
+ ElTy = LVal.getBitFieldAddress().getElementType();
+ else if (LVal.isVectorElt())
+ ElTy = LVal.getVectorAddress().getElementType();
+ else
+ ElTy = LVal.getExtVectorAddress().getElementType();
+ return Address(getAtomicPointer(), ElTy, getAtomicAlignment());
}
Address getAtomicAddressAsAtomicIntPointer() const {
@@ -296,7 +304,8 @@ Address AtomicInfo::CreateTempAlloca() const {
// Cast to pointer to value type for bitfields.
if (LVal.isBitField())
return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- TempAlloca, getAtomicAddress().getType());
+ TempAlloca, getAtomicAddress().getType(),
+ getAtomicAddress().getElementType());
return TempAlloca;
}
@@ -779,9 +788,9 @@ AddDirectArgument(CodeGenFunction &CGF, CallArgList &Args,
int64_t SizeInBits = CGF.getContext().toBits(SizeInChars);
ValTy =
CGF.getContext().getIntTypeForBitwidth(SizeInBits, /*Signed=*/false);
- llvm::Type *IPtrTy = llvm::IntegerType::get(CGF.getLLVMContext(),
- SizeInBits)->getPointerTo();
- Address Ptr = Address(CGF.Builder.CreateBitCast(Val, IPtrTy), Align);
+ llvm::Type *ITy = llvm::IntegerType::get(CGF.getLLVMContext(), SizeInBits);
+ Address Ptr = Address(CGF.Builder.CreateBitCast(Val, ITy->getPointerTo()),
+ ITy, Align);
Val = CGF.EmitLoadOfScalar(Ptr, false,
CGF.getContext().getPointerType(ValTy),
Loc);
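Editor's note: the recurring three-argument Address(pointer, element type, alignment) constructions in this and the following files are part of clang's opaque-pointer migration. Conceptually, a simplified sketch of clang's CodeGen::Address, not its full interface:

    class Address {
      llvm::Value *Pointer;     // the pointer SSA value
      llvm::Type *ElementType;  // pointee type; with opaque pointers it can no
                                // longer be recovered from Pointer->getType()
      clang::CharUnits Alignment;
    public:
      Address(llvm::Value *P, llvm::Type *ElemTy, clang::CharUnits Align)
          : Pointer(P), ElementType(ElemTy), Alignment(Align) {}
      llvm::Type *getElementType() const { return ElementType; }
    };

Every load and GEP must spell out the element type once pointers are opaque, which is why each two-argument construction above gains an explicit type such as IntTy or Int8Ty.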
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp
index 1f1de3df857c..ff6ca0914e0d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBlocks.cpp
@@ -216,8 +216,9 @@ static llvm::Constant *buildBlockDescriptor(CodeGenModule &CGM,
llvm::Constant *disposeHelper = buildDisposeHelper(CGM, blockInfo);
elements.add(disposeHelper);
- if (cast<llvm::Function>(copyHelper->getOperand(0))->hasInternalLinkage() ||
- cast<llvm::Function>(disposeHelper->getOperand(0))
+ if (cast<llvm::Function>(copyHelper->stripPointerCasts())
+ ->hasInternalLinkage() ||
+ cast<llvm::Function>(disposeHelper->stripPointerCasts())
->hasInternalLinkage())
hasInternalHelper = true;
}
@@ -1105,7 +1106,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
if (IsOpenCL) {
CGM.getOpenCLRuntime().recordBlockInfo(blockInfo.BlockExpression, InvokeFn,
- result);
+ result, blockInfo.StructureType);
}
return result;
@@ -1160,8 +1161,7 @@ llvm::Type *CodeGenModule::getGenericBlockLiteralType() {
SmallVector<llvm::Type *, 8> StructFields(
{IntTy, IntTy, getOpenCLRuntime().getGenericVoidPointerType()});
if (auto *Helper = getTargetCodeGenInfo().getTargetOpenCLBlockHelper()) {
- for (auto I : Helper->getCustomFieldTypes())
- StructFields.push_back(I);
+ llvm::append_range(StructFields, Helper->getCustomFieldTypes());
}
GenericBlockLiteralType = llvm::StructType::create(
StructFields, "struct.__opencl_block_literal_generic");
@@ -1263,10 +1263,9 @@ Address CodeGenFunction::GetAddrOfBlockDecl(const VarDecl *variable) {
// to byref*.
auto &byrefInfo = getBlockByrefInfo(variable);
- addr = Address(Builder.CreateLoad(addr), byrefInfo.ByrefAlignment);
+ addr = Address(Builder.CreateLoad(addr), Int8Ty, byrefInfo.ByrefAlignment);
- auto byrefPointerType = llvm::PointerType::get(byrefInfo.Type, 0);
- addr = Builder.CreateBitCast(addr, byrefPointerType, "byref.addr");
+ addr = Builder.CreateElementBitCast(addr, byrefInfo.Type, "byref.addr");
addr = emitBlockByrefAddress(addr, byrefInfo, /*follow*/ true,
variable->getName());
@@ -1402,7 +1401,8 @@ static llvm::Constant *buildGlobalBlock(CodeGenModule &CGM,
if (CGM.getContext().getLangOpts().OpenCL)
CGM.getOpenCLRuntime().recordBlockInfo(
blockInfo.BlockExpression,
- cast<llvm::Function>(blockFn->stripPointerCasts()), Result);
+ cast<llvm::Function>(blockFn->stripPointerCasts()), Result,
+ literal->getValueType());
return Result;
}
@@ -1441,15 +1441,12 @@ void CodeGenFunction::setBlockContextParameter(const ImplicitParamDecl *D,
Address CodeGenFunction::LoadBlockStruct() {
assert(BlockInfo && "not in a block invocation function!");
assert(BlockPointer && "no block pointer set!");
- return Address(BlockPointer, BlockInfo->BlockAlign);
+ return Address(BlockPointer, BlockInfo->StructureType, BlockInfo->BlockAlign);
}
-llvm::Function *
-CodeGenFunction::GenerateBlockFunction(GlobalDecl GD,
- const CGBlockInfo &blockInfo,
- const DeclMapTy &ldm,
- bool IsLambdaConversionToBlock,
- bool BuildGlobalBlock) {
+llvm::Function *CodeGenFunction::GenerateBlockFunction(
+ GlobalDecl GD, const CGBlockInfo &blockInfo, const DeclMapTy &ldm,
+ bool IsLambdaConversionToBlock, bool BuildGlobalBlock) {
const BlockDecl *blockDecl = blockInfo.getBlockDecl();
CurGD = GD;
@@ -1940,15 +1937,15 @@ CodeGenFunction::GenerateCopyHelperFunction(const CGBlockInfo &blockInfo) {
StartFunction(GlobalDecl(), ReturnTy, Fn, FI, args);
auto AL = ApplyDebugLocation::CreateArtificial(*this);
- llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
-
Address src = GetAddrOfLocalVar(&SrcDecl);
- src = Address(Builder.CreateLoad(src), blockInfo.BlockAlign);
- src = Builder.CreateBitCast(src, structPtrTy, "block.source");
+ src = Address(Builder.CreateLoad(src), Int8Ty, blockInfo.BlockAlign);
+ src = Builder.CreateElementBitCast(src, blockInfo.StructureType,
+ "block.source");
Address dst = GetAddrOfLocalVar(&DstDecl);
- dst = Address(Builder.CreateLoad(dst), blockInfo.BlockAlign);
- dst = Builder.CreateBitCast(dst, structPtrTy, "block.dest");
+ dst = Address(Builder.CreateLoad(dst), Int8Ty, blockInfo.BlockAlign);
+ dst =
+ Builder.CreateElementBitCast(dst, blockInfo.StructureType, "block.dest");
for (auto &capture : blockInfo.SortedCaptures) {
if (capture.isConstantOrTrivial())
@@ -2130,11 +2127,9 @@ CodeGenFunction::GenerateDestroyHelperFunction(const CGBlockInfo &blockInfo) {
auto AL = ApplyDebugLocation::CreateArtificial(*this);
- llvm::Type *structPtrTy = blockInfo.StructureType->getPointerTo();
-
Address src = GetAddrOfLocalVar(&SrcDecl);
- src = Address(Builder.CreateLoad(src), blockInfo.BlockAlign);
- src = Builder.CreateBitCast(src, structPtrTy, "block");
+ src = Address(Builder.CreateLoad(src), Int8Ty, blockInfo.BlockAlign);
+ src = Builder.CreateElementBitCast(src, blockInfo.StructureType, "block");
CodeGenFunction::RunCleanupsScope cleanups(*this);
@@ -2171,9 +2166,9 @@ public:
void emitCopy(CodeGenFunction &CGF, Address destField,
Address srcField) override {
- destField = CGF.Builder.CreateBitCast(destField, CGF.VoidPtrTy);
+ destField = CGF.Builder.CreateElementBitCast(destField, CGF.Int8Ty);
- srcField = CGF.Builder.CreateBitCast(srcField, CGF.VoidPtrPtrTy);
+ srcField = CGF.Builder.CreateElementBitCast(srcField, CGF.Int8PtrTy);
llvm::Value *srcValue = CGF.Builder.CreateLoad(srcField);
unsigned flags = (Flags | BLOCK_BYREF_CALLER).getBitMask();
@@ -2186,7 +2181,7 @@ public:
}
void emitDispose(CodeGenFunction &CGF, Address field) override {
- field = CGF.Builder.CreateBitCast(field, CGF.Int8PtrTy->getPointerTo(0));
+ field = CGF.Builder.CreateElementBitCast(field, CGF.Int8PtrTy);
llvm::Value *value = CGF.Builder.CreateLoad(field);
CGF.BuildBlockRelease(value, Flags | BLOCK_BYREF_CALLER, false);
@@ -2376,23 +2371,21 @@ generateByrefCopyHelper(CodeGenFunction &CGF, const BlockByrefInfo &byrefInfo,
auto AL = ApplyDebugLocation::CreateArtificial(CGF);
if (generator.needsCopy()) {
- llvm::Type *byrefPtrType = byrefInfo.Type->getPointerTo(0);
-
// dst->x
Address destField = CGF.GetAddrOfLocalVar(&Dst);
- destField = Address(CGF.Builder.CreateLoad(destField),
+ destField = Address(CGF.Builder.CreateLoad(destField), CGF.Int8Ty,
byrefInfo.ByrefAlignment);
- destField = CGF.Builder.CreateBitCast(destField, byrefPtrType);
- destField = CGF.emitBlockByrefAddress(destField, byrefInfo, false,
- "dest-object");
+ destField = CGF.Builder.CreateElementBitCast(destField, byrefInfo.Type);
+ destField =
+ CGF.emitBlockByrefAddress(destField, byrefInfo, false, "dest-object");
// src->x
Address srcField = CGF.GetAddrOfLocalVar(&Src);
- srcField = Address(CGF.Builder.CreateLoad(srcField),
+ srcField = Address(CGF.Builder.CreateLoad(srcField), CGF.Int8Ty,
byrefInfo.ByrefAlignment);
- srcField = CGF.Builder.CreateBitCast(srcField, byrefPtrType);
- srcField = CGF.emitBlockByrefAddress(srcField, byrefInfo, false,
- "src-object");
+ srcField = CGF.Builder.CreateElementBitCast(srcField, byrefInfo.Type);
+ srcField =
+ CGF.emitBlockByrefAddress(srcField, byrefInfo, false, "src-object");
generator.emitCopy(CGF, destField, srcField);
}
@@ -2446,9 +2439,9 @@ generateByrefDisposeHelper(CodeGenFunction &CGF,
if (generator.needsDispose()) {
Address addr = CGF.GetAddrOfLocalVar(&Src);
- addr = Address(CGF.Builder.CreateLoad(addr), byrefInfo.ByrefAlignment);
- auto byrefPtrType = byrefInfo.Type->getPointerTo(0);
- addr = CGF.Builder.CreateBitCast(addr, byrefPtrType);
+ addr = Address(CGF.Builder.CreateLoad(addr), CGF.Int8Ty,
+ byrefInfo.ByrefAlignment);
+ addr = CGF.Builder.CreateElementBitCast(addr, byrefInfo.Type);
addr = CGF.emitBlockByrefAddress(addr, byrefInfo, false, "object");
generator.emitDispose(CGF, addr);
@@ -2594,7 +2587,8 @@ Address CodeGenFunction::emitBlockByrefAddress(Address baseAddr,
// Chase the forwarding address if requested.
if (followForward) {
Address forwardingAddr = Builder.CreateStructGEP(baseAddr, 1, "forwarding");
- baseAddr = Address(Builder.CreateLoad(forwardingAddr), info.ByrefAlignment);
+ baseAddr = Address(Builder.CreateLoad(forwardingAddr), info.Type,
+ info.ByrefAlignment);
}
return Builder.CreateStructGEP(baseAddr, info.FieldIndex, name);
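Editor's note: the rewrites throughout CGBlocks.cpp follow one mechanical pattern: a pointer-typed bitcast on an Address becomes an element-typed one, so the recorded element type stays in sync (illustrative only):

    // before: cast via a pointer type, element type implied
    addr = Builder.CreateBitCast(addr, byrefInfo.Type->getPointerTo());
    // after: name the pointee directly; this keeps working under opaque
    // pointers, where T->getPointerTo() carries no element information
    addr = Builder.CreateElementBitCast(addr, byrefInfo.Type);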
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h b/contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h
index 06b2da146603..2fcfea64ede6 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBuilder.h
@@ -32,6 +32,7 @@ public:
void InsertHelper(llvm::Instruction *I, const llvm::Twine &Name,
llvm::BasicBlock *BB,
llvm::BasicBlock::iterator InsertPt) const override;
+
private:
CodeGenFunction *CGF = nullptr;
};
@@ -45,17 +46,18 @@ class CGBuilderTy : public CGBuilderBaseTy {
/// Storing a reference to the type cache here makes it a lot easier
/// to build natural-feeling, target-specific IR.
const CodeGenTypeCache &TypeCache;
+
public:
CGBuilderTy(const CodeGenTypeCache &TypeCache, llvm::LLVMContext &C)
- : CGBuilderBaseTy(C), TypeCache(TypeCache) {}
- CGBuilderTy(const CodeGenTypeCache &TypeCache,
- llvm::LLVMContext &C, const llvm::ConstantFolder &F,
+ : CGBuilderBaseTy(C), TypeCache(TypeCache) {}
+ CGBuilderTy(const CodeGenTypeCache &TypeCache, llvm::LLVMContext &C,
+ const llvm::ConstantFolder &F,
const CGBuilderInserterTy &Inserter)
- : CGBuilderBaseTy(C, F, Inserter), TypeCache(TypeCache) {}
+ : CGBuilderBaseTy(C, F, Inserter), TypeCache(TypeCache) {}
CGBuilderTy(const CodeGenTypeCache &TypeCache, llvm::Instruction *I)
- : CGBuilderBaseTy(I), TypeCache(TypeCache) {}
+ : CGBuilderBaseTy(I), TypeCache(TypeCache) {}
CGBuilderTy(const CodeGenTypeCache &TypeCache, llvm::BasicBlock *BB)
- : CGBuilderBaseTy(BB), TypeCache(TypeCache) {}
+ : CGBuilderBaseTy(BB), TypeCache(TypeCache) {}
llvm::ConstantInt *getSize(CharUnits N) {
return llvm::ConstantInt::get(TypeCache.SizeTy, N.getQuantity());
@@ -102,7 +104,8 @@ public:
using CGBuilderBaseTy::CreateAlignedStore;
llvm::StoreInst *CreateAlignedStore(llvm::Value *Val, llvm::Value *Addr,
- CharUnits Align, bool IsVolatile = false) {
+ CharUnits Align,
+ bool IsVolatile = false) {
return CreateAlignedStore(Val, Addr, Align.getAsAlign(), IsVolatile);
}
@@ -151,18 +154,13 @@ public:
Ordering, SSID);
}
- using CGBuilderBaseTy::CreateBitCast;
- Address CreateBitCast(Address Addr, llvm::Type *Ty,
- const llvm::Twine &Name = "") {
- return Address(CreateBitCast(Addr.getPointer(), Ty, Name),
- Addr.getAlignment());
- }
-
using CGBuilderBaseTy::CreateAddrSpaceCast;
Address CreateAddrSpaceCast(Address Addr, llvm::Type *Ty,
const llvm::Twine &Name = "") {
- return Address(CreateAddrSpaceCast(Addr.getPointer(), Ty, Name),
- Addr.getAlignment());
+ assert(cast<llvm::PointerType>(Ty)->isOpaqueOrPointeeTypeMatches(
+ Addr.getElementType()) &&
+ "Should not change the element type");
+ return Addr.withPointer(CreateAddrSpaceCast(Addr.getPointer(), Ty, Name));
}
/// Cast the element type of the given address to a different type,
@@ -170,16 +168,17 @@ public:
Address CreateElementBitCast(Address Addr, llvm::Type *Ty,
const llvm::Twine &Name = "") {
auto *PtrTy = Ty->getPointerTo(Addr.getAddressSpace());
- return Address(CreateBitCast(Addr.getPointer(), PtrTy, Name),
- Ty, Addr.getAlignment());
+ return Address(CreateBitCast(Addr.getPointer(), PtrTy, Name), Ty,
+ Addr.getAlignment());
}
using CGBuilderBaseTy::CreatePointerBitCastOrAddrSpaceCast;
Address CreatePointerBitCastOrAddrSpaceCast(Address Addr, llvm::Type *Ty,
+ llvm::Type *ElementTy,
const llvm::Twine &Name = "") {
llvm::Value *Ptr =
- CreatePointerBitCastOrAddrSpaceCast(Addr.getPointer(), Ty, Name);
- return Address(Ptr, Addr.getAlignment());
+ CreatePointerBitCastOrAddrSpaceCast(Addr.getPointer(), Ty, Name);
+ return Address(Ptr, ElementTy, Addr.getAlignment());
}
/// Given
@@ -197,10 +196,10 @@ public:
const llvm::StructLayout *Layout = DL.getStructLayout(ElTy);
auto Offset = CharUnits::fromQuantity(Layout->getElementOffset(Index));
- return Address(CreateStructGEP(Addr.getElementType(),
- Addr.getPointer(), Index, Name),
- ElTy->getElementType(Index),
- Addr.getAlignment().alignmentAtOffset(Offset));
+ return Address(
+ CreateStructGEP(Addr.getElementType(), Addr.getPointer(), Index, Name),
+ ElTy->getElementType(Index),
+ Addr.getAlignment().alignmentAtOffset(Offset));
}
/// Given
@@ -268,10 +267,10 @@ public:
CharUnits EltSize =
CharUnits::fromQuantity(DL.getTypeAllocSize(Addr.getElementType()));
- return Address(CreateGEP(Addr.getElementType(), Addr.getPointer(), Index,
- Name),
- Addr.getElementType(),
- Addr.getAlignment().alignmentOfArrayElement(EltSize));
+ return Address(
+ CreateGEP(Addr.getElementType(), Addr.getPointer(), Index, Name),
+ Addr.getElementType(),
+ Addr.getAlignment().alignmentOfArrayElement(EltSize));
}
/// Given a pointer to i8, adjust it by a given constant offset.
@@ -345,9 +344,16 @@ public:
Dest.getAlignment().getAsAlign(), IsVolatile);
}
+ using CGBuilderBaseTy::CreateMemSetInline;
+ llvm::CallInst *CreateMemSetInline(Address Dest, llvm::Value *Value,
+ uint64_t Size) {
+ return CreateMemSetInline(Dest.getPointer(),
+ Dest.getAlignment().getAsAlign(), Value,
+ getInt64(Size));
+ }
+
using CGBuilderBaseTy::CreatePreserveStructAccessIndex;
- Address CreatePreserveStructAccessIndex(Address Addr,
- unsigned Index,
+ Address CreatePreserveStructAccessIndex(Address Addr, unsigned Index,
unsigned FieldIndex,
llvm::MDNode *DbgInfo) {
llvm::StructType *ElTy = cast<llvm::StructType>(Addr.getElementType());
@@ -367,7 +373,7 @@ public:
}
};
-} // end namespace CodeGen
-} // end namespace clang
+} // end namespace CodeGen
+} // end namespace clang
#endif
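Editor's note: besides the formatting churn, CGBuilder.h loses the pointer-typed Address CreateBitCast overload (callers migrate to CreateElementBitCast), requires an explicit element type for CreatePointerBitCastOrAddrSpaceCast, and gains a CreateMemSetInline wrapper. The wrapper emits llvm.memset.inline, which the backend must expand in place rather than as a libc call. A hedged usage sketch with illustrative names:

    void zeroHeader(CGBuilderTy &Builder, Address Buf) {
      // Fill the first 64 bytes with zero; the length must be a compile-time
      // constant because llvm.memset.inline cannot fall back to a runtime call.
      Builder.CreateMemSetInline(Buf, Builder.getInt8(0), /*Size=*/64);
    }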
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
index d071c7a5b4a4..8c7ee6b078f2 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGBuiltin.cpp
@@ -45,6 +45,7 @@
#include "llvm/IR/IntrinsicsR600.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/IntrinsicsS390.h"
+#include "llvm/IR/IntrinsicsVE.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/MDBuilder.h"
@@ -373,7 +374,7 @@ static Value *EmitAtomicCmpXchg128ForMSIntrin(CodeGenFunction &CGF,
llvm::Type *Int128PtrTy = Int128Ty->getPointerTo();
Destination = CGF.Builder.CreateBitCast(Destination, Int128PtrTy);
Address ComparandResult(CGF.Builder.CreateBitCast(ComparandPtr, Int128PtrTy),
- CGF.getContext().toCharUnitsFromBits(128));
+ Int128Ty, CGF.getContext().toCharUnitsFromBits(128));
// (((i128)hi) << 64) | ((i128)lo)
ExchangeHigh = CGF.Builder.CreateZExt(ExchangeHigh, Int128Ty);
@@ -961,7 +962,7 @@ static llvm::Value *EmitBitTestIntrinsic(CodeGenFunction &CGF,
Value *BitBaseI8 = CGF.Builder.CreatePointerCast(BitBase, CGF.Int8PtrTy);
Address ByteAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, BitBaseI8,
ByteIndex, "bittest.byteaddr"),
- CharUnits::One());
+ CGF.Int8Ty, CharUnits::One());
Value *PosLow =
CGF.Builder.CreateAnd(CGF.Builder.CreateTrunc(BitPos, CGF.Int8Ty),
llvm::ConstantInt::get(CGF.Int8Ty, 0x7));
@@ -1168,141 +1169,141 @@ translateArmToMsvcIntrin(unsigned BuiltinID) {
switch (BuiltinID) {
default:
return None;
- case ARM::BI_BitScanForward:
- case ARM::BI_BitScanForward64:
+ case clang::ARM::BI_BitScanForward:
+ case clang::ARM::BI_BitScanForward64:
return MSVCIntrin::_BitScanForward;
- case ARM::BI_BitScanReverse:
- case ARM::BI_BitScanReverse64:
+ case clang::ARM::BI_BitScanReverse:
+ case clang::ARM::BI_BitScanReverse64:
return MSVCIntrin::_BitScanReverse;
- case ARM::BI_InterlockedAnd64:
+ case clang::ARM::BI_InterlockedAnd64:
return MSVCIntrin::_InterlockedAnd;
- case ARM::BI_InterlockedExchange64:
+ case clang::ARM::BI_InterlockedExchange64:
return MSVCIntrin::_InterlockedExchange;
- case ARM::BI_InterlockedExchangeAdd64:
+ case clang::ARM::BI_InterlockedExchangeAdd64:
return MSVCIntrin::_InterlockedExchangeAdd;
- case ARM::BI_InterlockedExchangeSub64:
+ case clang::ARM::BI_InterlockedExchangeSub64:
return MSVCIntrin::_InterlockedExchangeSub;
- case ARM::BI_InterlockedOr64:
+ case clang::ARM::BI_InterlockedOr64:
return MSVCIntrin::_InterlockedOr;
- case ARM::BI_InterlockedXor64:
+ case clang::ARM::BI_InterlockedXor64:
return MSVCIntrin::_InterlockedXor;
- case ARM::BI_InterlockedDecrement64:
+ case clang::ARM::BI_InterlockedDecrement64:
return MSVCIntrin::_InterlockedDecrement;
- case ARM::BI_InterlockedIncrement64:
+ case clang::ARM::BI_InterlockedIncrement64:
return MSVCIntrin::_InterlockedIncrement;
- case ARM::BI_InterlockedExchangeAdd8_acq:
- case ARM::BI_InterlockedExchangeAdd16_acq:
- case ARM::BI_InterlockedExchangeAdd_acq:
- case ARM::BI_InterlockedExchangeAdd64_acq:
+ case clang::ARM::BI_InterlockedExchangeAdd8_acq:
+ case clang::ARM::BI_InterlockedExchangeAdd16_acq:
+ case clang::ARM::BI_InterlockedExchangeAdd_acq:
+ case clang::ARM::BI_InterlockedExchangeAdd64_acq:
return MSVCIntrin::_InterlockedExchangeAdd_acq;
- case ARM::BI_InterlockedExchangeAdd8_rel:
- case ARM::BI_InterlockedExchangeAdd16_rel:
- case ARM::BI_InterlockedExchangeAdd_rel:
- case ARM::BI_InterlockedExchangeAdd64_rel:
+ case clang::ARM::BI_InterlockedExchangeAdd8_rel:
+ case clang::ARM::BI_InterlockedExchangeAdd16_rel:
+ case clang::ARM::BI_InterlockedExchangeAdd_rel:
+ case clang::ARM::BI_InterlockedExchangeAdd64_rel:
return MSVCIntrin::_InterlockedExchangeAdd_rel;
- case ARM::BI_InterlockedExchangeAdd8_nf:
- case ARM::BI_InterlockedExchangeAdd16_nf:
- case ARM::BI_InterlockedExchangeAdd_nf:
- case ARM::BI_InterlockedExchangeAdd64_nf:
+ case clang::ARM::BI_InterlockedExchangeAdd8_nf:
+ case clang::ARM::BI_InterlockedExchangeAdd16_nf:
+ case clang::ARM::BI_InterlockedExchangeAdd_nf:
+ case clang::ARM::BI_InterlockedExchangeAdd64_nf:
return MSVCIntrin::_InterlockedExchangeAdd_nf;
- case ARM::BI_InterlockedExchange8_acq:
- case ARM::BI_InterlockedExchange16_acq:
- case ARM::BI_InterlockedExchange_acq:
- case ARM::BI_InterlockedExchange64_acq:
+ case clang::ARM::BI_InterlockedExchange8_acq:
+ case clang::ARM::BI_InterlockedExchange16_acq:
+ case clang::ARM::BI_InterlockedExchange_acq:
+ case clang::ARM::BI_InterlockedExchange64_acq:
return MSVCIntrin::_InterlockedExchange_acq;
- case ARM::BI_InterlockedExchange8_rel:
- case ARM::BI_InterlockedExchange16_rel:
- case ARM::BI_InterlockedExchange_rel:
- case ARM::BI_InterlockedExchange64_rel:
+ case clang::ARM::BI_InterlockedExchange8_rel:
+ case clang::ARM::BI_InterlockedExchange16_rel:
+ case clang::ARM::BI_InterlockedExchange_rel:
+ case clang::ARM::BI_InterlockedExchange64_rel:
return MSVCIntrin::_InterlockedExchange_rel;
- case ARM::BI_InterlockedExchange8_nf:
- case ARM::BI_InterlockedExchange16_nf:
- case ARM::BI_InterlockedExchange_nf:
- case ARM::BI_InterlockedExchange64_nf:
+ case clang::ARM::BI_InterlockedExchange8_nf:
+ case clang::ARM::BI_InterlockedExchange16_nf:
+ case clang::ARM::BI_InterlockedExchange_nf:
+ case clang::ARM::BI_InterlockedExchange64_nf:
return MSVCIntrin::_InterlockedExchange_nf;
- case ARM::BI_InterlockedCompareExchange8_acq:
- case ARM::BI_InterlockedCompareExchange16_acq:
- case ARM::BI_InterlockedCompareExchange_acq:
- case ARM::BI_InterlockedCompareExchange64_acq:
+ case clang::ARM::BI_InterlockedCompareExchange8_acq:
+ case clang::ARM::BI_InterlockedCompareExchange16_acq:
+ case clang::ARM::BI_InterlockedCompareExchange_acq:
+ case clang::ARM::BI_InterlockedCompareExchange64_acq:
return MSVCIntrin::_InterlockedCompareExchange_acq;
- case ARM::BI_InterlockedCompareExchange8_rel:
- case ARM::BI_InterlockedCompareExchange16_rel:
- case ARM::BI_InterlockedCompareExchange_rel:
- case ARM::BI_InterlockedCompareExchange64_rel:
+ case clang::ARM::BI_InterlockedCompareExchange8_rel:
+ case clang::ARM::BI_InterlockedCompareExchange16_rel:
+ case clang::ARM::BI_InterlockedCompareExchange_rel:
+ case clang::ARM::BI_InterlockedCompareExchange64_rel:
return MSVCIntrin::_InterlockedCompareExchange_rel;
- case ARM::BI_InterlockedCompareExchange8_nf:
- case ARM::BI_InterlockedCompareExchange16_nf:
- case ARM::BI_InterlockedCompareExchange_nf:
- case ARM::BI_InterlockedCompareExchange64_nf:
+ case clang::ARM::BI_InterlockedCompareExchange8_nf:
+ case clang::ARM::BI_InterlockedCompareExchange16_nf:
+ case clang::ARM::BI_InterlockedCompareExchange_nf:
+ case clang::ARM::BI_InterlockedCompareExchange64_nf:
return MSVCIntrin::_InterlockedCompareExchange_nf;
- case ARM::BI_InterlockedOr8_acq:
- case ARM::BI_InterlockedOr16_acq:
- case ARM::BI_InterlockedOr_acq:
- case ARM::BI_InterlockedOr64_acq:
+ case clang::ARM::BI_InterlockedOr8_acq:
+ case clang::ARM::BI_InterlockedOr16_acq:
+ case clang::ARM::BI_InterlockedOr_acq:
+ case clang::ARM::BI_InterlockedOr64_acq:
return MSVCIntrin::_InterlockedOr_acq;
- case ARM::BI_InterlockedOr8_rel:
- case ARM::BI_InterlockedOr16_rel:
- case ARM::BI_InterlockedOr_rel:
- case ARM::BI_InterlockedOr64_rel:
+ case clang::ARM::BI_InterlockedOr8_rel:
+ case clang::ARM::BI_InterlockedOr16_rel:
+ case clang::ARM::BI_InterlockedOr_rel:
+ case clang::ARM::BI_InterlockedOr64_rel:
return MSVCIntrin::_InterlockedOr_rel;
- case ARM::BI_InterlockedOr8_nf:
- case ARM::BI_InterlockedOr16_nf:
- case ARM::BI_InterlockedOr_nf:
- case ARM::BI_InterlockedOr64_nf:
+ case clang::ARM::BI_InterlockedOr8_nf:
+ case clang::ARM::BI_InterlockedOr16_nf:
+ case clang::ARM::BI_InterlockedOr_nf:
+ case clang::ARM::BI_InterlockedOr64_nf:
return MSVCIntrin::_InterlockedOr_nf;
- case ARM::BI_InterlockedXor8_acq:
- case ARM::BI_InterlockedXor16_acq:
- case ARM::BI_InterlockedXor_acq:
- case ARM::BI_InterlockedXor64_acq:
+ case clang::ARM::BI_InterlockedXor8_acq:
+ case clang::ARM::BI_InterlockedXor16_acq:
+ case clang::ARM::BI_InterlockedXor_acq:
+ case clang::ARM::BI_InterlockedXor64_acq:
return MSVCIntrin::_InterlockedXor_acq;
- case ARM::BI_InterlockedXor8_rel:
- case ARM::BI_InterlockedXor16_rel:
- case ARM::BI_InterlockedXor_rel:
- case ARM::BI_InterlockedXor64_rel:
+ case clang::ARM::BI_InterlockedXor8_rel:
+ case clang::ARM::BI_InterlockedXor16_rel:
+ case clang::ARM::BI_InterlockedXor_rel:
+ case clang::ARM::BI_InterlockedXor64_rel:
return MSVCIntrin::_InterlockedXor_rel;
- case ARM::BI_InterlockedXor8_nf:
- case ARM::BI_InterlockedXor16_nf:
- case ARM::BI_InterlockedXor_nf:
- case ARM::BI_InterlockedXor64_nf:
+ case clang::ARM::BI_InterlockedXor8_nf:
+ case clang::ARM::BI_InterlockedXor16_nf:
+ case clang::ARM::BI_InterlockedXor_nf:
+ case clang::ARM::BI_InterlockedXor64_nf:
return MSVCIntrin::_InterlockedXor_nf;
- case ARM::BI_InterlockedAnd8_acq:
- case ARM::BI_InterlockedAnd16_acq:
- case ARM::BI_InterlockedAnd_acq:
- case ARM::BI_InterlockedAnd64_acq:
+ case clang::ARM::BI_InterlockedAnd8_acq:
+ case clang::ARM::BI_InterlockedAnd16_acq:
+ case clang::ARM::BI_InterlockedAnd_acq:
+ case clang::ARM::BI_InterlockedAnd64_acq:
return MSVCIntrin::_InterlockedAnd_acq;
- case ARM::BI_InterlockedAnd8_rel:
- case ARM::BI_InterlockedAnd16_rel:
- case ARM::BI_InterlockedAnd_rel:
- case ARM::BI_InterlockedAnd64_rel:
+ case clang::ARM::BI_InterlockedAnd8_rel:
+ case clang::ARM::BI_InterlockedAnd16_rel:
+ case clang::ARM::BI_InterlockedAnd_rel:
+ case clang::ARM::BI_InterlockedAnd64_rel:
return MSVCIntrin::_InterlockedAnd_rel;
- case ARM::BI_InterlockedAnd8_nf:
- case ARM::BI_InterlockedAnd16_nf:
- case ARM::BI_InterlockedAnd_nf:
- case ARM::BI_InterlockedAnd64_nf:
+ case clang::ARM::BI_InterlockedAnd8_nf:
+ case clang::ARM::BI_InterlockedAnd16_nf:
+ case clang::ARM::BI_InterlockedAnd_nf:
+ case clang::ARM::BI_InterlockedAnd64_nf:
return MSVCIntrin::_InterlockedAnd_nf;
- case ARM::BI_InterlockedIncrement16_acq:
- case ARM::BI_InterlockedIncrement_acq:
- case ARM::BI_InterlockedIncrement64_acq:
+ case clang::ARM::BI_InterlockedIncrement16_acq:
+ case clang::ARM::BI_InterlockedIncrement_acq:
+ case clang::ARM::BI_InterlockedIncrement64_acq:
return MSVCIntrin::_InterlockedIncrement_acq;
- case ARM::BI_InterlockedIncrement16_rel:
- case ARM::BI_InterlockedIncrement_rel:
- case ARM::BI_InterlockedIncrement64_rel:
+ case clang::ARM::BI_InterlockedIncrement16_rel:
+ case clang::ARM::BI_InterlockedIncrement_rel:
+ case clang::ARM::BI_InterlockedIncrement64_rel:
return MSVCIntrin::_InterlockedIncrement_rel;
- case ARM::BI_InterlockedIncrement16_nf:
- case ARM::BI_InterlockedIncrement_nf:
- case ARM::BI_InterlockedIncrement64_nf:
+ case clang::ARM::BI_InterlockedIncrement16_nf:
+ case clang::ARM::BI_InterlockedIncrement_nf:
+ case clang::ARM::BI_InterlockedIncrement64_nf:
return MSVCIntrin::_InterlockedIncrement_nf;
- case ARM::BI_InterlockedDecrement16_acq:
- case ARM::BI_InterlockedDecrement_acq:
- case ARM::BI_InterlockedDecrement64_acq:
+ case clang::ARM::BI_InterlockedDecrement16_acq:
+ case clang::ARM::BI_InterlockedDecrement_acq:
+ case clang::ARM::BI_InterlockedDecrement64_acq:
return MSVCIntrin::_InterlockedDecrement_acq;
- case ARM::BI_InterlockedDecrement16_rel:
- case ARM::BI_InterlockedDecrement_rel:
- case ARM::BI_InterlockedDecrement64_rel:
+ case clang::ARM::BI_InterlockedDecrement16_rel:
+ case clang::ARM::BI_InterlockedDecrement_rel:
+ case clang::ARM::BI_InterlockedDecrement64_rel:
return MSVCIntrin::_InterlockedDecrement_rel;
- case ARM::BI_InterlockedDecrement16_nf:
- case ARM::BI_InterlockedDecrement_nf:
- case ARM::BI_InterlockedDecrement64_nf:
+ case clang::ARM::BI_InterlockedDecrement16_nf:
+ case clang::ARM::BI_InterlockedDecrement_nf:
+ case clang::ARM::BI_InterlockedDecrement64_nf:
return MSVCIntrin::_InterlockedDecrement_nf;
}
llvm_unreachable("must return from switch");
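Editor's note: this mass rewrite only adds clang:: qualifiers. CGBuiltin.cpp has both "using namespace clang;" and "using namespace llvm;" in effect, and llvm also defines ARM and AArch64 namespaces, so a bare ARM:: at file scope can become ambiguous; presumably that is the motivation. A toy reduction of the problem, assuming both directives:

    namespace llvm  { namespace ARM { enum { Dummy }; } }
    namespace clang { namespace ARM { enum { BI_BitScanForward }; } }
    using namespace clang;
    using namespace llvm;
    // return ARM::BI_BitScanForward;  // error: reference to 'ARM' is ambiguous
    static int translate() { return clang::ARM::BI_BitScanForward; }  // OK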
@@ -1314,149 +1315,149 @@ translateAarch64ToMsvcIntrin(unsigned BuiltinID) {
switch (BuiltinID) {
default:
return None;
- case AArch64::BI_BitScanForward:
- case AArch64::BI_BitScanForward64:
+ case clang::AArch64::BI_BitScanForward:
+ case clang::AArch64::BI_BitScanForward64:
return MSVCIntrin::_BitScanForward;
- case AArch64::BI_BitScanReverse:
- case AArch64::BI_BitScanReverse64:
+ case clang::AArch64::BI_BitScanReverse:
+ case clang::AArch64::BI_BitScanReverse64:
return MSVCIntrin::_BitScanReverse;
- case AArch64::BI_InterlockedAnd64:
+ case clang::AArch64::BI_InterlockedAnd64:
return MSVCIntrin::_InterlockedAnd;
- case AArch64::BI_InterlockedExchange64:
+ case clang::AArch64::BI_InterlockedExchange64:
return MSVCIntrin::_InterlockedExchange;
- case AArch64::BI_InterlockedExchangeAdd64:
+ case clang::AArch64::BI_InterlockedExchangeAdd64:
return MSVCIntrin::_InterlockedExchangeAdd;
- case AArch64::BI_InterlockedExchangeSub64:
+ case clang::AArch64::BI_InterlockedExchangeSub64:
return MSVCIntrin::_InterlockedExchangeSub;
- case AArch64::BI_InterlockedOr64:
+ case clang::AArch64::BI_InterlockedOr64:
return MSVCIntrin::_InterlockedOr;
- case AArch64::BI_InterlockedXor64:
+ case clang::AArch64::BI_InterlockedXor64:
return MSVCIntrin::_InterlockedXor;
- case AArch64::BI_InterlockedDecrement64:
+ case clang::AArch64::BI_InterlockedDecrement64:
return MSVCIntrin::_InterlockedDecrement;
- case AArch64::BI_InterlockedIncrement64:
+ case clang::AArch64::BI_InterlockedIncrement64:
return MSVCIntrin::_InterlockedIncrement;
- case AArch64::BI_InterlockedExchangeAdd8_acq:
- case AArch64::BI_InterlockedExchangeAdd16_acq:
- case AArch64::BI_InterlockedExchangeAdd_acq:
- case AArch64::BI_InterlockedExchangeAdd64_acq:
+ case clang::AArch64::BI_InterlockedExchangeAdd8_acq:
+ case clang::AArch64::BI_InterlockedExchangeAdd16_acq:
+ case clang::AArch64::BI_InterlockedExchangeAdd_acq:
+ case clang::AArch64::BI_InterlockedExchangeAdd64_acq:
return MSVCIntrin::_InterlockedExchangeAdd_acq;
- case AArch64::BI_InterlockedExchangeAdd8_rel:
- case AArch64::BI_InterlockedExchangeAdd16_rel:
- case AArch64::BI_InterlockedExchangeAdd_rel:
- case AArch64::BI_InterlockedExchangeAdd64_rel:
+ case clang::AArch64::BI_InterlockedExchangeAdd8_rel:
+ case clang::AArch64::BI_InterlockedExchangeAdd16_rel:
+ case clang::AArch64::BI_InterlockedExchangeAdd_rel:
+ case clang::AArch64::BI_InterlockedExchangeAdd64_rel:
return MSVCIntrin::_InterlockedExchangeAdd_rel;
- case AArch64::BI_InterlockedExchangeAdd8_nf:
- case AArch64::BI_InterlockedExchangeAdd16_nf:
- case AArch64::BI_InterlockedExchangeAdd_nf:
- case AArch64::BI_InterlockedExchangeAdd64_nf:
+ case clang::AArch64::BI_InterlockedExchangeAdd8_nf:
+ case clang::AArch64::BI_InterlockedExchangeAdd16_nf:
+ case clang::AArch64::BI_InterlockedExchangeAdd_nf:
+ case clang::AArch64::BI_InterlockedExchangeAdd64_nf:
return MSVCIntrin::_InterlockedExchangeAdd_nf;
- case AArch64::BI_InterlockedExchange8_acq:
- case AArch64::BI_InterlockedExchange16_acq:
- case AArch64::BI_InterlockedExchange_acq:
- case AArch64::BI_InterlockedExchange64_acq:
+ case clang::AArch64::BI_InterlockedExchange8_acq:
+ case clang::AArch64::BI_InterlockedExchange16_acq:
+ case clang::AArch64::BI_InterlockedExchange_acq:
+ case clang::AArch64::BI_InterlockedExchange64_acq:
return MSVCIntrin::_InterlockedExchange_acq;
- case AArch64::BI_InterlockedExchange8_rel:
- case AArch64::BI_InterlockedExchange16_rel:
- case AArch64::BI_InterlockedExchange_rel:
- case AArch64::BI_InterlockedExchange64_rel:
+ case clang::AArch64::BI_InterlockedExchange8_rel:
+ case clang::AArch64::BI_InterlockedExchange16_rel:
+ case clang::AArch64::BI_InterlockedExchange_rel:
+ case clang::AArch64::BI_InterlockedExchange64_rel:
return MSVCIntrin::_InterlockedExchange_rel;
- case AArch64::BI_InterlockedExchange8_nf:
- case AArch64::BI_InterlockedExchange16_nf:
- case AArch64::BI_InterlockedExchange_nf:
- case AArch64::BI_InterlockedExchange64_nf:
+ case clang::AArch64::BI_InterlockedExchange8_nf:
+ case clang::AArch64::BI_InterlockedExchange16_nf:
+ case clang::AArch64::BI_InterlockedExchange_nf:
+ case clang::AArch64::BI_InterlockedExchange64_nf:
return MSVCIntrin::_InterlockedExchange_nf;
- case AArch64::BI_InterlockedCompareExchange8_acq:
- case AArch64::BI_InterlockedCompareExchange16_acq:
- case AArch64::BI_InterlockedCompareExchange_acq:
- case AArch64::BI_InterlockedCompareExchange64_acq:
+ case clang::AArch64::BI_InterlockedCompareExchange8_acq:
+ case clang::AArch64::BI_InterlockedCompareExchange16_acq:
+ case clang::AArch64::BI_InterlockedCompareExchange_acq:
+ case clang::AArch64::BI_InterlockedCompareExchange64_acq:
return MSVCIntrin::_InterlockedCompareExchange_acq;
- case AArch64::BI_InterlockedCompareExchange8_rel:
- case AArch64::BI_InterlockedCompareExchange16_rel:
- case AArch64::BI_InterlockedCompareExchange_rel:
- case AArch64::BI_InterlockedCompareExchange64_rel:
+ case clang::AArch64::BI_InterlockedCompareExchange8_rel:
+ case clang::AArch64::BI_InterlockedCompareExchange16_rel:
+ case clang::AArch64::BI_InterlockedCompareExchange_rel:
+ case clang::AArch64::BI_InterlockedCompareExchange64_rel:
return MSVCIntrin::_InterlockedCompareExchange_rel;
- case AArch64::BI_InterlockedCompareExchange8_nf:
- case AArch64::BI_InterlockedCompareExchange16_nf:
- case AArch64::BI_InterlockedCompareExchange_nf:
- case AArch64::BI_InterlockedCompareExchange64_nf:
+ case clang::AArch64::BI_InterlockedCompareExchange8_nf:
+ case clang::AArch64::BI_InterlockedCompareExchange16_nf:
+ case clang::AArch64::BI_InterlockedCompareExchange_nf:
+ case clang::AArch64::BI_InterlockedCompareExchange64_nf:
return MSVCIntrin::_InterlockedCompareExchange_nf;
- case AArch64::BI_InterlockedCompareExchange128:
+ case clang::AArch64::BI_InterlockedCompareExchange128:
return MSVCIntrin::_InterlockedCompareExchange128;
- case AArch64::BI_InterlockedCompareExchange128_acq:
+ case clang::AArch64::BI_InterlockedCompareExchange128_acq:
return MSVCIntrin::_InterlockedCompareExchange128_acq;
- case AArch64::BI_InterlockedCompareExchange128_nf:
+ case clang::AArch64::BI_InterlockedCompareExchange128_nf:
return MSVCIntrin::_InterlockedCompareExchange128_nf;
- case AArch64::BI_InterlockedCompareExchange128_rel:
+ case clang::AArch64::BI_InterlockedCompareExchange128_rel:
return MSVCIntrin::_InterlockedCompareExchange128_rel;
- case AArch64::BI_InterlockedOr8_acq:
- case AArch64::BI_InterlockedOr16_acq:
- case AArch64::BI_InterlockedOr_acq:
- case AArch64::BI_InterlockedOr64_acq:
+ case clang::AArch64::BI_InterlockedOr8_acq:
+ case clang::AArch64::BI_InterlockedOr16_acq:
+ case clang::AArch64::BI_InterlockedOr_acq:
+ case clang::AArch64::BI_InterlockedOr64_acq:
return MSVCIntrin::_InterlockedOr_acq;
- case AArch64::BI_InterlockedOr8_rel:
- case AArch64::BI_InterlockedOr16_rel:
- case AArch64::BI_InterlockedOr_rel:
- case AArch64::BI_InterlockedOr64_rel:
+ case clang::AArch64::BI_InterlockedOr8_rel:
+ case clang::AArch64::BI_InterlockedOr16_rel:
+ case clang::AArch64::BI_InterlockedOr_rel:
+ case clang::AArch64::BI_InterlockedOr64_rel:
return MSVCIntrin::_InterlockedOr_rel;
- case AArch64::BI_InterlockedOr8_nf:
- case AArch64::BI_InterlockedOr16_nf:
- case AArch64::BI_InterlockedOr_nf:
- case AArch64::BI_InterlockedOr64_nf:
+ case clang::AArch64::BI_InterlockedOr8_nf:
+ case clang::AArch64::BI_InterlockedOr16_nf:
+ case clang::AArch64::BI_InterlockedOr_nf:
+ case clang::AArch64::BI_InterlockedOr64_nf:
return MSVCIntrin::_InterlockedOr_nf;
- case AArch64::BI_InterlockedXor8_acq:
- case AArch64::BI_InterlockedXor16_acq:
- case AArch64::BI_InterlockedXor_acq:
- case AArch64::BI_InterlockedXor64_acq:
+ case clang::AArch64::BI_InterlockedXor8_acq:
+ case clang::AArch64::BI_InterlockedXor16_acq:
+ case clang::AArch64::BI_InterlockedXor_acq:
+ case clang::AArch64::BI_InterlockedXor64_acq:
return MSVCIntrin::_InterlockedXor_acq;
- case AArch64::BI_InterlockedXor8_rel:
- case AArch64::BI_InterlockedXor16_rel:
- case AArch64::BI_InterlockedXor_rel:
- case AArch64::BI_InterlockedXor64_rel:
+ case clang::AArch64::BI_InterlockedXor8_rel:
+ case clang::AArch64::BI_InterlockedXor16_rel:
+ case clang::AArch64::BI_InterlockedXor_rel:
+ case clang::AArch64::BI_InterlockedXor64_rel:
return MSVCIntrin::_InterlockedXor_rel;
- case AArch64::BI_InterlockedXor8_nf:
- case AArch64::BI_InterlockedXor16_nf:
- case AArch64::BI_InterlockedXor_nf:
- case AArch64::BI_InterlockedXor64_nf:
+ case clang::AArch64::BI_InterlockedXor8_nf:
+ case clang::AArch64::BI_InterlockedXor16_nf:
+ case clang::AArch64::BI_InterlockedXor_nf:
+ case clang::AArch64::BI_InterlockedXor64_nf:
return MSVCIntrin::_InterlockedXor_nf;
- case AArch64::BI_InterlockedAnd8_acq:
- case AArch64::BI_InterlockedAnd16_acq:
- case AArch64::BI_InterlockedAnd_acq:
- case AArch64::BI_InterlockedAnd64_acq:
+ case clang::AArch64::BI_InterlockedAnd8_acq:
+ case clang::AArch64::BI_InterlockedAnd16_acq:
+ case clang::AArch64::BI_InterlockedAnd_acq:
+ case clang::AArch64::BI_InterlockedAnd64_acq:
return MSVCIntrin::_InterlockedAnd_acq;
- case AArch64::BI_InterlockedAnd8_rel:
- case AArch64::BI_InterlockedAnd16_rel:
- case AArch64::BI_InterlockedAnd_rel:
- case AArch64::BI_InterlockedAnd64_rel:
+ case clang::AArch64::BI_InterlockedAnd8_rel:
+ case clang::AArch64::BI_InterlockedAnd16_rel:
+ case clang::AArch64::BI_InterlockedAnd_rel:
+ case clang::AArch64::BI_InterlockedAnd64_rel:
return MSVCIntrin::_InterlockedAnd_rel;
- case AArch64::BI_InterlockedAnd8_nf:
- case AArch64::BI_InterlockedAnd16_nf:
- case AArch64::BI_InterlockedAnd_nf:
- case AArch64::BI_InterlockedAnd64_nf:
+ case clang::AArch64::BI_InterlockedAnd8_nf:
+ case clang::AArch64::BI_InterlockedAnd16_nf:
+ case clang::AArch64::BI_InterlockedAnd_nf:
+ case clang::AArch64::BI_InterlockedAnd64_nf:
return MSVCIntrin::_InterlockedAnd_nf;
- case AArch64::BI_InterlockedIncrement16_acq:
- case AArch64::BI_InterlockedIncrement_acq:
- case AArch64::BI_InterlockedIncrement64_acq:
+ case clang::AArch64::BI_InterlockedIncrement16_acq:
+ case clang::AArch64::BI_InterlockedIncrement_acq:
+ case clang::AArch64::BI_InterlockedIncrement64_acq:
return MSVCIntrin::_InterlockedIncrement_acq;
- case AArch64::BI_InterlockedIncrement16_rel:
- case AArch64::BI_InterlockedIncrement_rel:
- case AArch64::BI_InterlockedIncrement64_rel:
+ case clang::AArch64::BI_InterlockedIncrement16_rel:
+ case clang::AArch64::BI_InterlockedIncrement_rel:
+ case clang::AArch64::BI_InterlockedIncrement64_rel:
return MSVCIntrin::_InterlockedIncrement_rel;
- case AArch64::BI_InterlockedIncrement16_nf:
- case AArch64::BI_InterlockedIncrement_nf:
- case AArch64::BI_InterlockedIncrement64_nf:
+ case clang::AArch64::BI_InterlockedIncrement16_nf:
+ case clang::AArch64::BI_InterlockedIncrement_nf:
+ case clang::AArch64::BI_InterlockedIncrement64_nf:
return MSVCIntrin::_InterlockedIncrement_nf;
- case AArch64::BI_InterlockedDecrement16_acq:
- case AArch64::BI_InterlockedDecrement_acq:
- case AArch64::BI_InterlockedDecrement64_acq:
+ case clang::AArch64::BI_InterlockedDecrement16_acq:
+ case clang::AArch64::BI_InterlockedDecrement_acq:
+ case clang::AArch64::BI_InterlockedDecrement64_acq:
return MSVCIntrin::_InterlockedDecrement_acq;
- case AArch64::BI_InterlockedDecrement16_rel:
- case AArch64::BI_InterlockedDecrement_rel:
- case AArch64::BI_InterlockedDecrement64_rel:
+ case clang::AArch64::BI_InterlockedDecrement16_rel:
+ case clang::AArch64::BI_InterlockedDecrement_rel:
+ case clang::AArch64::BI_InterlockedDecrement64_rel:
return MSVCIntrin::_InterlockedDecrement_rel;
- case AArch64::BI_InterlockedDecrement16_nf:
- case AArch64::BI_InterlockedDecrement_nf:
- case AArch64::BI_InterlockedDecrement64_nf:
+ case clang::AArch64::BI_InterlockedDecrement16_nf:
+ case clang::AArch64::BI_InterlockedDecrement_nf:
+ case clang::AArch64::BI_InterlockedDecrement64_nf:
return MSVCIntrin::_InterlockedDecrement_nf;
}
llvm_unreachable("must return from switch");
@@ -1778,8 +1779,9 @@ llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
auto AL = ApplyDebugLocation::CreateArtificial(*this);
CharUnits Offset;
- Address BufAddr(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"),
- BufferAlignment);
+ Address BufAddr =
+ Address(Builder.CreateLoad(GetAddrOfLocalVar(Args[0]), "buf"), Int8Ty,
+ BufferAlignment);
Builder.CreateStore(Builder.getInt8(Layout.getSummaryByte()),
Builder.CreateConstByteGEP(BufAddr, Offset++, "summary"));
Builder.CreateStore(Builder.getInt8(Layout.getNumArgsByte()),
@@ -1800,8 +1802,8 @@ llvm::Function *CodeGenFunction::generateBuiltinOSLogHelperFunction(
Address Arg = GetAddrOfLocalVar(Args[I]);
Address Addr = Builder.CreateConstByteGEP(BufAddr, Offset, "argData");
- Addr = Builder.CreateBitCast(Addr, Arg.getPointer()->getType(),
- "argDataCast");
+ Addr =
+ Builder.CreateElementBitCast(Addr, Arg.getElementType(), "argDataCast");
Builder.CreateStore(Builder.CreateLoad(Arg), Addr);
Offset += Size;
++I;
@@ -2000,7 +2002,7 @@ EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
// Signed overflow occurs if the result is greater than INT_MAX or lesser
// than INT_MIN, i.e when |Result| > (INT_MAX + IsNegative).
auto IntMax =
- llvm::APInt::getSignedMaxValue(ResultInfo.Width).zextOrSelf(OpWidth);
+ llvm::APInt::getSignedMaxValue(ResultInfo.Width).zext(OpWidth);
llvm::Value *MaxResult =
CGF.Builder.CreateAdd(llvm::ConstantInt::get(OpTy, IntMax),
CGF.Builder.CreateZExt(IsNegative, OpTy));
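Editor's note: zextOrSelf disappears here because APInt::zext itself was relaxed upstream to accept a target width equal to the current width (it formerly asserted on anything not strictly wider), making the *OrSelf variants redundant. For instance:

    llvm::APInt V(32, 42);
    llvm::APInt Same = V.zext(32);  // no-op; legal after the relaxation
    llvm::APInt Wide = V.zext(64);  // ordinary zero-extension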
@@ -2041,89 +2043,6 @@ EmitCheckedMixedSignMultiply(CodeGenFunction &CGF, const clang::Expr *Op1,
return RValue::get(Overflow);
}

-static llvm::Value *dumpRecord(CodeGenFunction &CGF, QualType RType,
- Value *&RecordPtr, CharUnits Align,
- llvm::FunctionCallee Func, int Lvl) {
- ASTContext &Context = CGF.getContext();
- RecordDecl *RD = RType->castAs<RecordType>()->getDecl()->getDefinition();
- std::string Pad = std::string(Lvl * 4, ' ');
-
- Value *GString =
- CGF.Builder.CreateGlobalStringPtr(RType.getAsString() + " {\n");
- Value *Res = CGF.Builder.CreateCall(Func, {GString});
-
- static llvm::DenseMap<QualType, const char *> Types;
- if (Types.empty()) {
- Types[Context.CharTy] = "%c";
- Types[Context.BoolTy] = "%d";
- Types[Context.SignedCharTy] = "%hhd";
- Types[Context.UnsignedCharTy] = "%hhu";
- Types[Context.IntTy] = "%d";
- Types[Context.UnsignedIntTy] = "%u";
- Types[Context.LongTy] = "%ld";
- Types[Context.UnsignedLongTy] = "%lu";
- Types[Context.LongLongTy] = "%lld";
- Types[Context.UnsignedLongLongTy] = "%llu";
- Types[Context.ShortTy] = "%hd";
- Types[Context.UnsignedShortTy] = "%hu";
- Types[Context.VoidPtrTy] = "%p";
- Types[Context.FloatTy] = "%f";
- Types[Context.DoubleTy] = "%f";
- Types[Context.LongDoubleTy] = "%Lf";
- Types[Context.getPointerType(Context.CharTy)] = "%s";
- Types[Context.getPointerType(Context.getConstType(Context.CharTy))] = "%s";
- }
-
- for (const auto *FD : RD->fields()) {
- Value *FieldPtr = RecordPtr;
- if (RD->isUnion())
- FieldPtr = CGF.Builder.CreatePointerCast(
- FieldPtr, CGF.ConvertType(Context.getPointerType(FD->getType())));
- else
- FieldPtr = CGF.Builder.CreateStructGEP(CGF.ConvertType(RType), FieldPtr,
- FD->getFieldIndex());
-
- GString = CGF.Builder.CreateGlobalStringPtr(
- llvm::Twine(Pad)
- .concat(FD->getType().getAsString())
- .concat(llvm::Twine(' '))
- .concat(FD->getNameAsString())
- .concat(" : ")
- .str());
- Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
- Res = CGF.Builder.CreateAdd(Res, TmpRes);
-
- QualType CanonicalType =
- FD->getType().getUnqualifiedType().getCanonicalType();
-
- // We check whether we are in a recursive type
- if (CanonicalType->isRecordType()) {
- TmpRes = dumpRecord(CGF, CanonicalType, FieldPtr, Align, Func, Lvl + 1);
- Res = CGF.Builder.CreateAdd(TmpRes, Res);
- continue;
- }
-
- // We try to determine the best format to print the current field
- llvm::Twine Format = Types.find(CanonicalType) == Types.end()
- ? Types[Context.VoidPtrTy]
- : Types[CanonicalType];
-
- Address FieldAddress = Address(FieldPtr, Align);
- FieldPtr = CGF.Builder.CreateLoad(FieldAddress);
-
- // FIXME Need to handle bitfield here
- GString = CGF.Builder.CreateGlobalStringPtr(
- Format.concat(llvm::Twine('\n')).str());
- TmpRes = CGF.Builder.CreateCall(Func, {GString, FieldPtr});
- Res = CGF.Builder.CreateAdd(Res, TmpRes);
- }
-
- GString = CGF.Builder.CreateGlobalStringPtr(Pad + "}\n");
- Value *TmpRes = CGF.Builder.CreateCall(Func, {GString});
- Res = CGF.Builder.CreateAdd(Res, TmpRes);
- return Res;
-}
-
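Editor's note: the dumpRecord walker deleted above built the __builtin_dump_struct output at the IR level, one printf-style call per field, recursing into nested records. Its removal tracks an upstream rework that reportedly moved the builtin's expansion out of CodeGen into Sema-generated calls, so the source-level behavior is unchanged. For reference, the builtin is used like this:

    #include <cstdio>
    struct S { int x; float y; };
    void dump(S &s) {
      __builtin_dump_struct(&s, printf);  // prints the struct's fields and values
    }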
static bool
TypeRequiresBuiltinLaunderImp(const ASTContext &Ctx, QualType Ty,
llvm::SmallPtrSetImpl<const Decl *> &Seen) {
@@ -2252,8 +2171,9 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
ReturnValueSlot ReturnValue) {
const FunctionDecl *FD = GD.getDecl()->getAsFunction();
// See if we can constant fold this builtin. If so, don't emit it at all.
+ // TODO: Extend this handling to all builtin calls that we can constant-fold.
Expr::EvalResult Result;
- if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
+ if (E->isPRValue() && E->EvaluateAsRValue(Result, CGM.getContext()) &&
!Result.hasSideEffects()) {
if (Result.Val.isInt())
return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
@@ -2649,23 +2569,6 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(ComplexVal.first);
}
- case Builtin::BI__builtin_dump_struct: {
- llvm::Type *LLVMIntTy = getTypes().ConvertType(getContext().IntTy);
- llvm::FunctionType *LLVMFuncType = llvm::FunctionType::get(
- LLVMIntTy, {llvm::Type::getInt8PtrTy(getLLVMContext())}, true);
-
- Value *Func = EmitScalarExpr(E->getArg(1)->IgnoreImpCasts());
- CharUnits Arg0Align = EmitPointerWithAlignment(E->getArg(0)).getAlignment();
-
- const Expr *Arg0 = E->getArg(0)->IgnoreImpCasts();
- QualType Arg0Type = Arg0->getType()->getPointeeType();
-
- Value *RecordPtr = EmitScalarExpr(Arg0);
- Value *Res = dumpRecord(*this, Arg0Type, RecordPtr, Arg0Align,
- {LLVMFuncType, Func}, 0);
- return RValue::get(Res);
- }
-
case Builtin::BI__builtin_preserve_access_index: {
// Only enable the preserved access index region when debuginfo
// is available, as debuginfo is needed to preserve user-level
@@ -2929,7 +2832,10 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
case Builtin::BI__builtin_bswap16:
case Builtin::BI__builtin_bswap32:
- case Builtin::BI__builtin_bswap64: {
+ case Builtin::BI__builtin_bswap64:
+ case Builtin::BI_byteswap_ushort:
+ case Builtin::BI_byteswap_ulong:
+ case Builtin::BI_byteswap_uint64: {
return RValue::get(emitUnaryBuiltin(*this, E, Intrinsic::bswap));
}
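With the three MSVC spellings folded into the same case, `_byteswap_ushort`, `_byteswap_ulong`, and `_byteswap_uint64` now share the `llvm.bswap` lowering with the `__builtin_bswap*` family. A hedged usage sketch (assumes an MS-extensions compile where the `_byteswap_*` spellings are accepted):

    // Both spellings lower to the same llvm.bswap intrinsic.
    unsigned swap_ms()  { return _byteswap_ulong(0x11223344UL); }  // 0x44332211
    unsigned swap_gnu() { return __builtin_bswap32(0x11223344u); } // identical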
case Builtin::BI__builtin_bitreverse8:
@@ -3154,6 +3060,25 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(
emitUnaryBuiltin(*this, E, llvm::Intrinsic::trunc, "elt.trunc"));
+ case Builtin::BI__builtin_elementwise_add_sat:
+ case Builtin::BI__builtin_elementwise_sub_sat: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Result;
+ assert(Op0->getType()->isIntOrIntVectorTy() && "integer type expected");
+ QualType Ty = E->getArg(0)->getType();
+ if (auto *VecTy = Ty->getAs<VectorType>())
+ Ty = VecTy->getElementType();
+ bool IsSigned = Ty->isSignedIntegerType();
+ unsigned Opc;
+ if (BuiltinIDIfNoAsmLabel == Builtin::BI__builtin_elementwise_add_sat)
+ Opc = IsSigned ? llvm::Intrinsic::sadd_sat : llvm::Intrinsic::uadd_sat;
+ else
+ Opc = IsSigned ? llvm::Intrinsic::ssub_sat : llvm::Intrinsic::usub_sat;
+ Result = Builder.CreateBinaryIntrinsic(Opc, Op0, Op1, nullptr, "elt.sat");
+ return RValue::get(Result);
+ }
+
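The signedness of the element type selects the intrinsic, so this one case fans out to `sadd_sat`/`uadd_sat`/`ssub_sat`/`usub_sat`. A small sketch of the mapping (hypothetical vector typedefs, not from the patch):

    typedef int v4si __attribute__((ext_vector_type(4)));
    typedef unsigned v4ui __attribute__((ext_vector_type(4)));
    v4si add_sat(v4si a, v4si b) {
      return __builtin_elementwise_add_sat(a, b); // -> llvm.sadd.sat.v4i32
    }
    v4ui sub_sat(v4ui a, v4ui b) {
      return __builtin_elementwise_sub_sat(a, b); // -> llvm.usub.sat.v4i32
    }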
case Builtin::BI__builtin_elementwise_max: {
Value *Op0 = EmitScalarExpr(E->getArg(0));
Value *Op1 = EmitScalarExpr(E->getArg(1));
@@ -3218,6 +3143,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
*this, E, GetIntrinsicID(E->getArg(0)->getType()), "rdx.min"));
}
+ case Builtin::BI__builtin_reduce_add:
+ return RValue::get(emitUnaryBuiltin(
+ *this, E, llvm::Intrinsic::vector_reduce_add, "rdx.add"));
+ case Builtin::BI__builtin_reduce_mul:
+ return RValue::get(emitUnaryBuiltin(
+ *this, E, llvm::Intrinsic::vector_reduce_mul, "rdx.mul"));
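These portable reductions are what allow the x86-specific `reduce_add_d512`/`reduce_mul_d512` cases to be deleted further down in this patch: the AVX-512 headers can call the generic builtin instead. Usage sketch (hypothetical typedef):

    typedef int v8si __attribute__((ext_vector_type(8)));
    int sum(v8si v)  { return __builtin_reduce_add(v); } // llvm.vector.reduce.add
    int prod(v8si v) { return __builtin_reduce_mul(v); } // llvm.vector.reduce.mul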
case Builtin::BI__builtin_reduce_xor:
return RValue::get(emitUnaryBuiltin(
*this, E, llvm::Intrinsic::vector_reduce_xor, "rdx.xor"));
@@ -3231,14 +3162,14 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_matrix_transpose: {
auto *MatrixTy = E->getArg(0)->getType()->castAs<ConstantMatrixType>();
Value *MatValue = EmitScalarExpr(E->getArg(0));
- MatrixBuilder<CGBuilderTy> MB(Builder);
+ MatrixBuilder MB(Builder);
Value *Result = MB.CreateMatrixTranspose(MatValue, MatrixTy->getNumRows(),
MatrixTy->getNumColumns());
return RValue::get(Result);
}
case Builtin::BI__builtin_matrix_column_major_load: {
- MatrixBuilder<CGBuilderTy> MB(Builder);
+ MatrixBuilder MB(Builder);
// Emit everything that isn't dependent on the first parameter type
Value *Stride = EmitScalarExpr(E->getArg(3));
const auto *ResultTy = E->getType()->getAs<ConstantMatrixType>();
@@ -3250,14 +3181,15 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
EmitNonNullArgCheck(RValue::get(Src.getPointer()), E->getArg(0)->getType(),
E->getArg(0)->getExprLoc(), FD, 0);
Value *Result = MB.CreateColumnMajorLoad(
- Src.getPointer(), Align(Src.getAlignment().getQuantity()), Stride,
- IsVolatile, ResultTy->getNumRows(), ResultTy->getNumColumns(),
+ Src.getElementType(), Src.getPointer(),
+ Align(Src.getAlignment().getQuantity()), Stride, IsVolatile,
+ ResultTy->getNumRows(), ResultTy->getNumColumns(),
"matrix");
return RValue::get(Result);
}
case Builtin::BI__builtin_matrix_column_major_store: {
- MatrixBuilder<CGBuilderTy> MB(Builder);
+ MatrixBuilder MB(Builder);
Value *Matrix = EmitScalarExpr(E->getArg(0));
Address Dst = EmitPointerWithAlignment(E->getArg(1));
Value *Stride = EmitScalarExpr(E->getArg(2));
@@ -3576,6 +3508,17 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Builder.CreateMemSet(Dest, ByteVal, SizeVal, false);
return RValue::get(Dest.getPointer());
}
+ case Builtin::BI__builtin_memset_inline: {
+ Address Dest = EmitPointerWithAlignment(E->getArg(0));
+ Value *ByteVal =
+ Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)), Builder.getInt8Ty());
+ uint64_t Size =
+ E->getArg(2)->EvaluateKnownConstInt(getContext()).getZExtValue();
+ EmitNonNullArgCheck(RValue::get(Dest.getPointer()), E->getArg(0)->getType(),
+ E->getArg(0)->getExprLoc(), FD, 0);
+ Builder.CreateMemSetInline(Dest, ByteVal, Size);
+ return RValue::get(nullptr);
+ }
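`EvaluateKnownConstInt` is safe here because Sema only accepts an integer constant expression for the size argument, and the builtin returns void, hence `RValue::get(nullptr)`. A hedged caller-side sketch:

    void wipe(void *p) {
      // The size must be a compile-time constant; the expansion is always
      // inline and never degrades to a libc memset call.
      __builtin_memset_inline(p, 0, 64);
    }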
case Builtin::BI__builtin___memset_chk: {
// fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
Expr::EvalResult SizeResult, DstSizeResult;
@@ -3818,7 +3761,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
// Call LLVM's EH setjmp, which is lightweight.
Function *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
- Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
+ Buf = Builder.CreateElementBitCast(Buf, Int8Ty);
return RValue::get(Builder.CreateCall(F, Buf.getPointer()));
}
case Builtin::BI__builtin_longjmp: {
@@ -4129,8 +4072,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();
Address Ptr = EmitPointerWithAlignment(E->getArg(0));
- unsigned AddrSpace = Ptr.getPointer()->getType()->getPointerAddressSpace();
- Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
+ Ptr = Builder.CreateElementBitCast(Ptr, Int8Ty);
Value *NewVal = Builder.getInt8(0);
Value *Order = EmitScalarExpr(E->getArg(1));
if (isa<llvm::ConstantInt>(Order)) {
@@ -4524,6 +4466,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
return RValue::get(Carry);
}
+ case Builtin::BIaddressof:
+ case Builtin::BI__addressof:
case Builtin::BI__builtin_addressof:
return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
case Builtin::BI__builtin_function_start:
@@ -4683,6 +4627,12 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
}
break;
+ // C++ std:: builtins.
+ case Builtin::BImove:
+ case Builtin::BImove_if_noexcept:
+ case Builtin::BIforward:
+ case Builtin::BIas_const:
+ return RValue::get(EmitLValue(E->getArg(0)).getPointer(*this));
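Recognizing `std::move`, `std::move_if_noexcept`, `std::forward`, and `std::as_const` as builtins means no call is emitted even at -O0: each lowers to its argument's lvalue pointer, i.e. a pure value-category cast. Caller-side sketch (assumes the usual <utility> declarations):

    #include <utility>
    int consume(int x) {
      int &&r = std::move(x); // no call emitted; r binds directly to x
      return r;
    }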
case Builtin::BI__GetExceptionInfo: {
if (llvm::GlobalVariable *GV =
CGM.getCXXABI().getThrowInfo(FD->getParamDecl(0)->getType()))
@@ -5176,7 +5126,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
case Builtin::BI__builtin_thread_pointer: {
if (!getContext().getTargetInfo().isTLSSupported())
CGM.ErrorUnsupported(E, "__builtin_thread_pointer");
- // Fall through - it's already mapped to the intrinsic by GCCBuiltin.
+ // Fall through - it's already mapped to the intrinsic by ClangBuiltin.
break;
}
case Builtin::BI__builtin_os_log_format:
@@ -5319,7 +5269,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
StringRef Prefix =
llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch());
if (!Prefix.empty()) {
- IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix.data(), Name);
+ IntrinsicID = Intrinsic::getIntrinsicForClangBuiltin(Prefix.data(), Name);
// NOTE: we don't need to perform a compatibility flag check here since the
// intrinsics are declared in Builtins*.def via LANGBUILTIN, which filters the
// MS builtins via ALL_MS_LANGUAGES, so they are filtered out earlier.
@@ -5369,7 +5319,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
assert(PTy->canLosslesslyBitCastTo(FTy->getParamType(i)) &&
"Must be able to losslessly bit cast to param");
- ArgValue = Builder.CreateBitCast(ArgValue, PTy);
+ // Cast the vector type (e.g., v256i32) to x86_amx; this only happens
+ // in AMX intrinsics.
+ if (PTy->isX86_AMXTy())
+ ArgValue = Builder.CreateIntrinsic(Intrinsic::x86_cast_vector_to_tile,
+ {ArgValue->getType()}, {ArgValue});
+ else
+ ArgValue = Builder.CreateBitCast(ArgValue, PTy);
}
Args.push_back(ArgValue);
@@ -5393,7 +5349,13 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
"Must be able to losslessly bit cast result type");
- V = Builder.CreateBitCast(V, RetTy);
+ // Cast x86_amx to the vector type (e.g., v256i32); this only happens
+ // in AMX intrinsics.
+ if (V->getType()->isX86_AMXTy())
+ V = Builder.CreateIntrinsic(Intrinsic::x86_cast_tile_to_vector, {RetTy},
+ {V});
+ else
+ V = Builder.CreateBitCast(V, RetTy);
}
return RValue::get(V);
@@ -6899,8 +6861,7 @@ Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
case NEON::BI__builtin_neon_vld1_dup_v:
case NEON::BI__builtin_neon_vld1q_dup_v: {
Value *V = UndefValue::get(Ty);
- Ty = llvm::PointerType::getUnqual(VTy->getElementType());
- PtrOp0 = Builder.CreateBitCast(PtrOp0, Ty);
+ PtrOp0 = Builder.CreateElementBitCast(PtrOp0, VTy->getElementType());
LoadInst *Ld = Builder.CreateLoad(PtrOp0);
llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
@@ -7294,7 +7255,10 @@ Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
Op = Builder.CreateBitCast(Op, OTy);
if (OTy->getScalarType()->isFloatingPointTy()) {
- Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
+ if (Fp == CmpInst::FCMP_OEQ)
+ Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
+ else
+ Op = Builder.CreateFCmpS(Fp, Op, Constant::getNullValue(OTy));
} else {
Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
}
@@ -7345,27 +7309,27 @@ Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
switch (BuiltinID) {
default:
return nullptr;
- case ARM::BI__builtin_arm_nop:
+ case clang::ARM::BI__builtin_arm_nop:
Value = 0;
break;
- case ARM::BI__builtin_arm_yield:
- case ARM::BI__yield:
+ case clang::ARM::BI__builtin_arm_yield:
+ case clang::ARM::BI__yield:
Value = 1;
break;
- case ARM::BI__builtin_arm_wfe:
- case ARM::BI__wfe:
+ case clang::ARM::BI__builtin_arm_wfe:
+ case clang::ARM::BI__wfe:
Value = 2;
break;
- case ARM::BI__builtin_arm_wfi:
- case ARM::BI__wfi:
+ case clang::ARM::BI__builtin_arm_wfi:
+ case clang::ARM::BI__wfi:
Value = 3;
break;
- case ARM::BI__builtin_arm_sev:
- case ARM::BI__sev:
+ case clang::ARM::BI__builtin_arm_sev:
+ case clang::ARM::BI__sev:
Value = 4;
break;
- case ARM::BI__builtin_arm_sevl:
- case ARM::BI__sevl:
+ case clang::ARM::BI__builtin_arm_sevl:
+ case clang::ARM::BI__sevl:
Value = 5;
break;
}
@@ -7498,7 +7462,7 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
if (auto Hint = GetValueForARMHint(BuiltinID))
return Hint;
- if (BuiltinID == ARM::BI__emit) {
+ if (BuiltinID == clang::ARM::BI__emit) {
bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
llvm::FunctionType *FTy =
llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
@@ -7519,12 +7483,12 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(Emit);
}
- if (BuiltinID == ARM::BI__builtin_arm_dbg) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_dbg) {
Value *Option = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
}
- if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_prefetch) {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *RW = EmitScalarExpr(E->getArg(1));
Value *IsData = EmitScalarExpr(E->getArg(2));
@@ -7536,23 +7500,23 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {Address, RW, Locality, IsData});
}
- if (BuiltinID == ARM::BI__builtin_arm_rbit) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_rbit) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(
CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
}
- if (BuiltinID == ARM::BI__builtin_arm_cls) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_cls) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls), Arg, "cls");
}
- if (BuiltinID == ARM::BI__builtin_arm_cls64) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_cls64) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_cls64), Arg,
"cls");
}
- if (BuiltinID == ARM::BI__clear_cache) {
+ if (BuiltinID == clang::ARM::BI__clear_cache) {
assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
const FunctionDecl *FD = E->getDirectCallee();
Value *Ops[2];
@@ -7564,16 +7528,16 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
}
- if (BuiltinID == ARM::BI__builtin_arm_mcrr ||
- BuiltinID == ARM::BI__builtin_arm_mcrr2) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_mcrr ||
+ BuiltinID == clang::ARM::BI__builtin_arm_mcrr2) {
Function *F;
switch (BuiltinID) {
default: llvm_unreachable("unexpected builtin");
- case ARM::BI__builtin_arm_mcrr:
+ case clang::ARM::BI__builtin_arm_mcrr:
F = CGM.getIntrinsic(Intrinsic::arm_mcrr);
break;
- case ARM::BI__builtin_arm_mcrr2:
+ case clang::ARM::BI__builtin_arm_mcrr2:
F = CGM.getIntrinsic(Intrinsic::arm_mcrr2);
break;
}
@@ -7598,16 +7562,16 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {Coproc, Opc1, Rt, Rt2, CRm});
}
- if (BuiltinID == ARM::BI__builtin_arm_mrrc ||
- BuiltinID == ARM::BI__builtin_arm_mrrc2) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_mrrc ||
+ BuiltinID == clang::ARM::BI__builtin_arm_mrrc2) {
Function *F;
switch (BuiltinID) {
default: llvm_unreachable("unexpected builtin");
- case ARM::BI__builtin_arm_mrrc:
+ case clang::ARM::BI__builtin_arm_mrrc:
F = CGM.getIntrinsic(Intrinsic::arm_mrrc);
break;
- case ARM::BI__builtin_arm_mrrc2:
+ case clang::ARM::BI__builtin_arm_mrrc2:
F = CGM.getIntrinsic(Intrinsic::arm_mrrc2);
break;
}
@@ -7632,21 +7596,21 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateBitCast(RtAndRt2, ConvertType(E->getType()));
}
- if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
- ((BuiltinID == ARM::BI__builtin_arm_ldrex ||
- BuiltinID == ARM::BI__builtin_arm_ldaex) &&
+ if (BuiltinID == clang::ARM::BI__builtin_arm_ldrexd ||
+ ((BuiltinID == clang::ARM::BI__builtin_arm_ldrex ||
+ BuiltinID == clang::ARM::BI__builtin_arm_ldaex) &&
getContext().getTypeSize(E->getType()) == 64) ||
- BuiltinID == ARM::BI__ldrexd) {
+ BuiltinID == clang::ARM::BI__ldrexd) {
Function *F;
switch (BuiltinID) {
default: llvm_unreachable("unexpected builtin");
- case ARM::BI__builtin_arm_ldaex:
+ case clang::ARM::BI__builtin_arm_ldaex:
F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
break;
- case ARM::BI__builtin_arm_ldrexd:
- case ARM::BI__builtin_arm_ldrex:
- case ARM::BI__ldrexd:
+ case clang::ARM::BI__builtin_arm_ldrexd:
+ case clang::ARM::BI__builtin_arm_ldrex:
+ case clang::ARM::BI__ldrexd:
F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
break;
}
@@ -7666,46 +7630,49 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateBitCast(Val, ConvertType(E->getType()));
}
- if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
- BuiltinID == ARM::BI__builtin_arm_ldaex) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_ldrex ||
+ BuiltinID == clang::ARM::BI__builtin_arm_ldaex) {
Value *LoadAddr = EmitScalarExpr(E->getArg(0));
QualType Ty = E->getType();
llvm::Type *RealResTy = ConvertType(Ty);
- llvm::Type *PtrTy = llvm::IntegerType::get(
- getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
+ llvm::Type *IntTy =
+ llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));
+ llvm::Type *PtrTy = IntTy->getPointerTo();
LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
- Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex
- ? Intrinsic::arm_ldaex
- : Intrinsic::arm_ldrex,
- PtrTy);
- Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
+ Function *F = CGM.getIntrinsic(
+ BuiltinID == clang::ARM::BI__builtin_arm_ldaex ? Intrinsic::arm_ldaex
+ : Intrinsic::arm_ldrex,
+ PtrTy);
+ CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
+ Val->addParamAttr(
+ 0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));
if (RealResTy->isPointerTy())
return Builder.CreateIntToPtr(Val, RealResTy);
else {
llvm::Type *IntResTy = llvm::IntegerType::get(
getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
- Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
- return Builder.CreateBitCast(Val, RealResTy);
+ return Builder.CreateBitCast(Builder.CreateTruncOrBitCast(Val, IntResTy),
+ RealResTy);
}
}
- if (BuiltinID == ARM::BI__builtin_arm_strexd ||
- ((BuiltinID == ARM::BI__builtin_arm_stlex ||
- BuiltinID == ARM::BI__builtin_arm_strex) &&
+ if (BuiltinID == clang::ARM::BI__builtin_arm_strexd ||
+ ((BuiltinID == clang::ARM::BI__builtin_arm_stlex ||
+ BuiltinID == clang::ARM::BI__builtin_arm_strex) &&
getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
- Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
- ? Intrinsic::arm_stlexd
- : Intrinsic::arm_strexd);
+ Function *F = CGM.getIntrinsic(
+ BuiltinID == clang::ARM::BI__builtin_arm_stlex ? Intrinsic::arm_stlexd
+ : Intrinsic::arm_strexd);
llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty);
Address Tmp = CreateMemTemp(E->getArg(0)->getType());
Value *Val = EmitScalarExpr(E->getArg(0));
Builder.CreateStore(Val, Tmp);
- Address LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
+ Address LdPtr = Builder.CreateElementBitCast(Tmp, STy);
Val = Builder.CreateLoad(LdPtr);
Value *Arg0 = Builder.CreateExtractValue(Val, 0);
@@ -7714,8 +7681,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "strexd");
}
- if (BuiltinID == ARM::BI__builtin_arm_strex ||
- BuiltinID == ARM::BI__builtin_arm_stlex) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_strex ||
+ BuiltinID == clang::ARM::BI__builtin_arm_stlex) {
Value *StoreVal = EmitScalarExpr(E->getArg(0));
Value *StoreAddr = EmitScalarExpr(E->getArg(1));
@@ -7734,14 +7701,18 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
}
- Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
- ? Intrinsic::arm_stlex
- : Intrinsic::arm_strex,
- StoreAddr->getType());
- return Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
+ Function *F = CGM.getIntrinsic(
+ BuiltinID == clang::ARM::BI__builtin_arm_stlex ? Intrinsic::arm_stlex
+ : Intrinsic::arm_strex,
+ StoreAddr->getType());
+
+ CallInst *CI = Builder.CreateCall(F, {StoreVal, StoreAddr}, "strex");
+ CI->addParamAttr(
+ 1, Attribute::get(getLLVMContext(), Attribute::ElementType, StoreTy));
+ return CI;
}
- if (BuiltinID == ARM::BI__builtin_arm_clrex) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_clrex) {
Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
return Builder.CreateCall(F);
}
@@ -7749,19 +7720,19 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// CRC32
Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
- case ARM::BI__builtin_arm_crc32b:
+ case clang::ARM::BI__builtin_arm_crc32b:
CRCIntrinsicID = Intrinsic::arm_crc32b; break;
- case ARM::BI__builtin_arm_crc32cb:
+ case clang::ARM::BI__builtin_arm_crc32cb:
CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
- case ARM::BI__builtin_arm_crc32h:
+ case clang::ARM::BI__builtin_arm_crc32h:
CRCIntrinsicID = Intrinsic::arm_crc32h; break;
- case ARM::BI__builtin_arm_crc32ch:
+ case clang::ARM::BI__builtin_arm_crc32ch:
CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
- case ARM::BI__builtin_arm_crc32w:
- case ARM::BI__builtin_arm_crc32d:
+ case clang::ARM::BI__builtin_arm_crc32w:
+ case clang::ARM::BI__builtin_arm_crc32d:
CRCIntrinsicID = Intrinsic::arm_crc32w; break;
- case ARM::BI__builtin_arm_crc32cw:
- case ARM::BI__builtin_arm_crc32cd:
+ case clang::ARM::BI__builtin_arm_crc32cw:
+ case clang::ARM::BI__builtin_arm_crc32cd:
CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
}
@@ -7771,8 +7742,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
// intrinsics, hence we need different codegen for these cases.
- if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
- BuiltinID == ARM::BI__builtin_arm_crc32cd) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_crc32d ||
+ BuiltinID == clang::ARM::BI__builtin_arm_crc32cd) {
Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
Value *Arg1b = Builder.CreateLShr(Arg1, C1);
@@ -7789,24 +7760,24 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
}
}
- if (BuiltinID == ARM::BI__builtin_arm_rsr ||
- BuiltinID == ARM::BI__builtin_arm_rsr64 ||
- BuiltinID == ARM::BI__builtin_arm_rsrp ||
- BuiltinID == ARM::BI__builtin_arm_wsr ||
- BuiltinID == ARM::BI__builtin_arm_wsr64 ||
- BuiltinID == ARM::BI__builtin_arm_wsrp) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_rsr ||
+ BuiltinID == clang::ARM::BI__builtin_arm_rsr64 ||
+ BuiltinID == clang::ARM::BI__builtin_arm_rsrp ||
+ BuiltinID == clang::ARM::BI__builtin_arm_wsr ||
+ BuiltinID == clang::ARM::BI__builtin_arm_wsr64 ||
+ BuiltinID == clang::ARM::BI__builtin_arm_wsrp) {
SpecialRegisterAccessKind AccessKind = Write;
- if (BuiltinID == ARM::BI__builtin_arm_rsr ||
- BuiltinID == ARM::BI__builtin_arm_rsr64 ||
- BuiltinID == ARM::BI__builtin_arm_rsrp)
+ if (BuiltinID == clang::ARM::BI__builtin_arm_rsr ||
+ BuiltinID == clang::ARM::BI__builtin_arm_rsr64 ||
+ BuiltinID == clang::ARM::BI__builtin_arm_rsrp)
AccessKind = VolatileRead;
- bool IsPointerBuiltin = BuiltinID == ARM::BI__builtin_arm_rsrp ||
- BuiltinID == ARM::BI__builtin_arm_wsrp;
+ bool IsPointerBuiltin = BuiltinID == clang::ARM::BI__builtin_arm_rsrp ||
+ BuiltinID == clang::ARM::BI__builtin_arm_wsrp;
- bool Is64Bit = BuiltinID == ARM::BI__builtin_arm_rsr64 ||
- BuiltinID == ARM::BI__builtin_arm_wsr64;
+ bool Is64Bit = BuiltinID == clang::ARM::BI__builtin_arm_rsr64 ||
+ BuiltinID == clang::ARM::BI__builtin_arm_wsr64;
llvm::Type *ValueType;
llvm::Type *RegisterType;
@@ -7823,6 +7794,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
AccessKind);
}
+ if (BuiltinID == clang::ARM::BI__builtin_sponentry) {
+ llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy);
+ return Builder.CreateCall(F);
+ }
+
// Handle MSVC intrinsics before argument evaluation to prevent double
// evaluation.
if (Optional<MSVCIntrin> MsvcIntId = translateArmToMsvcIntrin(BuiltinID))
@@ -7981,10 +7957,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
// The ARM _MoveToCoprocessor builtins put the input register value as
// the first argument, but the LLVM intrinsic expects it as the third one.
- case ARM::BI_MoveToCoprocessor:
- case ARM::BI_MoveToCoprocessor2: {
- Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI_MoveToCoprocessor ?
- Intrinsic::arm_mcr : Intrinsic::arm_mcr2);
+ case clang::ARM::BI_MoveToCoprocessor:
+ case clang::ARM::BI_MoveToCoprocessor2: {
+ Function *F = CGM.getIntrinsic(BuiltinID == clang::ARM::BI_MoveToCoprocessor
+ ? Intrinsic::arm_mcr
+ : Intrinsic::arm_mcr2);
return Builder.CreateCall(F, {Ops[1], Ops[2], Ops[0],
Ops[3], Ops[4], Ops[5]});
}
@@ -7997,11 +7974,11 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
if (!Result)
return nullptr;
- if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
- BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
+ if (BuiltinID == clang::ARM::BI__builtin_arm_vcvtr_f ||
+ BuiltinID == clang::ARM::BI__builtin_arm_vcvtr_d) {
// Determine the overloaded type of this builtin.
llvm::Type *Ty;
- if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
+ if (BuiltinID == clang::ARM::BI__builtin_arm_vcvtr_f)
Ty = FloatTy;
else
Ty = DoubleTy;
@@ -8126,8 +8103,8 @@ Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
case NEON::BI__builtin_neon_vst1_lane_v: {
Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
- Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
- auto St = Builder.CreateStore(Ops[1], Builder.CreateBitCast(PtrOp0, Ty));
+ auto St = Builder.CreateStore(
+ Ops[1], Builder.CreateElementBitCast(PtrOp0, Ops[1]->getType()));
return St;
}
case NEON::BI__builtin_neon_vtbl1_v:
@@ -9005,7 +8982,10 @@ Value *CodeGenFunction::EmitSVEMaskedLoad(const CallExpr *E,
BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
- Value *Load = Builder.CreateCall(F, {Predicate, BasePtr});
+ auto *Load =
+ cast<llvm::Instruction>(Builder.CreateCall(F, {Predicate, BasePtr}));
+ auto TBAAInfo = CGM.getTBAAAccessInfo(LangPTy->getPointeeType());
+ CGM.DecorateInstructionWithTBAA(Load, TBAAInfo);
return IsZExtReturn ? Builder.CreateZExt(Load, VectorTy)
: Builder.CreateSExt(Load, VectorTy);
@@ -9033,7 +9013,11 @@ Value *CodeGenFunction::EmitSVEMaskedStore(const CallExpr *E,
BasePtr = Builder.CreateBitCast(BasePtr, MemEltTy->getPointerTo());
Function *F = CGM.getIntrinsic(BuiltinID, MemoryTy);
- return Builder.CreateCall(F, {Val, Predicate, BasePtr});
+ auto *Store =
+ cast<llvm::Instruction>(Builder.CreateCall(F, {Val, Predicate, BasePtr}));
+ auto TBAAInfo = CGM.getTBAAAccessInfo(LangPTy->getPointeeType());
+ CGM.DecorateInstructionWithTBAA(Store, TBAAInfo);
+ return Store;
}
// Limit the usage of scalable llvm IR generated by the ACLE by using the
@@ -9427,34 +9411,34 @@ Value *CodeGenFunction::EmitAArch64SVEBuiltinExpr(unsigned BuiltinID,
Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
const CallExpr *E,
llvm::Triple::ArchType Arch) {
- if (BuiltinID >= AArch64::FirstSVEBuiltin &&
- BuiltinID <= AArch64::LastSVEBuiltin)
+ if (BuiltinID >= clang::AArch64::FirstSVEBuiltin &&
+ BuiltinID <= clang::AArch64::LastSVEBuiltin)
return EmitAArch64SVEBuiltinExpr(BuiltinID, E);
unsigned HintID = static_cast<unsigned>(-1);
switch (BuiltinID) {
default: break;
- case AArch64::BI__builtin_arm_nop:
+ case clang::AArch64::BI__builtin_arm_nop:
HintID = 0;
break;
- case AArch64::BI__builtin_arm_yield:
- case AArch64::BI__yield:
+ case clang::AArch64::BI__builtin_arm_yield:
+ case clang::AArch64::BI__yield:
HintID = 1;
break;
- case AArch64::BI__builtin_arm_wfe:
- case AArch64::BI__wfe:
+ case clang::AArch64::BI__builtin_arm_wfe:
+ case clang::AArch64::BI__wfe:
HintID = 2;
break;
- case AArch64::BI__builtin_arm_wfi:
- case AArch64::BI__wfi:
+ case clang::AArch64::BI__builtin_arm_wfi:
+ case clang::AArch64::BI__wfi:
HintID = 3;
break;
- case AArch64::BI__builtin_arm_sev:
- case AArch64::BI__sev:
+ case clang::AArch64::BI__builtin_arm_sev:
+ case clang::AArch64::BI__sev:
HintID = 4;
break;
- case AArch64::BI__builtin_arm_sevl:
- case AArch64::BI__sevl:
+ case clang::AArch64::BI__builtin_arm_sevl:
+ case clang::AArch64::BI__sevl:
HintID = 5;
break;
}
@@ -9464,7 +9448,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
}
- if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_prefetch) {
Value *Address = EmitScalarExpr(E->getArg(0));
Value *RW = EmitScalarExpr(E->getArg(1));
Value *CacheLevel = EmitScalarExpr(E->getArg(2));
@@ -9487,14 +9471,14 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {Address, RW, Locality, IsData});
}
- if (BuiltinID == AArch64::BI__builtin_arm_rbit) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_rbit) {
assert((getContext().getTypeSize(E->getType()) == 32) &&
"rbit of unusual size!");
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(
CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
}
- if (BuiltinID == AArch64::BI__builtin_arm_rbit64) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_rbit64) {
assert((getContext().getTypeSize(E->getType()) == 64) &&
"rbit of unusual size!");
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
@@ -9502,50 +9486,50 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(Intrinsic::bitreverse, Arg->getType()), Arg, "rbit");
}
- if (BuiltinID == AArch64::BI__builtin_arm_cls) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_cls) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls), Arg,
"cls");
}
- if (BuiltinID == AArch64::BI__builtin_arm_cls64) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_cls64) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_cls64), Arg,
"cls");
}
- if (BuiltinID == AArch64::BI__builtin_arm_frint32zf ||
- BuiltinID == AArch64::BI__builtin_arm_frint32z) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_frint32zf ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_frint32z) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
llvm::Type *Ty = Arg->getType();
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32z, Ty),
Arg, "frint32z");
}
- if (BuiltinID == AArch64::BI__builtin_arm_frint64zf ||
- BuiltinID == AArch64::BI__builtin_arm_frint64z) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_frint64zf ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_frint64z) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
llvm::Type *Ty = Arg->getType();
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64z, Ty),
Arg, "frint64z");
}
- if (BuiltinID == AArch64::BI__builtin_arm_frint32xf ||
- BuiltinID == AArch64::BI__builtin_arm_frint32x) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_frint32xf ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_frint32x) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
llvm::Type *Ty = Arg->getType();
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint32x, Ty),
Arg, "frint32x");
}
- if (BuiltinID == AArch64::BI__builtin_arm_frint64xf ||
- BuiltinID == AArch64::BI__builtin_arm_frint64x) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_frint64xf ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_frint64x) {
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
llvm::Type *Ty = Arg->getType();
return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::aarch64_frint64x, Ty),
Arg, "frint64x");
}
- if (BuiltinID == AArch64::BI__builtin_arm_jcvt) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_jcvt) {
assert((getContext().getTypeSize(E->getType()) == 32) &&
"__jcvt of unusual size!");
llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
@@ -9553,14 +9537,14 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(Intrinsic::aarch64_fjcvtzs), Arg);
}
- if (BuiltinID == AArch64::BI__builtin_arm_ld64b ||
- BuiltinID == AArch64::BI__builtin_arm_st64b ||
- BuiltinID == AArch64::BI__builtin_arm_st64bv ||
- BuiltinID == AArch64::BI__builtin_arm_st64bv0) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_ld64b ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_st64b ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_st64bv ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_st64bv0) {
llvm::Value *MemAddr = EmitScalarExpr(E->getArg(0));
llvm::Value *ValPtr = EmitScalarExpr(E->getArg(1));
- if (BuiltinID == AArch64::BI__builtin_arm_ld64b) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_ld64b) {
// Load from the address via an LLVM intrinsic, receiving a
// tuple of 8 i64 words, and store each one to ValPtr.
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_ld64b);
@@ -9569,7 +9553,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
for (size_t i = 0; i < 8; i++) {
llvm::Value *ValOffsetPtr =
Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i));
- Address Addr(ValOffsetPtr, CharUnits::fromQuantity(8));
+ Address Addr =
+ Address(ValOffsetPtr, Int64Ty, CharUnits::fromQuantity(8));
ToRet = Builder.CreateStore(Builder.CreateExtractValue(Val, i), Addr);
}
return ToRet;
@@ -9581,24 +9566,25 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
for (size_t i = 0; i < 8; i++) {
llvm::Value *ValOffsetPtr =
Builder.CreateGEP(Int64Ty, ValPtr, Builder.getInt32(i));
- Address Addr(ValOffsetPtr, CharUnits::fromQuantity(8));
+ Address Addr =
+ Address(ValOffsetPtr, Int64Ty, CharUnits::fromQuantity(8));
Args.push_back(Builder.CreateLoad(Addr));
}
- auto Intr = (BuiltinID == AArch64::BI__builtin_arm_st64b
+ auto Intr = (BuiltinID == clang::AArch64::BI__builtin_arm_st64b
? Intrinsic::aarch64_st64b
- : BuiltinID == AArch64::BI__builtin_arm_st64bv
- ? Intrinsic::aarch64_st64bv
- : Intrinsic::aarch64_st64bv0);
+ : BuiltinID == clang::AArch64::BI__builtin_arm_st64bv
+ ? Intrinsic::aarch64_st64bv
+ : Intrinsic::aarch64_st64bv0);
Function *F = CGM.getIntrinsic(Intr);
return Builder.CreateCall(F, Args);
}
}
- if (BuiltinID == AArch64::BI__builtin_arm_rndr ||
- BuiltinID == AArch64::BI__builtin_arm_rndrrs) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_rndr ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_rndrrs) {
- auto Intr = (BuiltinID == AArch64::BI__builtin_arm_rndr
+ auto Intr = (BuiltinID == clang::AArch64::BI__builtin_arm_rndr
? Intrinsic::aarch64_rndr
: Intrinsic::aarch64_rndrrs);
Function *F = CGM.getIntrinsic(Intr);
@@ -9612,7 +9598,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Status;
}
- if (BuiltinID == AArch64::BI__clear_cache) {
+ if (BuiltinID == clang::AArch64::BI__clear_cache) {
assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
const FunctionDecl *FD = E->getDirectCallee();
Value *Ops[2];
@@ -9624,12 +9610,13 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
}
- if ((BuiltinID == AArch64::BI__builtin_arm_ldrex ||
- BuiltinID == AArch64::BI__builtin_arm_ldaex) &&
+ if ((BuiltinID == clang::AArch64::BI__builtin_arm_ldrex ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_ldaex) &&
getContext().getTypeSize(E->getType()) == 128) {
- Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
- ? Intrinsic::aarch64_ldaxp
- : Intrinsic::aarch64_ldxp);
+ Function *F =
+ CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_ldaex
+ ? Intrinsic::aarch64_ldaxp
+ : Intrinsic::aarch64_ldxp);
Value *LdPtr = EmitScalarExpr(E->getArg(0));
Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
@@ -9645,43 +9632,48 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
Val = Builder.CreateOr(Val, Val1);
return Builder.CreateBitCast(Val, ConvertType(E->getType()));
- } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
- BuiltinID == AArch64::BI__builtin_arm_ldaex) {
+ } else if (BuiltinID == clang::AArch64::BI__builtin_arm_ldrex ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_ldaex) {
Value *LoadAddr = EmitScalarExpr(E->getArg(0));
QualType Ty = E->getType();
llvm::Type *RealResTy = ConvertType(Ty);
- llvm::Type *PtrTy = llvm::IntegerType::get(
- getLLVMContext(), getContext().getTypeSize(Ty))->getPointerTo();
+ llvm::Type *IntTy =
+ llvm::IntegerType::get(getLLVMContext(), getContext().getTypeSize(Ty));
+ llvm::Type *PtrTy = IntTy->getPointerTo();
LoadAddr = Builder.CreateBitCast(LoadAddr, PtrTy);
- Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
- ? Intrinsic::aarch64_ldaxr
- : Intrinsic::aarch64_ldxr,
- PtrTy);
- Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
+ Function *F =
+ CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_ldaex
+ ? Intrinsic::aarch64_ldaxr
+ : Intrinsic::aarch64_ldxr,
+ PtrTy);
+ CallInst *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
+ Val->addParamAttr(
+ 0, Attribute::get(getLLVMContext(), Attribute::ElementType, IntTy));
if (RealResTy->isPointerTy())
return Builder.CreateIntToPtr(Val, RealResTy);
llvm::Type *IntResTy = llvm::IntegerType::get(
getLLVMContext(), CGM.getDataLayout().getTypeSizeInBits(RealResTy));
- Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
- return Builder.CreateBitCast(Val, RealResTy);
+ return Builder.CreateBitCast(Builder.CreateTruncOrBitCast(Val, IntResTy),
+ RealResTy);
}
- if ((BuiltinID == AArch64::BI__builtin_arm_strex ||
- BuiltinID == AArch64::BI__builtin_arm_stlex) &&
+ if ((BuiltinID == clang::AArch64::BI__builtin_arm_strex ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_stlex) &&
getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
- Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
- ? Intrinsic::aarch64_stlxp
- : Intrinsic::aarch64_stxp);
+ Function *F =
+ CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_stlex
+ ? Intrinsic::aarch64_stlxp
+ : Intrinsic::aarch64_stxp);
llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty);
Address Tmp = CreateMemTemp(E->getArg(0)->getType());
EmitAnyExprToMem(E->getArg(0), Tmp, Qualifiers(), /*init*/ true);
- Tmp = Builder.CreateBitCast(Tmp, llvm::PointerType::getUnqual(STy));
+ Tmp = Builder.CreateElementBitCast(Tmp, STy);
llvm::Value *Val = Builder.CreateLoad(Tmp);
Value *Arg0 = Builder.CreateExtractValue(Val, 0);
@@ -9691,8 +9683,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, {Arg0, Arg1, StPtr}, "stxp");
}
- if (BuiltinID == AArch64::BI__builtin_arm_strex ||
- BuiltinID == AArch64::BI__builtin_arm_stlex) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_strex ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_stlex) {
Value *StoreVal = EmitScalarExpr(E->getArg(0));
Value *StoreAddr = EmitScalarExpr(E->getArg(1));
@@ -9711,14 +9703,18 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
}
- Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
- ? Intrinsic::aarch64_stlxr
- : Intrinsic::aarch64_stxr,
- StoreAddr->getType());
- return Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
+ Function *F =
+ CGM.getIntrinsic(BuiltinID == clang::AArch64::BI__builtin_arm_stlex
+ ? Intrinsic::aarch64_stlxr
+ : Intrinsic::aarch64_stxr,
+ StoreAddr->getType());
+ CallInst *CI = Builder.CreateCall(F, {StoreVal, StoreAddr}, "stxr");
+ CI->addParamAttr(
+ 1, Attribute::get(getLLVMContext(), Attribute::ElementType, StoreTy));
+ return CI;
}
- if (BuiltinID == AArch64::BI__getReg) {
+ if (BuiltinID == clang::AArch64::BI__getReg) {
Expr::EvalResult Result;
if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
llvm_unreachable("Sema will ensure that the parameter is constant");
@@ -9736,33 +9732,42 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, Metadata);
}
- if (BuiltinID == AArch64::BI__builtin_arm_clrex) {
+ if (BuiltinID == clang::AArch64::BI__break) {
+ Expr::EvalResult Result;
+ if (!E->getArg(0)->EvaluateAsInt(Result, CGM.getContext()))
+ llvm_unreachable("Sema will ensure that the parameter is constant");
+
+ llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::aarch64_break);
+ return Builder.CreateCall(F, {EmitScalarExpr(E->getArg(0))});
+ }
+
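A sketch of the MSVC-style caller this new case supports (hypothetical immediate; as the unreachable above notes, Sema guarantees the operand is a constant):

    // Lowers to llvm.aarch64.break, i.e. a BRK #0xF000 instruction.
    void debug_trap() { __break(0xF000); }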
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_clrex) {
Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
return Builder.CreateCall(F);
}
- if (BuiltinID == AArch64::BI_ReadWriteBarrier)
+ if (BuiltinID == clang::AArch64::BI_ReadWriteBarrier)
return Builder.CreateFence(llvm::AtomicOrdering::SequentiallyConsistent,
llvm::SyncScope::SingleThread);
// CRC32
Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
- case AArch64::BI__builtin_arm_crc32b:
+ case clang::AArch64::BI__builtin_arm_crc32b:
CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
- case AArch64::BI__builtin_arm_crc32cb:
+ case clang::AArch64::BI__builtin_arm_crc32cb:
CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
- case AArch64::BI__builtin_arm_crc32h:
+ case clang::AArch64::BI__builtin_arm_crc32h:
CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
- case AArch64::BI__builtin_arm_crc32ch:
+ case clang::AArch64::BI__builtin_arm_crc32ch:
CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
- case AArch64::BI__builtin_arm_crc32w:
+ case clang::AArch64::BI__builtin_arm_crc32w:
CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
- case AArch64::BI__builtin_arm_crc32cw:
+ case clang::AArch64::BI__builtin_arm_crc32cw:
CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
- case AArch64::BI__builtin_arm_crc32d:
+ case clang::AArch64::BI__builtin_arm_crc32d:
CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
- case AArch64::BI__builtin_arm_crc32cd:
+ case clang::AArch64::BI__builtin_arm_crc32cd:
CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
}
@@ -9792,17 +9797,17 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
// Memory Tagging Extensions (MTE) Intrinsics
Intrinsic::ID MTEIntrinsicID = Intrinsic::not_intrinsic;
switch (BuiltinID) {
- case AArch64::BI__builtin_arm_irg:
+ case clang::AArch64::BI__builtin_arm_irg:
MTEIntrinsicID = Intrinsic::aarch64_irg; break;
- case AArch64::BI__builtin_arm_addg:
+ case clang::AArch64::BI__builtin_arm_addg:
MTEIntrinsicID = Intrinsic::aarch64_addg; break;
- case AArch64::BI__builtin_arm_gmi:
+ case clang::AArch64::BI__builtin_arm_gmi:
MTEIntrinsicID = Intrinsic::aarch64_gmi; break;
- case AArch64::BI__builtin_arm_ldg:
+ case clang::AArch64::BI__builtin_arm_ldg:
MTEIntrinsicID = Intrinsic::aarch64_ldg; break;
- case AArch64::BI__builtin_arm_stg:
+ case clang::AArch64::BI__builtin_arm_stg:
MTEIntrinsicID = Intrinsic::aarch64_stg; break;
- case AArch64::BI__builtin_arm_subp:
+ case clang::AArch64::BI__builtin_arm_subp:
MTEIntrinsicID = Intrinsic::aarch64_subp; break;
}
@@ -9867,24 +9872,24 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
}
}
- if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
- BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
- BuiltinID == AArch64::BI__builtin_arm_rsrp ||
- BuiltinID == AArch64::BI__builtin_arm_wsr ||
- BuiltinID == AArch64::BI__builtin_arm_wsr64 ||
- BuiltinID == AArch64::BI__builtin_arm_wsrp) {
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_rsr ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_rsr64 ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_rsrp ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_wsr ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_wsr64 ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_wsrp) {
SpecialRegisterAccessKind AccessKind = Write;
- if (BuiltinID == AArch64::BI__builtin_arm_rsr ||
- BuiltinID == AArch64::BI__builtin_arm_rsr64 ||
- BuiltinID == AArch64::BI__builtin_arm_rsrp)
+ if (BuiltinID == clang::AArch64::BI__builtin_arm_rsr ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_rsr64 ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_rsrp)
AccessKind = VolatileRead;
- bool IsPointerBuiltin = BuiltinID == AArch64::BI__builtin_arm_rsrp ||
- BuiltinID == AArch64::BI__builtin_arm_wsrp;
+ bool IsPointerBuiltin = BuiltinID == clang::AArch64::BI__builtin_arm_rsrp ||
+ BuiltinID == clang::AArch64::BI__builtin_arm_wsrp;
- bool Is64Bit = BuiltinID != AArch64::BI__builtin_arm_rsr &&
- BuiltinID != AArch64::BI__builtin_arm_wsr;
+ bool Is64Bit = BuiltinID != clang::AArch64::BI__builtin_arm_rsr &&
+ BuiltinID != clang::AArch64::BI__builtin_arm_wsr;
llvm::Type *ValueType;
llvm::Type *RegisterType = Int64Ty;
@@ -9900,8 +9905,8 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
AccessKind);
}
- if (BuiltinID == AArch64::BI_ReadStatusReg ||
- BuiltinID == AArch64::BI_WriteStatusReg) {
+ if (BuiltinID == clang::AArch64::BI_ReadStatusReg ||
+ BuiltinID == clang::AArch64::BI_WriteStatusReg) {
LLVMContext &Context = CGM.getLLVMContext();
unsigned SysReg =
@@ -9922,7 +9927,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
llvm::Type *RegisterType = Int64Ty;
llvm::Type *Types[] = { RegisterType };
- if (BuiltinID == AArch64::BI_ReadStatusReg) {
+ if (BuiltinID == clang::AArch64::BI_ReadStatusReg) {
llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::read_register, Types);
return Builder.CreateCall(F, Metadata);
@@ -9934,22 +9939,23 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return Builder.CreateCall(F, { Metadata, ArgValue });
}
- if (BuiltinID == AArch64::BI_AddressOfReturnAddress) {
+ if (BuiltinID == clang::AArch64::BI_AddressOfReturnAddress) {
llvm::Function *F =
CGM.getIntrinsic(Intrinsic::addressofreturnaddress, AllocaInt8PtrTy);
return Builder.CreateCall(F);
}
- if (BuiltinID == AArch64::BI__builtin_sponentry) {
+ if (BuiltinID == clang::AArch64::BI__builtin_sponentry) {
llvm::Function *F = CGM.getIntrinsic(Intrinsic::sponentry, AllocaInt8PtrTy);
return Builder.CreateCall(F);
}
- if (BuiltinID == AArch64::BI__mulh || BuiltinID == AArch64::BI__umulh) {
+ if (BuiltinID == clang::AArch64::BI__mulh ||
+ BuiltinID == clang::AArch64::BI__umulh) {
llvm::Type *ResType = ConvertType(E->getType());
llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
- bool IsSigned = BuiltinID == AArch64::BI__mulh;
+ bool IsSigned = BuiltinID == clang::AArch64::BI__mulh;
Value *LHS =
Builder.CreateIntCast(EmitScalarExpr(E->getArg(0)), Int128Ty, IsSigned);
Value *RHS =
@@ -9968,6 +9974,55 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
return HigherBits;
}
+ if (BuiltinID == clang::AArch64::BI__writex18byte ||
+ BuiltinID == clang::AArch64::BI__writex18word ||
+ BuiltinID == clang::AArch64::BI__writex18dword ||
+ BuiltinID == clang::AArch64::BI__writex18qword) {
+ llvm::Type *IntTy = ConvertType(E->getArg(1)->getType());
+
+ // Read x18 as i8*
+ LLVMContext &Context = CGM.getLLVMContext();
+ llvm::Metadata *Ops[] = {llvm::MDString::get(Context, "x18")};
+ llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
+ llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
+ llvm::Function *F =
+ CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
+ llvm::Value *X18 = Builder.CreateCall(F, Metadata);
+ X18 = Builder.CreateIntToPtr(X18, llvm::PointerType::get(Int8Ty, 0));
+
+ // Store val at x18 + offset
+ Value *Offset = Builder.CreateZExt(EmitScalarExpr(E->getArg(0)), Int64Ty);
+ Value *Ptr = Builder.CreateGEP(Int8Ty, X18, Offset);
+ Ptr = Builder.CreatePointerCast(Ptr, llvm::PointerType::get(IntTy, 0));
+ Value *Val = EmitScalarExpr(E->getArg(1));
+ StoreInst *Store = Builder.CreateAlignedStore(Val, Ptr, CharUnits::One());
+ return Store;
+ }
+
+ if (BuiltinID == clang::AArch64::BI__readx18byte ||
+ BuiltinID == clang::AArch64::BI__readx18word ||
+ BuiltinID == clang::AArch64::BI__readx18dword ||
+ BuiltinID == clang::AArch64::BI__readx18qword) {
+ llvm::Type *IntTy = ConvertType(E->getType());
+
+ // Read x18 as i8*
+ LLVMContext &Context = CGM.getLLVMContext();
+ llvm::Metadata *Ops[] = {llvm::MDString::get(Context, "x18")};
+ llvm::MDNode *RegName = llvm::MDNode::get(Context, Ops);
+ llvm::Value *Metadata = llvm::MetadataAsValue::get(Context, RegName);
+ llvm::Function *F =
+ CGM.getIntrinsic(llvm::Intrinsic::read_register, {Int64Ty});
+ llvm::Value *X18 = Builder.CreateCall(F, Metadata);
+ X18 = Builder.CreateIntToPtr(X18, llvm::PointerType::get(Int8Ty, 0));
+
+ // Load x18 + offset
+ Value *Offset = Builder.CreateZExt(EmitScalarExpr(E->getArg(0)), Int64Ty);
+ Value *Ptr = Builder.CreateGEP(Int8Ty, X18, Offset);
+ Ptr = Builder.CreatePointerCast(Ptr, llvm::PointerType::get(IntTy, 0));
+ LoadInst *Load = Builder.CreateAlignedLoad(IntTy, Ptr, CharUnits::One());
+ return Load;
+ }
+
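Caller-side sketch of the x18 intrinsics handled above (hedged: assumes the documented MSVC Arm64 shape, where x18 holds the TEB pointer and the first operand is a byte offset from it):

    unsigned char read_flag(void) {
      return __readx18byte(0x08);        // load  *(x18 + 0x08)
    }
    void write_slot(unsigned long long v) {
      __writex18qword(0x10, v);          // store *(x18 + 0x10)
    }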
// Handle MSVC intrinsics before argument evaluation to prevent double
// evaluation.
if (Optional<MSVCIntrin> MsvcIntId = translateAarch64ToMsvcIntrin(BuiltinID))
@@ -10299,7 +10354,10 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
- Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
+ if (P == llvm::FCmpInst::FCMP_OEQ)
+ Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
+ else
+ Ops[0] = Builder.CreateFCmpS(P, Ops[0], Ops[1]);
return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
}
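The quiet/signaling split matters under strict floating-point semantics: `CreateFCmpS` emits the signaling form (`llvm.experimental.constrained.fcmps` in constrained mode), which raises Invalid on quiet NaNs as the AArch64 ordering compares do, while equality stays quiet to match FCMEQ. A source-level sketch of the IEEE 754 behavior being modeled:

    // Under strict FP exception behavior, an ordered >= on a quiet NaN must
    // raise FE_INVALID, but == must not.
    bool ge(double a, double b) { return a >= b; } // signaling compare
    bool eq(double a, double b) { return a == b; } // quiet compare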
case NEON::BI__builtin_neon_vceqs_f32:
@@ -10319,7 +10377,10 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
- Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
+ if (P == llvm::FCmpInst::FCMP_OEQ)
+ Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
+ else
+ Ops[0] = Builder.CreateFCmpS(P, Ops[0], Ops[1]);
return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
}
case NEON::BI__builtin_neon_vceqh_f16:
@@ -10339,7 +10400,10 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
Ops.push_back(EmitScalarExpr(E->getArg(1)));
Ops[0] = Builder.CreateBitCast(Ops[0], HalfTy);
Ops[1] = Builder.CreateBitCast(Ops[1], HalfTy);
- Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
+ if (P == llvm::FCmpInst::FCMP_OEQ)
+ Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
+ else
+ Ops[0] = Builder.CreateFCmpS(P, Ops[0], Ops[1]);
return Builder.CreateSExt(Ops[0], Int16Ty, "vcmpd");
}
case NEON::BI__builtin_neon_vceqd_s64:
@@ -10684,7 +10748,7 @@ Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
"vgetq_lane");
}
- case AArch64::BI_InterlockedAdd: {
+ case clang::AArch64::BI_InterlockedAdd: {
Value *Arg0 = EmitScalarExpr(E->getArg(0));
Value *Arg1 = EmitScalarExpr(E->getArg(1));
AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
@@ -12520,13 +12584,6 @@ static Value *EmitX86SExtMask(CodeGenFunction &CGF, Value *Op,
return CGF.Builder.CreateSExt(Mask, DstTy, "vpmovm2");
}
-// Emit binary intrinsic with the same type used in result/args.
-static Value *EmitX86BinaryIntrinsic(CodeGenFunction &CGF,
- ArrayRef<Value *> Ops, Intrinsic::ID IID) {
- llvm::Function *F = CGF.CGM.getIntrinsic(IID, Ops[0]->getType());
- return CGF.Builder.CreateCall(F, {Ops[0], Ops[1]});
-}
-
Value *CodeGenFunction::EmitX86CpuIs(const CallExpr *E) {
const Expr *CPUExpr = E->getArg(0)->IgnoreParenCasts();
StringRef CPUStr = cast<clang::StringLiteral>(CPUExpr)->getString();
@@ -14383,12 +14440,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return EmitX86FunnelShift(*this, Ops[1], Ops[0], Ops[2], true);
// Reductions
- case X86::BI__builtin_ia32_reduce_add_d512:
- case X86::BI__builtin_ia32_reduce_add_q512: {
- Function *F =
- CGM.getIntrinsic(Intrinsic::vector_reduce_add, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0]});
- }
case X86::BI__builtin_ia32_reduce_fadd_pd512:
case X86::BI__builtin_ia32_reduce_fadd_ps512:
case X86::BI__builtin_ia32_reduce_fadd_ph512:
@@ -14429,12 +14480,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Builder.getFastMathFlags().setNoNaNs();
return Builder.CreateCall(F, {Ops[0]});
}
- case X86::BI__builtin_ia32_reduce_mul_d512:
- case X86::BI__builtin_ia32_reduce_mul_q512: {
- Function *F =
- CGM.getIntrinsic(Intrinsic::vector_reduce_mul, Ops[0]->getType());
- return Builder.CreateCall(F, {Ops[0]});
- }
// 3DNow!
case X86::BI__builtin_ia32_pswapdsf:
@@ -14871,6 +14916,46 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
return EmitX86Select(*this, Ops[2], Res, Ops[1]);
}
+ case X86::BI__cpuid:
+ case X86::BI__cpuidex: {
+ Value *FuncId = EmitScalarExpr(E->getArg(1));
+ Value *SubFuncId = BuiltinID == X86::BI__cpuidex
+ ? EmitScalarExpr(E->getArg(2))
+ : llvm::ConstantInt::get(Int32Ty, 0);
+
+ llvm::StructType *CpuidRetTy =
+ llvm::StructType::get(Int32Ty, Int32Ty, Int32Ty, Int32Ty);
+ llvm::FunctionType *FTy =
+ llvm::FunctionType::get(CpuidRetTy, {Int32Ty, Int32Ty}, false);
+
+ StringRef Asm, Constraints;
+ if (getTarget().getTriple().getArch() == llvm::Triple::x86) {
+ Asm = "cpuid";
+ Constraints = "={ax},={bx},={cx},={dx},{ax},{cx}";
+ } else {
+ // x86-64 uses %rbx as the base register, so preserve it.
+ Asm = "xchgq %rbx, ${1:q}\n"
+ "cpuid\n"
+ "xchgq %rbx, ${1:q}";
+ Constraints = "={ax},=r,={cx},={dx},0,2";
+ }
+
+ llvm::InlineAsm *IA = llvm::InlineAsm::get(FTy, Asm, Constraints,
+ /*hasSideEffects=*/false);
+ Value *IACall = Builder.CreateCall(IA, {FuncId, SubFuncId});
+ Value *BasePtr = EmitScalarExpr(E->getArg(0));
+ Value *Store = nullptr;
+ for (unsigned i = 0; i < 4; i++) {
+ Value *Extracted = Builder.CreateExtractValue(IACall, i);
+ Value *StorePtr = Builder.CreateConstInBoundsGEP1_32(Int32Ty, BasePtr, i);
+ Store = Builder.CreateAlignedStore(Extracted, StorePtr, getIntAlign());
+ }
+
+ // Return the last store instruction to signal that we have emitted the
+ // intrinsic.
+ return Store;
+ }
+
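Caller-side sketch of what the inline asm above implements; the `xchgq` dance exists because %rbx may be reserved as the base pointer on x86-64, so the asm must not clobber it directly (assumes an MS-extensions compile):

    void probe(int regs[4]) {
      __cpuid(regs, 1);      // leaf 1: regs = {eax, ebx, ecx, edx}
      __cpuidex(regs, 7, 0); // leaf 7, subleaf 0
    }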
case X86::BI__emul:
case X86::BI__emulu: {
llvm::Type *Int64Ty = llvm::IntegerType::get(getLLVMContext(), 64);
@@ -14980,34 +15065,6 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Load->setVolatile(true);
return Load;
}
- case X86::BI__builtin_ia32_paddsb512:
- case X86::BI__builtin_ia32_paddsw512:
- case X86::BI__builtin_ia32_paddsb256:
- case X86::BI__builtin_ia32_paddsw256:
- case X86::BI__builtin_ia32_paddsb128:
- case X86::BI__builtin_ia32_paddsw128:
- return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::sadd_sat);
- case X86::BI__builtin_ia32_paddusb512:
- case X86::BI__builtin_ia32_paddusw512:
- case X86::BI__builtin_ia32_paddusb256:
- case X86::BI__builtin_ia32_paddusw256:
- case X86::BI__builtin_ia32_paddusb128:
- case X86::BI__builtin_ia32_paddusw128:
- return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::uadd_sat);
- case X86::BI__builtin_ia32_psubsb512:
- case X86::BI__builtin_ia32_psubsw512:
- case X86::BI__builtin_ia32_psubsb256:
- case X86::BI__builtin_ia32_psubsw256:
- case X86::BI__builtin_ia32_psubsb128:
- case X86::BI__builtin_ia32_psubsw128:
- return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::ssub_sat);
- case X86::BI__builtin_ia32_psubusb512:
- case X86::BI__builtin_ia32_psubusw512:
- case X86::BI__builtin_ia32_psubusb256:
- case X86::BI__builtin_ia32_psubusw256:
- case X86::BI__builtin_ia32_psubusb128:
- case X86::BI__builtin_ia32_psubusw128:
- return EmitX86BinaryIntrinsic(*this, Ops, Intrinsic::usub_sat);
case X86::BI__builtin_ia32_encodekey128_u32: {
Intrinsic::ID IID = Intrinsic::x86_encodekey128;
@@ -15189,14 +15246,17 @@ Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
- SmallVector<Value*, 4> Ops;
-
- for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
- if (E->getArg(i)->getType()->isArrayType())
- Ops.push_back(EmitArrayToPointerDecay(E->getArg(i)).getPointer());
- else
- Ops.push_back(EmitScalarExpr(E->getArg(i)));
- }
+ // Do not emit the builtin arguments as the arguments of a function call,
+ // because the evaluation order of function arguments is unspecified in C++.
+ // This is important when testing, to ensure the arguments are emitted in
+ // the same order every time. E.g., instead of:
+ //   return Builder.CreateFDiv(EmitScalarExpr(E->getArg(0)),
+ //                             EmitScalarExpr(E->getArg(1)), "swdiv");
+ // use:
+ //   Value *Op0 = EmitScalarExpr(E->getArg(0));
+ //   Value *Op1 = EmitScalarExpr(E->getArg(1));
+ //   return Builder.CreateFDiv(Op0, Op1, "swdiv");
Intrinsic::ID ID = Intrinsic::not_intrinsic;
@@ -15223,6 +15283,9 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_vsx_lxvl:
case PPC::BI__builtin_vsx_lxvll:
{
+ SmallVector<Value *, 2> Ops;
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
if(BuiltinID == PPC::BI__builtin_vsx_lxvl ||
BuiltinID == PPC::BI__builtin_vsx_lxvll){
Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
@@ -15291,6 +15354,10 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_vsx_stxvl:
case PPC::BI__builtin_vsx_stxvll:
{
+ SmallVector<Value *, 3> Ops;
+ Ops.push_back(EmitScalarExpr(E->getArg(0)));
+ Ops.push_back(EmitScalarExpr(E->getArg(1)));
+ Ops.push_back(EmitScalarExpr(E->getArg(2)));
if(BuiltinID == PPC::BI__builtin_vsx_stxvl ||
BuiltinID == PPC::BI__builtin_vsx_stxvll ){
Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
@@ -15343,13 +15410,15 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
// Essentially boils down to performing an unaligned VMX load sequence so
// as to avoid crossing a page boundary and then shuffling the elements
// into the right side of the vector register.
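+ // (lvx ignores the low four bits of the address, so the two aligned loads
+ // below cover the requested bytes without either load crossing a page
+ // boundary; lvsl/lvsr produces the permute mask that vperm uses to splice
+ // the desired bytes together.)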
- int64_t NumBytes = cast<ConstantInt>(Ops[1])->getZExtValue();
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ int64_t NumBytes = cast<ConstantInt>(Op1)->getZExtValue();
llvm::Type *ResTy = ConvertType(E->getType());
bool IsLE = getTarget().isLittleEndian();
// If the user wants the entire vector, just load the entire vector.
if (NumBytes == 16) {
- Value *BC = Builder.CreateBitCast(Ops[0], ResTy->getPointerTo());
+ Value *BC = Builder.CreateBitCast(Op0, ResTy->getPointerTo());
Value *LD =
Builder.CreateLoad(Address(BC, ResTy, CharUnits::fromQuantity(1)));
if (!IsLE)
@@ -15367,16 +15436,14 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
: Intrinsic::ppc_altivec_lvsl);
llvm::Function *Vperm = CGM.getIntrinsic(Intrinsic::ppc_altivec_vperm);
Value *HiMem = Builder.CreateGEP(
- Int8Ty, Ops[0], ConstantInt::get(Ops[1]->getType(), NumBytes - 1));
- Value *LoLd = Builder.CreateCall(Lvx, Ops[0], "ld.lo");
+ Int8Ty, Op0, ConstantInt::get(Op1->getType(), NumBytes - 1));
+ Value *LoLd = Builder.CreateCall(Lvx, Op0, "ld.lo");
Value *HiLd = Builder.CreateCall(Lvx, HiMem, "ld.hi");
- Value *Mask1 = Builder.CreateCall(Lvs, Ops[0], "mask1");
+ Value *Mask1 = Builder.CreateCall(Lvs, Op0, "mask1");
- Ops.clear();
- Ops.push_back(IsLE ? HiLd : LoLd);
- Ops.push_back(IsLE ? LoLd : HiLd);
- Ops.push_back(Mask1);
- Value *AllElts = Builder.CreateCall(Vperm, Ops, "shuffle1");
+ Op0 = IsLE ? HiLd : LoLd;
+ Op1 = IsLE ? LoLd : HiLd;
+ Value *AllElts = Builder.CreateCall(Vperm, {Op0, Op1, Mask1}, "shuffle1");
Constant *Zero = llvm::Constant::getNullValue(IsLE ? ResTy : AllElts->getType());
if (IsLE) {
@@ -15397,23 +15464,25 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Builder.CreateCall(Vperm, {Zero, AllElts, Mask2}, "shuffle2"), ResTy);
}
case PPC::BI__builtin_vsx_strmb: {
- int64_t NumBytes = cast<ConstantInt>(Ops[1])->getZExtValue();
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ int64_t NumBytes = cast<ConstantInt>(Op1)->getZExtValue();
bool IsLE = getTarget().isLittleEndian();
auto StoreSubVec = [&](unsigned Width, unsigned Offset, unsigned EltNo) {
// When storing the whole vector, store it directly on BE; on LE, reverse
// the bytes and then store.
if (Width == 16) {
- Value *BC =
- Builder.CreateBitCast(Ops[0], Ops[2]->getType()->getPointerTo());
- Value *StVec = Ops[2];
+ Value *BC = Builder.CreateBitCast(Op0, Op2->getType()->getPointerTo());
+ Value *StVec = Op2;
if (IsLE) {
SmallVector<int, 16> RevMask;
for (int Idx = 0; Idx < 16; Idx++)
RevMask.push_back(15 - Idx);
- StVec = Builder.CreateShuffleVector(Ops[2], Ops[2], RevMask);
+ StVec = Builder.CreateShuffleVector(Op2, Op2, RevMask);
}
return Builder.CreateStore(
- StVec, Address(BC, Ops[2]->getType(), CharUnits::fromQuantity(1)));
+ StVec, Address(BC, Op2->getType(), CharUnits::fromQuantity(1)));
}
auto *ConvTy = Int64Ty;
unsigned NumElts = 0;
@@ -15438,9 +15507,9 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
break;
}
Value *Vec = Builder.CreateBitCast(
- Ops[2], llvm::FixedVectorType::get(ConvTy, NumElts));
- Value *Ptr = Builder.CreateGEP(Int8Ty, Ops[0],
- ConstantInt::get(Int64Ty, Offset));
+ Op2, llvm::FixedVectorType::get(ConvTy, NumElts));
+ Value *Ptr =
+ Builder.CreateGEP(Int8Ty, Op0, ConstantInt::get(Int64Ty, Offset));
Value *PtrBC = Builder.CreateBitCast(Ptr, ConvTy->getPointerTo());
Value *Elt = Builder.CreateExtractElement(Vec, EltNo);
if (IsLE && Width > 1) {
@@ -15512,62 +15581,65 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Function *F = CGM.getIntrinsic(Intrinsic::cttz, ResultType);
return Builder.CreateCall(F, {X, Undef});
}
- case PPC::BI__builtin_altivec_vec_replace_elt:
- case PPC::BI__builtin_altivec_vec_replace_unaligned: {
- // The third argument of vec_replace_elt and vec_replace_unaligned must
- // be a compile time constant and will be emitted either to the vinsw
- // or vinsd instruction.
- ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
+ case PPC::BI__builtin_altivec_vinsd:
+ case PPC::BI__builtin_altivec_vinsw:
+ case PPC::BI__builtin_altivec_vinsd_elt:
+ case PPC::BI__builtin_altivec_vinsw_elt: {
+ llvm::Type *ResultType = ConvertType(E->getType());
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+
+ bool IsUnaligned = (BuiltinID == PPC::BI__builtin_altivec_vinsw ||
+ BuiltinID == PPC::BI__builtin_altivec_vinsd);
+
+ bool Is32bit = (BuiltinID == PPC::BI__builtin_altivec_vinsw ||
+ BuiltinID == PPC::BI__builtin_altivec_vinsw_elt);
+
+ // The third argument must be a compile time constant.
+ ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2);
assert(ArgCI &&
"Third Arg to vinsw/vinsd intrinsic must be a constant integer!");
- llvm::Type *ResultType = ConvertType(E->getType());
- llvm::Function *F = nullptr;
- Value *Call = nullptr;
+
+ // The valid range of the third argument depends on the input type and the
+ // builtin being called.
+ int ValidMaxValue = 0;
+ if (IsUnaligned)
+ ValidMaxValue = (Is32bit) ? 12 : 8;
+ else
+ ValidMaxValue = (Is32bit) ? 3 : 1;
+
+ // Get the value of the third argument.
int64_t ConstArg = ArgCI->getSExtValue();
- unsigned ArgWidth = Ops[1]->getType()->getPrimitiveSizeInBits();
- bool Is32Bit = false;
- assert((ArgWidth == 32 || ArgWidth == 64) && "Invalid argument width");
- // The input to vec_replace_elt is an element index, not a byte index.
- if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt)
- ConstArg *= ArgWidth / 8;
- if (ArgWidth == 32) {
- Is32Bit = true;
- // When the second argument is 32 bits, it can either be an integer or
- // a float. The vinsw intrinsic is used in this case.
- F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsw);
+
+ // Compose range checking error message.
+ std::string RangeErrMsg = IsUnaligned ? "byte" : "element";
+ RangeErrMsg += " number " + llvm::to_string(ConstArg);
+ RangeErrMsg += " is outside of the valid range [0, ";
+ RangeErrMsg += llvm::to_string(ValidMaxValue) + "]";
+
+ // Issue error if third argument is not within the valid range.
+ if (ConstArg < 0 || ConstArg > ValidMaxValue)
+ CGM.Error(E->getExprLoc(), RangeErrMsg);
+
+ // The input to vec_replace_elt is an element index; convert it to a byte index.
+ if (!IsUnaligned) {
+ ConstArg *= Is32bit ? 4 : 8;
// Fix the constant according to endianness.
if (getTarget().isLittleEndian())
- ConstArg = 12 - ConstArg;
- } else {
- // When the second argument is 64 bits, it can either be a long long or
- // a double. The vinsd intrinsic is used in this case.
- F = CGM.getIntrinsic(Intrinsic::ppc_altivec_vinsd);
- // Fix the constant for little endian.
- if (getTarget().isLittleEndian())
- ConstArg = 8 - ConstArg;
+ ConstArg = (Is32bit ? 12 : 8) - ConstArg;
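+ // E.g., a 32-bit element index of 2 becomes byte index 2 * 4 = 8, which
+ // on little-endian is adjusted to 12 - 8 = 4.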
}
- Ops[2] = ConstantInt::getSigned(Int32Ty, ConstArg);
- // Depending on ArgWidth, the input vector could be a float or a double.
- // If the input vector is a float type, bitcast the inputs to integers. Or,
- // if the input vector is a double, bitcast the inputs to 64-bit integers.
- if (!Ops[1]->getType()->isIntegerTy(ArgWidth)) {
- Ops[0] = Builder.CreateBitCast(
- Ops[0], Is32Bit ? llvm::FixedVectorType::get(Int32Ty, 4)
- : llvm::FixedVectorType::get(Int64Ty, 2));
- Ops[1] = Builder.CreateBitCast(Ops[1], Is32Bit ? Int32Ty : Int64Ty);
- }
- // Emit the call to vinsw or vinsd.
- Call = Builder.CreateCall(F, Ops);
- // Depending on the builtin, bitcast to the approriate result type.
- if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt &&
- !Ops[1]->getType()->isIntegerTy())
- return Builder.CreateBitCast(Call, ResultType);
- else if (BuiltinID == PPC::BI__builtin_altivec_vec_replace_elt &&
- Ops[1]->getType()->isIntegerTy())
- return Call;
- else
- return Builder.CreateBitCast(Call,
- llvm::FixedVectorType::get(Int8Ty, 16));
+
+ ID = Is32bit ? Intrinsic::ppc_altivec_vinsw : Intrinsic::ppc_altivec_vinsd;
+ Op2 = ConstantInt::getSigned(Int32Ty, ConstArg);
+ // Cast the input to a vector of integers, as the intrinsic definition requires.
+ Op0 =
+ Is32bit
+ ? Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int32Ty, 4))
+ : Builder.CreateBitCast(Op0,
+ llvm::FixedVectorType::get(Int64Ty, 2));
+ return Builder.CreateBitCast(
+ Builder.CreateCall(CGM.getIntrinsic(ID), {Op0, Op1, Op2}), ResultType);
}
case PPC::BI__builtin_altivec_vpopcntb:
case PPC::BI__builtin_altivec_vpopcnth:
@@ -15580,15 +15652,60 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
}
case PPC::BI__builtin_altivec_vadduqm:
case PPC::BI__builtin_altivec_vsubuqm: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
- Ops[0] =
- Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int128Ty, 1));
- Ops[1] =
- Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int128Ty, 1));
+ Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int128Ty, 1));
+ Op1 = Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(Int128Ty, 1));
if (BuiltinID == PPC::BI__builtin_altivec_vadduqm)
- return Builder.CreateAdd(Ops[0], Ops[1], "vadduqm");
+ return Builder.CreateAdd(Op0, Op1, "vadduqm");
else
- return Builder.CreateSub(Ops[0], Ops[1], "vsubuqm");
+ return Builder.CreateSub(Op0, Op1, "vsubuqm");
+ }
+ case PPC::BI__builtin_altivec_vaddcuq_c:
+ case PPC::BI__builtin_altivec_vsubcuq_c: {
+ SmallVector<Value *, 2> Ops;
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ llvm::Type *V1I128Ty = llvm::FixedVectorType::get(
+ llvm::IntegerType::get(getLLVMContext(), 128), 1);
+ Ops.push_back(Builder.CreateBitCast(Op0, V1I128Ty));
+ Ops.push_back(Builder.CreateBitCast(Op1, V1I128Ty));
+ ID = (BuiltinID == PPC::BI__builtin_altivec_vaddcuq_c)
+ ? Intrinsic::ppc_altivec_vaddcuq
+ : Intrinsic::ppc_altivec_vsubcuq;
+ return Builder.CreateCall(CGM.getIntrinsic(ID), Ops, "");
+ }
+ case PPC::BI__builtin_altivec_vaddeuqm_c:
+ case PPC::BI__builtin_altivec_vaddecuq_c:
+ case PPC::BI__builtin_altivec_vsubeuqm_c:
+ case PPC::BI__builtin_altivec_vsubecuq_c: {
+ SmallVector<Value *, 3> Ops;
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ llvm::Type *V1I128Ty = llvm::FixedVectorType::get(
+ llvm::IntegerType::get(getLLVMContext(), 128), 1);
+ Ops.push_back(Builder.CreateBitCast(Op0, V1I128Ty));
+ Ops.push_back(Builder.CreateBitCast(Op1, V1I128Ty));
+ Ops.push_back(Builder.CreateBitCast(Op2, V1I128Ty));
+ switch (BuiltinID) {
+ default:
+ llvm_unreachable("Unsupported intrinsic!");
+ case PPC::BI__builtin_altivec_vaddeuqm_c:
+ ID = Intrinsic::ppc_altivec_vaddeuqm;
+ break;
+ case PPC::BI__builtin_altivec_vaddecuq_c:
+ ID = Intrinsic::ppc_altivec_vaddecuq;
+ break;
+ case PPC::BI__builtin_altivec_vsubeuqm_c:
+ ID = Intrinsic::ppc_altivec_vsubeuqm;
+ break;
+ case PPC::BI__builtin_altivec_vsubecuq_c:
+ ID = Intrinsic::ppc_altivec_vsubecuq;
+ break;
+ }
+ return Builder.CreateCall(CGM.getIntrinsic(ID), Ops, "");
}
// Rotate and insert under mask operation.
// __rldimi(rs, is, shift, mask)
@@ -15597,29 +15714,37 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
// (rotl(rs, shift) & mask) | (is & ~mask)
case PPC::BI__builtin_ppc_rldimi:
case PPC::BI__builtin_ppc_rlwimi: {
- llvm::Type *Ty = Ops[0]->getType();
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ Value *Op3 = EmitScalarExpr(E->getArg(3));
+ llvm::Type *Ty = Op0->getType();
Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
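+ // fshl(rs, rs, shift) with both value operands equal is a left rotate of
+ // rs by shift bits.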
if (BuiltinID == PPC::BI__builtin_ppc_rldimi)
- Ops[2] = Builder.CreateZExt(Ops[2], Int64Ty);
- Value *Shift = Builder.CreateCall(F, {Ops[0], Ops[0], Ops[2]});
- Value *X = Builder.CreateAnd(Shift, Ops[3]);
- Value *Y = Builder.CreateAnd(Ops[1], Builder.CreateNot(Ops[3]));
+ Op2 = Builder.CreateZExt(Op2, Int64Ty);
+ Value *Shift = Builder.CreateCall(F, {Op0, Op0, Op2});
+ Value *X = Builder.CreateAnd(Shift, Op3);
+ Value *Y = Builder.CreateAnd(Op1, Builder.CreateNot(Op3));
return Builder.CreateOr(X, Y);
}
// Rotate and insert under mask operation.
// __rlwnm(rs, shift, mask)
// rotl(rs, shift) & mask
case PPC::BI__builtin_ppc_rlwnm: {
- llvm::Type *Ty = Ops[0]->getType();
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ llvm::Type *Ty = Op0->getType();
Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
- Value *Shift = Builder.CreateCall(F, {Ops[0], Ops[0], Ops[1]});
- return Builder.CreateAnd(Shift, Ops[2]);
+ Value *Shift = Builder.CreateCall(F, {Op0, Op0, Op1});
+ return Builder.CreateAnd(Shift, Op2);
}
case PPC::BI__builtin_ppc_poppar4:
case PPC::BI__builtin_ppc_poppar8: {
- llvm::Type *ArgType = Ops[0]->getType();
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ArgType = Op0->getType();
Function *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);
- Value *Tmp = Builder.CreateCall(F, Ops[0]);
+ Value *Tmp = Builder.CreateCall(F, Op0);
llvm::Type *ResultType = ConvertType(E->getType());
Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
@@ -15629,10 +15754,12 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
return Result;
}
case PPC::BI__builtin_ppc_cmpb: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
if (getTarget().getTriple().isPPC64()) {
Function *F =
CGM.getIntrinsic(Intrinsic::ppc_cmpb, {Int64Ty, Int64Ty, Int64Ty});
- return Builder.CreateCall(F, Ops, "cmpb");
+ return Builder.CreateCall(F, {Op0, Op1}, "cmpb");
}
// For 32 bit, emit the code as below:
// %conv = trunc i64 %a to i32
@@ -15650,13 +15777,13 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
// ret i64 %or
Function *F =
CGM.getIntrinsic(Intrinsic::ppc_cmpb, {Int32Ty, Int32Ty, Int32Ty});
- Value *ArgOneLo = Builder.CreateTrunc(Ops[0], Int32Ty);
- Value *ArgTwoLo = Builder.CreateTrunc(Ops[1], Int32Ty);
+ Value *ArgOneLo = Builder.CreateTrunc(Op0, Int32Ty);
+ Value *ArgTwoLo = Builder.CreateTrunc(Op1, Int32Ty);
Constant *ShiftAmt = ConstantInt::get(Int64Ty, 32);
Value *ArgOneHi =
- Builder.CreateTrunc(Builder.CreateLShr(Ops[0], ShiftAmt), Int32Ty);
+ Builder.CreateTrunc(Builder.CreateLShr(Op0, ShiftAmt), Int32Ty);
Value *ArgTwoHi =
- Builder.CreateTrunc(Builder.CreateLShr(Ops[1], ShiftAmt), Int32Ty);
+ Builder.CreateTrunc(Builder.CreateLShr(Op1, ShiftAmt), Int32Ty);
Value *ResLo = Builder.CreateZExt(
Builder.CreateCall(F, {ArgOneLo, ArgTwoLo}, "cmpb"), Int64Ty);
Value *ResHiShift = Builder.CreateZExt(
@@ -15750,30 +15877,37 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
return FDiv;
}
case PPC::BI__builtin_ppc_alignx: {
- ConstantInt *AlignmentCI = cast<ConstantInt>(Ops[0]);
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ ConstantInt *AlignmentCI = cast<ConstantInt>(Op0);
if (AlignmentCI->getValue().ugt(llvm::Value::MaximumAlignment))
AlignmentCI = ConstantInt::get(AlignmentCI->getType(),
llvm::Value::MaximumAlignment);
- emitAlignmentAssumption(Ops[1], E->getArg(1),
+ emitAlignmentAssumption(Op1, E->getArg(1),
/*The expr loc is sufficient.*/ SourceLocation(),
AlignmentCI, nullptr);
- return Ops[1];
+ return Op1;
}
case PPC::BI__builtin_ppc_rdlam: {
- llvm::Type *Ty = Ops[0]->getType();
- Value *ShiftAmt = Builder.CreateIntCast(Ops[1], Ty, false);
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ llvm::Type *Ty = Op0->getType();
+ Value *ShiftAmt = Builder.CreateIntCast(Op1, Ty, false);
Function *F = CGM.getIntrinsic(Intrinsic::fshl, Ty);
- Value *Rotate = Builder.CreateCall(F, {Ops[0], Ops[0], ShiftAmt});
- return Builder.CreateAnd(Rotate, Ops[2]);
+ Value *Rotate = Builder.CreateCall(F, {Op0, Op0, ShiftAmt});
+ return Builder.CreateAnd(Rotate, Op2);
}
case PPC::BI__builtin_ppc_load2r: {
Function *F = CGM.getIntrinsic(Intrinsic::ppc_load2r);
- Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
- Value *LoadIntrinsic = Builder.CreateCall(F, Ops);
+ Value *Op0 = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int8PtrTy);
+ Value *LoadIntrinsic = Builder.CreateCall(F, {Op0});
return Builder.CreateTrunc(LoadIntrinsic, Int16Ty);
}
// FMA variations
+ case PPC::BI__builtin_ppc_fnmsub:
+ case PPC::BI__builtin_ppc_fnmsubs:
case PPC::BI__builtin_vsx_xvmaddadp:
case PPC::BI__builtin_vsx_xvmaddasp:
case PPC::BI__builtin_vsx_xvnmaddadp:
@@ -15812,6 +15946,8 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
F, {X, Y, Builder.CreateFNeg(Z, "neg")});
else
return Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")});
+ case PPC::BI__builtin_ppc_fnmsub:
+ case PPC::BI__builtin_ppc_fnmsubs:
case PPC::BI__builtin_vsx_xvnmsubadp:
case PPC::BI__builtin_vsx_xvnmsubasp:
if (Builder.getIsFPConstrained())
@@ -15820,20 +15956,22 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
"neg");
else
- return Builder.CreateFNeg(
- Builder.CreateCall(F, {X, Y, Builder.CreateFNeg(Z, "neg")}),
- "neg");
- }
+ return Builder.CreateCall(
+ CGM.getIntrinsic(Intrinsic::ppc_fnmsub, ResultType), {X, Y, Z});
+ }
llvm_unreachable("Unknown FMA operation");
return nullptr; // Suppress no-return warning
}
case PPC::BI__builtin_vsx_insertword: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxinsertw);
// Third argument is a compile time constant int. It must be clamped to
// the range [0, 12].
- ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
+ ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2);
assert(ArgCI &&
"Third arg to xxinsertw intrinsic must be constant integer");
const int64_t MaxIndex = 12;
@@ -15844,40 +15982,38 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
// word from the first argument, and inserts it in the second argument. The
// instruction extracts the word from its second input register and inserts
// it into its first input register, so swap the first and second arguments.
- std::swap(Ops[0], Ops[1]);
+ std::swap(Op0, Op1);
// Need to cast the second argument from a vector of unsigned int to a
// vector of long long.
- Ops[1] =
- Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
+ Op1 = Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(Int64Ty, 2));
if (getTarget().isLittleEndian()) {
// Reverse the double words in the vector we will extract from.
- Ops[0] =
- Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
- Ops[0] = Builder.CreateShuffleVector(Ops[0], Ops[0], ArrayRef<int>{1, 0});
+ Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int64Ty, 2));
+ Op0 = Builder.CreateShuffleVector(Op0, Op0, ArrayRef<int>{1, 0});
// Reverse the index.
Index = MaxIndex - Index;
}
// Intrinsic expects the first arg to be a vector of int.
- Ops[0] =
- Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
- Ops[2] = ConstantInt::getSigned(Int32Ty, Index);
- return Builder.CreateCall(F, Ops);
+ Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int32Ty, 4));
+ Op2 = ConstantInt::getSigned(Int32Ty, Index);
+ return Builder.CreateCall(F, {Op0, Op1, Op2});
}
case PPC::BI__builtin_vsx_extractuword: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_vsx_xxextractuw);
// Intrinsic expects the first argument to be a vector of doublewords.
- Ops[0] =
- Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
+ Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int64Ty, 2));
// The second argument is a compile time constant int that needs to
// be clamped to the range [0, 12].
- ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[1]);
+ ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op1);
assert(ArgCI &&
"Second Arg to xxextractuw intrinsic must be a constant integer!");
const int64_t MaxIndex = 12;
@@ -15886,29 +16022,30 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
if (getTarget().isLittleEndian()) {
// Reverse the index.
Index = MaxIndex - Index;
- Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
+ Op1 = ConstantInt::getSigned(Int32Ty, Index);
// Emit the call, then reverse the double words of the results vector.
- Value *Call = Builder.CreateCall(F, Ops);
+ Value *Call = Builder.CreateCall(F, {Op0, Op1});
Value *ShuffleCall =
Builder.CreateShuffleVector(Call, Call, ArrayRef<int>{1, 0});
return ShuffleCall;
} else {
- Ops[1] = ConstantInt::getSigned(Int32Ty, Index);
- return Builder.CreateCall(F, Ops);
+ Op1 = ConstantInt::getSigned(Int32Ty, Index);
+ return Builder.CreateCall(F, {Op0, Op1});
}
}
case PPC::BI__builtin_vsx_xxpermdi: {
- ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2);
assert(ArgCI && "Third arg must be constant integer!");
unsigned Index = ArgCI->getZExtValue();
- Ops[0] =
- Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int64Ty, 2));
- Ops[1] =
- Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int64Ty, 2));
+ Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int64Ty, 2));
+ Op1 = Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(Int64Ty, 2));
// Account for endianness by treating this as just a shuffle. So we use the
// same indices for both LE and BE in order to produce expected results in
@@ -15917,21 +16054,21 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
int ElemIdx1 = 2 + (Index & 1);
int ShuffleElts[2] = {ElemIdx0, ElemIdx1};
- Value *ShuffleCall =
- Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
+ Value *ShuffleCall = Builder.CreateShuffleVector(Op0, Op1, ShuffleElts);
QualType BIRetType = E->getType();
auto RetTy = ConvertType(BIRetType);
return Builder.CreateBitCast(ShuffleCall, RetTy);
}
case PPC::BI__builtin_vsx_xxsldwi: {
- ConstantInt *ArgCI = dyn_cast<ConstantInt>(Ops[2]);
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ ConstantInt *ArgCI = dyn_cast<ConstantInt>(Op2);
assert(ArgCI && "Third argument must be a compile time constant");
unsigned Index = ArgCI->getZExtValue() & 0x3;
- Ops[0] =
- Builder.CreateBitCast(Ops[0], llvm::FixedVectorType::get(Int32Ty, 4));
- Ops[1] =
- Builder.CreateBitCast(Ops[1], llvm::FixedVectorType::get(Int32Ty, 4));
+ Op0 = Builder.CreateBitCast(Op0, llvm::FixedVectorType::get(Int32Ty, 4));
+ Op1 = Builder.CreateBitCast(Op1, llvm::FixedVectorType::get(Int32Ty, 4));
// Create a shuffle mask
int ElemIdx0;
@@ -15955,28 +16092,31 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
}
int ShuffleElts[4] = {ElemIdx0, ElemIdx1, ElemIdx2, ElemIdx3};
- Value *ShuffleCall =
- Builder.CreateShuffleVector(Ops[0], Ops[1], ShuffleElts);
+ Value *ShuffleCall = Builder.CreateShuffleVector(Op0, Op1, ShuffleElts);
QualType BIRetType = E->getType();
auto RetTy = ConvertType(BIRetType);
return Builder.CreateBitCast(ShuffleCall, RetTy);
}
case PPC::BI__builtin_pack_vector_int128: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
bool isLittleEndian = getTarget().isLittleEndian();
Value *UndefValue =
- llvm::UndefValue::get(llvm::FixedVectorType::get(Ops[0]->getType(), 2));
+ llvm::UndefValue::get(llvm::FixedVectorType::get(Op0->getType(), 2));
Value *Res = Builder.CreateInsertElement(
- UndefValue, Ops[0], (uint64_t)(isLittleEndian ? 1 : 0));
- Res = Builder.CreateInsertElement(Res, Ops[1],
+ UndefValue, Op0, (uint64_t)(isLittleEndian ? 1 : 0));
+ Res = Builder.CreateInsertElement(Res, Op1,
(uint64_t)(isLittleEndian ? 0 : 1));
return Builder.CreateBitCast(Res, ConvertType(E->getType()));
}
case PPC::BI__builtin_unpack_vector_int128: {
- ConstantInt *Index = cast<ConstantInt>(Ops[1]);
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ ConstantInt *Index = cast<ConstantInt>(Op1);
Value *Unpacked = Builder.CreateBitCast(
- Ops[0], llvm::FixedVectorType::get(ConvertType(E->getType()), 2));
+ Op0, llvm::FixedVectorType::get(ConvertType(E->getType()), 2));
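+ // The doublewords are in reverse element order on little-endian, so flip
+ // the requested index.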
if (getTarget().isLittleEndian())
Index = ConstantInt::get(Index->getType(), 1 - Index->getZExtValue());
@@ -15986,9 +16126,9 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_ppc_sthcx: {
llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_sthcx);
- Ops[0] = Builder.CreateBitCast(Ops[0], Int8PtrTy);
- Ops[1] = Builder.CreateSExt(Ops[1], Int32Ty);
- return Builder.CreateCall(F, Ops);
+ Value *Op0 = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int8PtrTy);
+ Value *Op1 = Builder.CreateSExt(EmitScalarExpr(E->getArg(1)), Int32Ty);
+ return Builder.CreateCall(F, {Op0, Op1});
}
// The PPC MMA builtins take a pointer to a __vector_quad as an argument.
@@ -16001,6 +16141,12 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_##Name:
#include "clang/Basic/BuiltinsPPC.def"
{
+ SmallVector<Value *, 4> Ops;
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
+ if (E->getArg(i)->getType()->isArrayType())
+ Ops.push_back(EmitArrayToPointerDecay(E->getArg(i)).getPointer());
+ else
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
// The first argument of these two builtins is a pointer used to store their
// result. However, the llvm intrinsics return their result in multiple
// return values. So, here we emit code extracting these values from the
@@ -16084,8 +16230,9 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Value *OldVal = Builder.CreateLoad(OldValAddr);
QualType AtomicTy = E->getArg(0)->getType()->getPointeeType();
LValue LV = MakeAddrLValue(Addr, AtomicTy);
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
auto Pair = EmitAtomicCompareExchange(
- LV, RValue::get(OldVal), RValue::get(Ops[2]), E->getExprLoc(),
+ LV, RValue::get(OldVal), RValue::get(Op2), E->getExprLoc(),
llvm::AtomicOrdering::Monotonic, llvm::AtomicOrdering::Monotonic, true);
// Unlike C11's atomic_compare_exchange, according to
// https://www.ibm.com/docs/en/xl-c-and-cpp-aix/16.1?topic=functions-compare-swap-compare-swaplp
@@ -16125,38 +16272,45 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
case PPC::BI__builtin_ppc_lbarx:
return emitPPCLoadReserveIntrinsic(*this, BuiltinID, E);
case PPC::BI__builtin_ppc_mfspr: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(VoidPtrTy) == 32
? Int32Ty
: Int64Ty;
Function *F = CGM.getIntrinsic(Intrinsic::ppc_mfspr, RetType);
- return Builder.CreateCall(F, Ops);
+ return Builder.CreateCall(F, {Op0});
}
case PPC::BI__builtin_ppc_mtspr: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
llvm::Type *RetType = CGM.getDataLayout().getTypeSizeInBits(VoidPtrTy) == 32
? Int32Ty
: Int64Ty;
Function *F = CGM.getIntrinsic(Intrinsic::ppc_mtspr, RetType);
- return Builder.CreateCall(F, Ops);
+ return Builder.CreateCall(F, {Op0, Op1});
}
case PPC::BI__builtin_ppc_popcntb: {
Value *ArgValue = EmitScalarExpr(E->getArg(0));
llvm::Type *ArgType = ArgValue->getType();
Function *F = CGM.getIntrinsic(Intrinsic::ppc_popcntb, {ArgType, ArgType});
- return Builder.CreateCall(F, Ops, "popcntb");
+ return Builder.CreateCall(F, {ArgValue}, "popcntb");
}
case PPC::BI__builtin_ppc_mtfsf: {
// The builtin takes a uint32 that needs to be cast to an
// f64 to be passed to the intrinsic.
- Value *Cast = Builder.CreateUIToFP(Ops[1], DoubleTy);
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Cast = Builder.CreateUIToFP(Op1, DoubleTy);
llvm::Function *F = CGM.getIntrinsic(Intrinsic::ppc_mtfsf);
- return Builder.CreateCall(F, {Ops[0], Cast}, "");
+ return Builder.CreateCall(F, {Op0, Cast}, "");
}
case PPC::BI__builtin_ppc_swdiv_nochk:
case PPC::BI__builtin_ppc_swdivs_nochk: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
FastMathFlags FMF = Builder.getFastMathFlags();
Builder.getFastMathFlags().setFast();
- Value *FDiv = Builder.CreateFDiv(Ops[0], Ops[1], "swdiv_nochk");
+ Value *FDiv = Builder.CreateFDiv(Op0, Op1, "swdiv_nochk");
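+ // ANDing with the saved flags restores the caller's fast-math state after
+ // setFast() enabled everything.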
Builder.getFastMathFlags() &= (FMF);
return FDiv;
}
@@ -16196,7 +16350,9 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
Intrinsic::experimental_constrained_sqrt))
.getScalarVal();
case PPC::BI__builtin_ppc_test_data_class: {
- llvm::Type *ArgType = EmitScalarExpr(E->getArg(0))->getType();
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ llvm::Type *ArgType = Op0->getType();
unsigned IntrinsicID;
if (ArgType->isDoubleTy())
IntrinsicID = Intrinsic::ppc_test_data_class_d;
@@ -16204,12 +16360,63 @@ Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
IntrinsicID = Intrinsic::ppc_test_data_class_f;
else
llvm_unreachable("Invalid Argument Type");
- return Builder.CreateCall(CGM.getIntrinsic(IntrinsicID), Ops,
+ return Builder.CreateCall(CGM.getIntrinsic(IntrinsicID), {Op0, Op1},
"test_data_class");
}
+ case PPC::BI__builtin_ppc_maxfe: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ Value *Op3 = EmitScalarExpr(E->getArg(3));
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_maxfe),
+ {Op0, Op1, Op2, Op3});
+ }
+ case PPC::BI__builtin_ppc_maxfl: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ Value *Op3 = EmitScalarExpr(E->getArg(3));
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_maxfl),
+ {Op0, Op1, Op2, Op3});
+ }
+ case PPC::BI__builtin_ppc_maxfs: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ Value *Op3 = EmitScalarExpr(E->getArg(3));
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_maxfs),
+ {Op0, Op1, Op2, Op3});
+ }
+ case PPC::BI__builtin_ppc_minfe: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ Value *Op3 = EmitScalarExpr(E->getArg(3));
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_minfe),
+ {Op0, Op1, Op2, Op3});
+ }
+ case PPC::BI__builtin_ppc_minfl: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ Value *Op3 = EmitScalarExpr(E->getArg(3));
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_minfl),
+ {Op0, Op1, Op2, Op3});
+ }
+ case PPC::BI__builtin_ppc_minfs: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ Value *Op2 = EmitScalarExpr(E->getArg(2));
+ Value *Op3 = EmitScalarExpr(E->getArg(3));
+ return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::ppc_minfs),
+ {Op0, Op1, Op2, Op3});
+ }
case PPC::BI__builtin_ppc_swdiv:
- case PPC::BI__builtin_ppc_swdivs:
- return Builder.CreateFDiv(Ops[0], Ops[1], "swdiv");
+ case PPC::BI__builtin_ppc_swdivs: {
+ Value *Op0 = EmitScalarExpr(E->getArg(0));
+ Value *Op1 = EmitScalarExpr(E->getArg(1));
+ return Builder.CreateFDiv(Op0, Op1, "swdiv");
+ }
}
}
@@ -16232,12 +16439,31 @@ Value *EmitAMDGPUDispatchPtr(CodeGenFunction &CGF,
return CGF.Builder.CreateAddrSpaceCast(Call, RetTy);
}
+Value *EmitAMDGPUImplicitArgPtr(CodeGenFunction &CGF) {
+ auto *F = CGF.CGM.getIntrinsic(Intrinsic::amdgcn_implicitarg_ptr);
+ auto *Call = CGF.Builder.CreateCall(F);
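+ // Annotate the returned pointer: the implicit kernarg segment is assumed
+ // to be at least 256 bytes long and 8-byte aligned.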
+ Call->addRetAttr(
+ Attribute::getWithDereferenceableBytes(Call->getContext(), 256));
+ Call->addRetAttr(Attribute::getWithAlignment(Call->getContext(), Align(8)));
+ return Call;
+}
+
// \p Index is 0, 1, or 2 for the x, y, and z dimensions, respectively.
Value *EmitAMDGPUWorkGroupSize(CodeGenFunction &CGF, unsigned Index) {
- const unsigned XOffset = 4;
- auto *DP = EmitAMDGPUDispatchPtr(CGF);
- // Indexing the HSA kernel_dispatch_packet struct.
- auto *Offset = llvm::ConstantInt::get(CGF.Int32Ty, XOffset + Index * 2);
+ bool IsCOV_5 = CGF.getTarget().getTargetOpts().CodeObjectVersion ==
+ clang::TargetOptions::COV_5;
+ Constant *Offset;
+ Value *DP;
+ if (IsCOV_5) {
+ // Indexing the implicit kernarg segment.
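+ // The workgroup sizes are three consecutive 16-bit fields starting at
+ // byte offset 12 of the segment.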
+ Offset = llvm::ConstantInt::get(CGF.Int32Ty, 12 + Index * 2);
+ DP = EmitAMDGPUImplicitArgPtr(CGF);
+ } else {
+ // Indexing the HSA kernel_dispatch_packet struct.
+ Offset = llvm::ConstantInt::get(CGF.Int32Ty, 4 + Index * 2);
+ DP = EmitAMDGPUDispatchPtr(CGF);
+ }
+
auto *GEP = CGF.Builder.CreateGEP(CGF.Int8Ty, DP, Offset);
auto *DstTy =
CGF.Int16Ty->getPointerTo(GEP->getType()->getPointerAddressSpace());
@@ -16506,7 +16732,9 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_global_atomic_fmax_f64:
case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f64:
case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmin_f64:
- case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64: {
+ case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64:
+ case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f32:
+ case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2f16: {
Intrinsic::ID IID;
llvm::Type *ArgTy = llvm::Type::getDoubleTy(getLLVMContext());
switch (BuiltinID) {
@@ -16537,6 +16765,15 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
case AMDGPU::BI__builtin_amdgcn_flat_atomic_fmax_f64:
IID = Intrinsic::amdgcn_flat_atomic_fmax;
break;
+ case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_f32:
+ ArgTy = llvm::Type::getFloatTy(getLLVMContext());
+ IID = Intrinsic::amdgcn_flat_atomic_fadd;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2f16:
+ ArgTy = llvm::FixedVectorType::get(
+ llvm::Type::getHalfTy(getLLVMContext()), 2);
+ IID = Intrinsic::amdgcn_flat_atomic_fadd;
+ break;
}
llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
llvm::Value *Val = EmitScalarExpr(E->getArg(1));
@@ -16544,6 +16781,22 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(IID, {ArgTy, Addr->getType(), Val->getType()});
return Builder.CreateCall(F, {Addr, Val});
}
+ case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2bf16:
+ case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2bf16: {
+ Intrinsic::ID IID;
+ switch (BuiltinID) {
+ case AMDGPU::BI__builtin_amdgcn_global_atomic_fadd_v2bf16:
+ IID = Intrinsic::amdgcn_global_atomic_fadd_v2bf16;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_flat_atomic_fadd_v2bf16:
+ IID = Intrinsic::amdgcn_flat_atomic_fadd_v2bf16;
+ break;
+ }
+ llvm::Value *Addr = EmitScalarExpr(E->getArg(0));
+ llvm::Value *Val = EmitScalarExpr(E->getArg(1));
+ llvm::Function *F = CGM.getIntrinsic(IID, {Addr->getType()});
+ return Builder.CreateCall(F, {Addr, Val});
+ }
case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f64:
case AMDGPU::BI__builtin_amdgcn_ds_atomic_fadd_f32: {
Intrinsic::ID IID;
@@ -16608,6 +16861,69 @@ Value *CodeGenFunction::EmitAMDGPUBuiltinExpr(unsigned BuiltinID,
RayInverseDir, TextureDescr});
}
+ case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w64:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w64:
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64:
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64: {
+
+ // These operations perform a matrix multiplication and accumulation of
+ // the form:
+ // D = A * B + C
+ // The return type always matches the type of matrix C.
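+ // For the floating-point variants C is argument 2; for the iu8/iu4
+ // variants it is argument 4, since A and B are each preceded by a
+ // signedness flag in the builtin signature.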
+ unsigned ArgForMatchingRetType;
+ unsigned BuiltinWMMAOp;
+
+ switch (BuiltinID) {
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_f16_w64:
+ ArgForMatchingRetType = 2;
+ BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_f16;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f32_16x16x16_bf16_w64:
+ ArgForMatchingRetType = 2;
+ BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f32_16x16x16_bf16;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_f16_16x16x16_f16_w64:
+ ArgForMatchingRetType = 2;
+ BuiltinWMMAOp = Intrinsic::amdgcn_wmma_f16_16x16x16_f16;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_bf16_16x16x16_bf16_w64:
+ ArgForMatchingRetType = 2;
+ BuiltinWMMAOp = Intrinsic::amdgcn_wmma_bf16_16x16x16_bf16;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu8_w64:
+ ArgForMatchingRetType = 4;
+ BuiltinWMMAOp = Intrinsic::amdgcn_wmma_i32_16x16x16_iu8;
+ break;
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w32:
+ case AMDGPU::BI__builtin_amdgcn_wmma_i32_16x16x16_iu4_w64:
+ ArgForMatchingRetType = 4;
+ BuiltinWMMAOp = Intrinsic::amdgcn_wmma_i32_16x16x16_iu4;
+ break;
+ }
+
+ SmallVector<Value *, 6> Args;
+ for (int i = 0, e = E->getNumArgs(); i != e; ++i)
+ Args.push_back(EmitScalarExpr(E->getArg(i)));
+
+ Function *F = CGM.getIntrinsic(BuiltinWMMAOp,
+ {Args[ArgForMatchingRetType]->getType()});
+
+ return Builder.CreateCall(F, Args);
+ }
+
// amdgcn workitem
case AMDGPU::BI__builtin_amdgcn_workitem_id_x:
return emitRangedBuiltin(*this, Intrinsic::amdgcn_workitem_id_x, 0, 1024);
@@ -17411,18 +17727,19 @@ Value *
CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
auto MakeLdg = [&](unsigned IntrinsicID) {
Value *Ptr = EmitScalarExpr(E->getArg(0));
- clang::CharUnits Align =
- CGM.getNaturalPointeeTypeAlignment(E->getArg(0)->getType());
+ QualType ArgType = E->getArg(0)->getType();
+ clang::CharUnits Align = CGM.getNaturalPointeeTypeAlignment(ArgType);
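+ // With opaque pointers the element type can no longer be read off the
+ // pointer type, so derive it from the AST pointee type instead.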
+ llvm::Type *ElemTy = ConvertTypeForMem(ArgType->getPointeeType());
return Builder.CreateCall(
- CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
- Ptr->getType()}),
+ CGM.getIntrinsic(IntrinsicID, {ElemTy, Ptr->getType()}),
{Ptr, ConstantInt::get(Builder.getInt32Ty(), Align.getQuantity())});
};
auto MakeScopedAtomic = [&](unsigned IntrinsicID) {
Value *Ptr = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ElemTy =
+ ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType());
return Builder.CreateCall(
- CGM.getIntrinsic(IntrinsicID, {Ptr->getType()->getPointerElementType(),
- Ptr->getType()}),
+ CGM.getIntrinsic(IntrinsicID, {ElemTy, Ptr->getType()}),
{Ptr, EmitScalarExpr(E->getArg(1))});
};
switch (BuiltinID) {
@@ -17628,20 +17945,22 @@ CodeGenFunction::EmitNVPTXBuiltinExpr(unsigned BuiltinID, const CallExpr *E) {
case NVPTX::BI__nvvm_atom_cta_cas_gen_l:
case NVPTX::BI__nvvm_atom_cta_cas_gen_ll: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ElemTy =
+ ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType());
return Builder.CreateCall(
CGM.getIntrinsic(
- Intrinsic::nvvm_atomic_cas_gen_i_cta,
- {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
+ Intrinsic::nvvm_atomic_cas_gen_i_cta, {ElemTy, Ptr->getType()}),
{Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
}
case NVPTX::BI__nvvm_atom_sys_cas_gen_i:
case NVPTX::BI__nvvm_atom_sys_cas_gen_l:
case NVPTX::BI__nvvm_atom_sys_cas_gen_ll: {
Value *Ptr = EmitScalarExpr(E->getArg(0));
+ llvm::Type *ElemTy =
+ ConvertTypeForMem(E->getArg(0)->getType()->getPointeeType());
return Builder.CreateCall(
CGM.getIntrinsic(
- Intrinsic::nvvm_atomic_cas_gen_i_sys,
- {Ptr->getType()->getPointerElementType(), Ptr->getType()}),
+ Intrinsic::nvvm_atomic_cas_gen_i_sys, {ElemTy, Ptr->getType()}),
{Ptr, EmitScalarExpr(E->getArg(1)), EmitScalarExpr(E->getArg(2))});
}
case NVPTX::BI__nvvm_match_all_sync_i32p:
@@ -17936,7 +18255,7 @@ RValue CodeGenFunction::EmitBuiltinIsAligned(const CallExpr *E) {
/// Generate (x & ~(y-1)) to align down or ((x+(y-1)) & ~(y-1)) to align up.
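+/// For example, aligning x = 13 to y = 8 yields 13 & ~7 = 8 when aligning
+/// down and (13 + 7) & ~7 = 16 when aligning up.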
/// Note: For pointer types we can avoid ptrtoint/inttoptr pairs by using the
-/// llvm.ptrmask instrinsic (with a GEP before in the align_up case).
+/// llvm.ptrmask intrinsic (with a GEP before in the align_up case).
/// TODO: actually use ptrmask once most optimization passes know about it.
RValue CodeGenFunction::EmitBuiltinAlignTo(const CallExpr *E, bool AlignUp) {
BuiltinAlignArgs Args(E, *this);
@@ -18368,15 +18687,15 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
CGM.getIntrinsic(IntNo, {ConvertType(E->getType()), Low->getType()});
return Builder.CreateCall(Callee, {Low, High});
}
- case WebAssembly::BI__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4:
- case WebAssembly::BI__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4: {
+ case WebAssembly::BI__builtin_wasm_trunc_sat_s_zero_f64x2_i32x4:
+ case WebAssembly::BI__builtin_wasm_trunc_sat_u_zero_f64x2_i32x4: {
Value *Vec = EmitScalarExpr(E->getArg(0));
unsigned IntNo;
switch (BuiltinID) {
- case WebAssembly::BI__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4:
+ case WebAssembly::BI__builtin_wasm_trunc_sat_s_zero_f64x2_i32x4:
IntNo = Intrinsic::fptosi_sat;
break;
- case WebAssembly::BI__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4:
+ case WebAssembly::BI__builtin_wasm_trunc_sat_u_zero_f64x2_i32x4:
IntNo = Intrinsic::fptoui_sat;
break;
default:
@@ -18467,8 +18786,8 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
}
case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_i32x4_f32x4:
case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_i32x4_f32x4:
- case WebAssembly::BI__builtin_wasm_relaxed_trunc_zero_s_i32x4_f64x2:
- case WebAssembly::BI__builtin_wasm_relaxed_trunc_zero_u_i32x4_f64x2: {
+ case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_zero_i32x4_f64x2:
+ case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_zero_i32x4_f64x2: {
Value *Vec = EmitScalarExpr(E->getArg(0));
unsigned IntNo;
switch (BuiltinID) {
@@ -18478,11 +18797,11 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_i32x4_f32x4:
IntNo = Intrinsic::wasm_relaxed_trunc_unsigned;
break;
- case WebAssembly::BI__builtin_wasm_relaxed_trunc_zero_s_i32x4_f64x2:
- IntNo = Intrinsic::wasm_relaxed_trunc_zero_signed;
+ case WebAssembly::BI__builtin_wasm_relaxed_trunc_s_zero_i32x4_f64x2:
+ IntNo = Intrinsic::wasm_relaxed_trunc_signed_zero;
break;
- case WebAssembly::BI__builtin_wasm_relaxed_trunc_zero_u_i32x4_f64x2:
- IntNo = Intrinsic::wasm_relaxed_trunc_zero_unsigned;
+ case WebAssembly::BI__builtin_wasm_relaxed_trunc_u_zero_i32x4_f64x2:
+ IntNo = Intrinsic::wasm_relaxed_trunc_unsigned_zero;
break;
default:
llvm_unreachable("unexpected builtin ID");
@@ -18490,13 +18809,33 @@ Value *CodeGenFunction::EmitWebAssemblyBuiltinExpr(unsigned BuiltinID,
Function *Callee = CGM.getIntrinsic(IntNo);
return Builder.CreateCall(Callee, {Vec});
}
+ case WebAssembly::BI__builtin_wasm_relaxed_q15mulr_s_i16x8: {
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_relaxed_q15mulr_signed);
+ return Builder.CreateCall(Callee, {LHS, RHS});
+ }
+ case WebAssembly::BI__builtin_wasm_dot_i8x16_i7x16_s_i16x8: {
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+ Function *Callee = CGM.getIntrinsic(Intrinsic::wasm_dot_i8x16_i7x16_signed);
+ return Builder.CreateCall(Callee, {LHS, RHS});
+ }
+ case WebAssembly::BI__builtin_wasm_dot_i8x16_i7x16_add_s_i32x4: {
+ Value *LHS = EmitScalarExpr(E->getArg(0));
+ Value *RHS = EmitScalarExpr(E->getArg(1));
+ Value *Acc = EmitScalarExpr(E->getArg(2));
+ Function *Callee =
+ CGM.getIntrinsic(Intrinsic::wasm_dot_i8x16_i7x16_add_signed);
+ return Builder.CreateCall(Callee, {LHS, RHS, Acc});
+ }
default:
return nullptr;
}
}
static std::pair<Intrinsic::ID, unsigned>
-getIntrinsicForHexagonNonGCCBuiltin(unsigned BuiltinID) {
+getIntrinsicForHexagonNonClangBuiltin(unsigned BuiltinID) {
struct Info {
unsigned BuiltinID;
Intrinsic::ID IntrinsicID;
@@ -18556,7 +18895,7 @@ Value *CodeGenFunction::EmitHexagonBuiltinExpr(unsigned BuiltinID,
const CallExpr *E) {
Intrinsic::ID ID;
unsigned VecLen;
- std::tie(ID, VecLen) = getIntrinsicForHexagonNonGCCBuiltin(BuiltinID);
+ std::tie(ID, VecLen) = getIntrinsicForHexagonNonClangBuiltin(BuiltinID);
auto MakeCircOp = [this, E](unsigned IntID, bool IsLoad) {
// The base pointer is passed by address, so it needs to be loaded.
@@ -18733,8 +19072,34 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
SmallVector<Value *, 4> Ops;
llvm::Type *ResultType = ConvertType(E->getType());
- for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
- Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ // Find out if any arguments are required to be integer constant expressions.
+ unsigned ICEArguments = 0;
+ ASTContext::GetBuiltinTypeError Error;
+ getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
+ if (Error == ASTContext::GE_Missing_type) {
+ // Vector intrinsics don't have a type string.
+ assert(BuiltinID >= clang::RISCV::FirstRVVBuiltin &&
+ BuiltinID <= clang::RISCV::LastRVVBuiltin);
+ ICEArguments = 0;
+ if (BuiltinID == RISCVVector::BI__builtin_rvv_vget_v ||
+ BuiltinID == RISCVVector::BI__builtin_rvv_vset_v)
+ ICEArguments = 1 << 1;
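+ // Bit i of ICEArguments marks argument i as an integer constant
+ // expression; here only the index operand (argument 1) is constrained.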
+ } else {
+ assert(Error == ASTContext::GE_None && "Unexpected error");
+ }
+
+ for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
+ // If this is a normal argument, just emit it as a scalar.
+ if ((ICEArguments & (1 << i)) == 0) {
+ Ops.push_back(EmitScalarExpr(E->getArg(i)));
+ continue;
+ }
+
+ // If this is required to be a constant, constant fold it so that we know
+ // that the generated intrinsic gets a ConstantInt.
+ Ops.push_back(llvm::ConstantInt::get(
+ getLLVMContext(), *E->getArg(i)->getIntegerConstantExpr(getContext())));
+ }
Intrinsic::ID ID = Intrinsic::not_intrinsic;
unsigned NF = 1;
@@ -18746,6 +19111,10 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
default: llvm_unreachable("unexpected builtin ID");
case RISCV::BI__builtin_riscv_orc_b_32:
case RISCV::BI__builtin_riscv_orc_b_64:
+ case RISCV::BI__builtin_riscv_clz_32:
+ case RISCV::BI__builtin_riscv_clz_64:
+ case RISCV::BI__builtin_riscv_ctz_32:
+ case RISCV::BI__builtin_riscv_ctz_64:
case RISCV::BI__builtin_riscv_clmul:
case RISCV::BI__builtin_riscv_clmulh:
case RISCV::BI__builtin_riscv_clmulr:
@@ -18763,6 +19132,8 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
case RISCV::BI__builtin_riscv_shfl_64:
case RISCV::BI__builtin_riscv_unshfl_32:
case RISCV::BI__builtin_riscv_unshfl_64:
+ case RISCV::BI__builtin_riscv_xperm4:
+ case RISCV::BI__builtin_riscv_xperm8:
case RISCV::BI__builtin_riscv_xperm_n:
case RISCV::BI__builtin_riscv_xperm_b:
case RISCV::BI__builtin_riscv_xperm_h:
@@ -18778,7 +19149,10 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
case RISCV::BI__builtin_riscv_fsl_32:
case RISCV::BI__builtin_riscv_fsr_32:
case RISCV::BI__builtin_riscv_fsl_64:
- case RISCV::BI__builtin_riscv_fsr_64: {
+ case RISCV::BI__builtin_riscv_fsr_64:
+ case RISCV::BI__builtin_riscv_brev8:
+ case RISCV::BI__builtin_riscv_zip_32:
+ case RISCV::BI__builtin_riscv_unzip_32: {
switch (BuiltinID) {
default: llvm_unreachable("unexpected builtin ID");
// Zbb
@@ -18786,6 +19160,16 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
case RISCV::BI__builtin_riscv_orc_b_64:
ID = Intrinsic::riscv_orc_b;
break;
+ case RISCV::BI__builtin_riscv_clz_32:
+ case RISCV::BI__builtin_riscv_clz_64: {
+ Function *F = CGM.getIntrinsic(Intrinsic::ctlz, Ops[0]->getType());
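+ // The 'false' flag requests a defined result (the bit width) for a zero
+ // input; the same applies to cttz below.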
+ return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
+ }
+ case RISCV::BI__builtin_riscv_ctz_32:
+ case RISCV::BI__builtin_riscv_ctz_64: {
+ Function *F = CGM.getIntrinsic(Intrinsic::cttz, Ops[0]->getType());
+ return Builder.CreateCall(F, {Ops[0], Builder.getInt1(false)});
+ }
// Zbc
case RISCV::BI__builtin_riscv_clmul:
@@ -18879,11 +19263,140 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
case RISCV::BI__builtin_riscv_fsr_64:
ID = Intrinsic::riscv_fsr;
break;
+
+ // Zbkx
+ case RISCV::BI__builtin_riscv_xperm8:
+ ID = Intrinsic::riscv_xperm8;
+ break;
+ case RISCV::BI__builtin_riscv_xperm4:
+ ID = Intrinsic::riscv_xperm4;
+ break;
+
+ // Zbkb
+ case RISCV::BI__builtin_riscv_brev8:
+ ID = Intrinsic::riscv_brev8;
+ break;
+ case RISCV::BI__builtin_riscv_zip_32:
+ ID = Intrinsic::riscv_zip;
+ break;
+ case RISCV::BI__builtin_riscv_unzip_32:
+ ID = Intrinsic::riscv_unzip;
+ break;
}
IntrinsicTypes = {ResultType};
break;
}
+
+ // Zk builtins
+
+ // Zknd
+ case RISCV::BI__builtin_riscv_aes32dsi_32:
+ ID = Intrinsic::riscv_aes32dsi;
+ break;
+ case RISCV::BI__builtin_riscv_aes32dsmi_32:
+ ID = Intrinsic::riscv_aes32dsmi;
+ break;
+ case RISCV::BI__builtin_riscv_aes64ds_64:
+ ID = Intrinsic::riscv_aes64ds;
+ break;
+ case RISCV::BI__builtin_riscv_aes64dsm_64:
+ ID = Intrinsic::riscv_aes64dsm;
+ break;
+ case RISCV::BI__builtin_riscv_aes64im_64:
+ ID = Intrinsic::riscv_aes64im;
+ break;
+
+ // Zkne
+ case RISCV::BI__builtin_riscv_aes32esi_32:
+ ID = Intrinsic::riscv_aes32esi;
+ break;
+ case RISCV::BI__builtin_riscv_aes32esmi_32:
+ ID = Intrinsic::riscv_aes32esmi;
+ break;
+ case RISCV::BI__builtin_riscv_aes64es_64:
+ ID = Intrinsic::riscv_aes64es;
+ break;
+ case RISCV::BI__builtin_riscv_aes64esm_64:
+ ID = Intrinsic::riscv_aes64esm;
+ break;
+
+ // Zknd & Zkne
+ case RISCV::BI__builtin_riscv_aes64ks1i_64:
+ ID = Intrinsic::riscv_aes64ks1i;
+ break;
+ case RISCV::BI__builtin_riscv_aes64ks2_64:
+ ID = Intrinsic::riscv_aes64ks2;
+ break;
+
+ // Zknh
+ case RISCV::BI__builtin_riscv_sha256sig0:
+ ID = Intrinsic::riscv_sha256sig0;
+ IntrinsicTypes = {ResultType};
+ break;
+ case RISCV::BI__builtin_riscv_sha256sig1:
+ ID = Intrinsic::riscv_sha256sig1;
+ IntrinsicTypes = {ResultType};
+ break;
+ case RISCV::BI__builtin_riscv_sha256sum0:
+ ID = Intrinsic::riscv_sha256sum0;
+ IntrinsicTypes = {ResultType};
+ break;
+ case RISCV::BI__builtin_riscv_sha256sum1:
+ ID = Intrinsic::riscv_sha256sum1;
+ IntrinsicTypes = {ResultType};
+ break;
+ case RISCV::BI__builtin_riscv_sha512sig0_64:
+ ID = Intrinsic::riscv_sha512sig0;
+ break;
+ case RISCV::BI__builtin_riscv_sha512sig0h_32:
+ ID = Intrinsic::riscv_sha512sig0h;
+ break;
+ case RISCV::BI__builtin_riscv_sha512sig0l_32:
+ ID = Intrinsic::riscv_sha512sig0l;
+ break;
+ case RISCV::BI__builtin_riscv_sha512sig1_64:
+ ID = Intrinsic::riscv_sha512sig1;
+ break;
+ case RISCV::BI__builtin_riscv_sha512sig1h_32:
+ ID = Intrinsic::riscv_sha512sig1h;
+ break;
+ case RISCV::BI__builtin_riscv_sha512sig1l_32:
+ ID = Intrinsic::riscv_sha512sig1l;
+ break;
+ case RISCV::BI__builtin_riscv_sha512sum0_64:
+ ID = Intrinsic::riscv_sha512sum0;
+ break;
+ case RISCV::BI__builtin_riscv_sha512sum0r_32:
+ ID = Intrinsic::riscv_sha512sum0r;
+ break;
+ case RISCV::BI__builtin_riscv_sha512sum1_64:
+ ID = Intrinsic::riscv_sha512sum1;
+ break;
+ case RISCV::BI__builtin_riscv_sha512sum1r_32:
+ ID = Intrinsic::riscv_sha512sum1r;
+ break;
+
+ // Zksed
+ case RISCV::BI__builtin_riscv_sm4ks:
+ ID = Intrinsic::riscv_sm4ks;
+ IntrinsicTypes = {ResultType};
+ break;
+ case RISCV::BI__builtin_riscv_sm4ed:
+ ID = Intrinsic::riscv_sm4ed;
+ IntrinsicTypes = {ResultType};
+ break;
+
+ // Zksh
+ case RISCV::BI__builtin_riscv_sm3p0:
+ ID = Intrinsic::riscv_sm3p0;
+ IntrinsicTypes = {ResultType};
+ break;
+ case RISCV::BI__builtin_riscv_sm3p1:
+ ID = Intrinsic::riscv_sm3p1;
+ IntrinsicTypes = {ResultType};
+ break;
+
// Vector builtins are handled from here.
#include "clang/Basic/riscv_vector_builtin_cg.inc"
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp
index 414e61f25fb3..6f2679cb15e4 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCUDANV.cpp
@@ -157,6 +157,8 @@ private:
llvm::Function *makeModuleDtorFunction();
/// Transform managed variables for device compilation.
void transformManagedVars();
+ /// Create offloading entries to register globals in RDC mode.
+ void createOffloadingEntries();
public:
CGNVCUDARuntime(CodeGenModule &CGM);
@@ -210,7 +212,8 @@ static std::unique_ptr<MangleContext> InitDeviceMC(CodeGenModule &CGM) {
CGNVCUDARuntime::CGNVCUDARuntime(CodeGenModule &CGM)
: CGCUDARuntime(CGM), Context(CGM.getLLVMContext()),
TheModule(CGM.getModule()),
- RelocatableDeviceCode(CGM.getLangOpts().GPURelocatableDeviceCode),
+ RelocatableDeviceCode(CGM.getLangOpts().GPURelocatableDeviceCode ||
+ CGM.getLangOpts().OffloadingNewDriver),
DeviceMC(InitDeviceMC(CGM)) {
CodeGen::CodeGenTypes &Types = CGM.getTypes();
ASTContext &Ctx = CGM.getContext();
@@ -281,9 +284,8 @@ std::string CGNVCUDARuntime::getDeviceSideName(const NamedDecl *ND) {
DeviceSideName = std::string(ND->getIdentifier()->getName());
// Make unique name for device side static file-scope variable for HIP.
- if (CGM.getContext().shouldExternalizeStaticVar(ND) &&
- CGM.getLangOpts().GPURelocatableDeviceCode &&
- !CGM.getLangOpts().CUID.empty()) {
+ if (CGM.getContext().shouldExternalize(ND) &&
+ CGM.getLangOpts().GPURelocatableDeviceCode) {
SmallString<256> Buffer;
llvm::raw_svector_ostream Out(Buffer);
Out << DeviceSideName;
@@ -332,15 +334,22 @@ void CGNVCUDARuntime::emitDeviceStubBodyNew(CodeGenFunction &CGF,
llvm::BasicBlock *EndBlock = CGF.createBasicBlock("setup.end");
// Lookup cudaLaunchKernel/hipLaunchKernel function.
+ // HIP kernel launching API name depends on -fgpu-default-stream option. For
+ // the default value 'legacy', it is hipLaunchKernel. For 'per-thread',
+ // it is hipLaunchKernel_spt.
// cudaError_t cudaLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim,
// void **args, size_t sharedMem,
// cudaStream_t stream);
- // hipError_t hipLaunchKernel(const void *func, dim3 gridDim, dim3 blockDim,
- // void **args, size_t sharedMem,
- // hipStream_t stream);
+ // hipError_t hipLaunchKernel[_spt](const void *func, dim3 gridDim,
+ // dim3 blockDim, void **args,
+ // size_t sharedMem, hipStream_t stream);
TranslationUnitDecl *TUDecl = CGM.getContext().getTranslationUnitDecl();
DeclContext *DC = TranslationUnitDecl::castToDeclContext(TUDecl);
- auto LaunchKernelName = addPrefixToName("LaunchKernel");
+ std::string KernelLaunchAPI = "LaunchKernel";
+ if (CGF.getLangOpts().HIP && CGF.getLangOpts().GPUDefaultStream ==
+ LangOptions::GPUDefaultStreamKind::PerThread)
+ KernelLaunchAPI = KernelLaunchAPI + "_spt";
+ auto LaunchKernelName = addPrefixToName(KernelLaunchAPI);
IdentifierInfo &cudaLaunchKernelII =
CGM.getContext().Idents.get(LaunchKernelName);
FunctionDecl *cudaLaunchKernelFD = nullptr;
@@ -652,7 +661,7 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
///
/// For CUDA:
/// \code
-/// void __cuda_module_ctor(void*) {
+/// void __cuda_module_ctor() {
/// Handle = __cudaRegisterFatBinary(GpuBinaryBlob);
/// __cuda_register_globals(Handle);
/// }
@@ -660,7 +669,7 @@ llvm::Function *CGNVCUDARuntime::makeRegisterGlobalsFn() {
///
/// For HIP:
/// \code
-/// void __hip_module_ctor(void*) {
+/// void __hip_module_ctor() {
/// if (__hip_gpubin_handle == 0) {
/// __hip_gpubin_handle = __hipRegisterFatBinary(GpuBinaryBlob);
/// __hip_register_globals(__hip_gpubin_handle);
@@ -710,7 +719,7 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
}
llvm::Function *ModuleCtorFunc = llvm::Function::Create(
- llvm::FunctionType::get(VoidTy, VoidPtrTy, false),
+ llvm::FunctionType::get(VoidTy, false),
llvm::GlobalValue::InternalLinkage,
addUnderscoredPrefixToName("_module_ctor"), &TheModule);
llvm::BasicBlock *CtorEntryBB =
@@ -822,7 +831,7 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
if (Linkage != llvm::GlobalValue::InternalLinkage)
GpuBinaryHandle->setVisibility(llvm::GlobalValue::HiddenVisibility);
Address GpuBinaryAddr(
- GpuBinaryHandle,
+ GpuBinaryHandle, VoidPtrPtrTy,
CharUnits::fromQuantity(GpuBinaryHandle->getAlignment()));
{
auto *HandleValue = CtorBuilder.CreateLoad(GpuBinaryAddr);
@@ -924,14 +933,14 @@ llvm::Function *CGNVCUDARuntime::makeModuleCtorFunction() {
///
/// For CUDA:
/// \code
-/// void __cuda_module_dtor(void*) {
+/// void __cuda_module_dtor() {
/// __cudaUnregisterFatBinary(Handle);
/// }
/// \endcode
///
/// For HIP:
/// \code
-/// void __hip_module_dtor(void*) {
+/// void __hip_module_dtor() {
/// if (__hip_gpubin_handle) {
/// __hipUnregisterFatBinary(__hip_gpubin_handle);
/// __hip_gpubin_handle = 0;
@@ -949,7 +958,7 @@ llvm::Function *CGNVCUDARuntime::makeModuleDtorFunction() {
addUnderscoredPrefixToName("UnregisterFatBinary"));
llvm::Function *ModuleDtorFunc = llvm::Function::Create(
- llvm::FunctionType::get(VoidTy, VoidPtrTy, false),
+ llvm::FunctionType::get(VoidTy, false),
llvm::GlobalValue::InternalLinkage,
addUnderscoredPrefixToName("_module_dtor"), &TheModule);
@@ -958,8 +967,9 @@ llvm::Function *CGNVCUDARuntime::makeModuleDtorFunction() {
CGBuilderTy DtorBuilder(CGM, Context);
DtorBuilder.SetInsertPoint(DtorEntryBB);
- Address GpuBinaryAddr(GpuBinaryHandle, CharUnits::fromQuantity(
- GpuBinaryHandle->getAlignment()));
+ Address GpuBinaryAddr(
+ GpuBinaryHandle, GpuBinaryHandle->getValueType(),
+ CharUnits::fromQuantity(GpuBinaryHandle->getAlignment()));
auto *HandleValue = DtorBuilder.CreateLoad(GpuBinaryAddr);
  // There is only one HIP fat binary per linked module; however, there are
// multiple destructor functions. Make sure the fat binary is unregistered
@@ -1099,6 +1109,40 @@ void CGNVCUDARuntime::transformManagedVars() {
}
}
+// Creates offloading entries for all the kernels and globals that must be
+// registered. The linker will provide a pointer to this section so we can
+// register the symbols with the linked device image.
+void CGNVCUDARuntime::createOffloadingEntries() {
+ llvm::OpenMPIRBuilder OMPBuilder(CGM.getModule());
+ OMPBuilder.initialize();
+
+ StringRef Section = "cuda_offloading_entries";
+ for (KernelInfo &I : EmittedKernels)
+ OMPBuilder.emitOffloadingEntry(KernelHandles[I.Kernel],
+ getDeviceSideName(cast<NamedDecl>(I.D)), 0,
+ DeviceVarFlags::OffloadGlobalEntry, Section);
+
+ for (VarInfo &I : DeviceVars) {
+ uint64_t VarSize =
+ CGM.getDataLayout().getTypeAllocSize(I.Var->getValueType());
+ if (I.Flags.getKind() == DeviceVarFlags::Variable) {
+ OMPBuilder.emitOffloadingEntry(
+ I.Var, getDeviceSideName(I.D), VarSize,
+ I.Flags.isManaged() ? DeviceVarFlags::OffloadGlobalManagedEntry
+ : DeviceVarFlags::OffloadGlobalEntry,
+ Section);
+ } else if (I.Flags.getKind() == DeviceVarFlags::Surface) {
+ OMPBuilder.emitOffloadingEntry(I.Var, getDeviceSideName(I.D), VarSize,
+ DeviceVarFlags::OffloadGlobalSurfaceEntry,
+ Section);
+ } else if (I.Flags.getKind() == DeviceVarFlags::Texture) {
+ OMPBuilder.emitOffloadingEntry(I.Var, getDeviceSideName(I.D), VarSize,
+ DeviceVarFlags::OffloadGlobalTextureEntry,
+ Section);
+ }
+ }
+}
+
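Given a translation unit like this (illustrative only; sizes assume a 4-byte
int), the loops above emit one entry per symbol into the
cuda_offloading_entries section:

    __global__ void kern();   // -> {handle(kern), "kern", 0, OffloadGlobalEntry}
    __device__  int dev_var;  // -> {&dev_var, "dev_var", 4, OffloadGlobalEntry}
    __managed__ int mgd_var;  // -> {&mgd_var, "mgd_var", 4,
                              //     OffloadGlobalManagedEntry}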
// Returns module constructor to be added.
llvm::Function *CGNVCUDARuntime::finalizeModule() {
if (CGM.getLangOpts().CUDAIsDevice) {
@@ -1127,7 +1171,11 @@ llvm::Function *CGNVCUDARuntime::finalizeModule() {
}
return nullptr;
}
- return makeModuleCtorFunction();
+ if (!(CGM.getLangOpts().OffloadingNewDriver && RelocatableDeviceCode))
+ return makeModuleCtorFunction();
+
+ createOffloadingEntries();
+ return nullptr;
}
llvm::GlobalValue *CGNVCUDARuntime::getKernelHandle(llvm::Function *F,
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCUDARuntime.h b/contrib/llvm-project/clang/lib/CodeGen/CGCUDARuntime.h
index 1c119dc77fd4..73c7ca7bc15f 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCUDARuntime.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCUDARuntime.h
@@ -52,6 +52,19 @@ public:
Texture, // Builtin texture
};
+ /// The kind flag for an offloading entry.
+ enum OffloadEntryKindFlag : uint32_t {
+ /// Mark the entry as a global entry. This indicates the presence of a
+ /// kernel if the size field is zero and a variable otherwise.
+ OffloadGlobalEntry = 0x0,
+ /// Mark the entry as a managed global variable.
+ OffloadGlobalManagedEntry = 0x1,
+ /// Mark the entry as a surface variable.
+ OffloadGlobalSurfaceEntry = 0x2,
+ /// Mark the entry as a texture variable.
+ OffloadGlobalTextureEntry = 0x3,
+ };
+
private:
unsigned Kind : 2;
unsigned Extern : 1;
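The flag values above land in the flags field of the record that
emitOffloadingEntry writes into the entries section; its layout is roughly the
following (an assumption based on the OpenMP offloading ABI this reuses):

    struct __tgt_offload_entry {
      void   *addr;     // kernel handle or variable address
      char   *name;     // device-side symbol name
      size_t  size;     // 0 for kernels, allocation size for variables
      int32_t flags;    // one of the OffloadEntryKindFlag values
      int32_t reserved; // currently unused
    };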
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp
index 0b441e382f11..42e6c916bed0 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.cpp
@@ -45,8 +45,7 @@ CGCallee CGCXXABI::EmitLoadOfMemberFunctionPointer(
ErrorUnsupportedABI(CGF, "calls through member pointers");
ThisPtrForCall = This.getPointer();
- const FunctionProtoType *FPT =
- MPT->getPointeeType()->getAs<FunctionProtoType>();
+ const auto *FPT = MPT->getPointeeType()->castAs<FunctionProtoType>();
const auto *RD =
cast<CXXRecordDecl>(MPT->getClass()->castAs<RecordType>()->getDecl());
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
index b96222b3ce28..a46f7f37141f 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCXXABI.h
@@ -56,7 +56,10 @@ protected:
return CGF.CXXABIThisValue;
}
Address getThisAddress(CodeGenFunction &CGF) {
- return Address(CGF.CXXABIThisValue, CGF.CXXABIThisAlignment);
+ return Address(
+ CGF.CXXABIThisValue,
+ CGF.ConvertTypeForMem(CGF.CXXABIThisDecl->getType()->getPointeeType()),
+ CGF.CXXABIThisAlignment);
}
/// Issue a diagnostic about unsupported features in the ABI.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
index 34f7a421c933..4e26c35c6342 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCall.cpp
@@ -62,6 +62,8 @@ unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
// TODO: Add support for __vectorcall to LLVM.
case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
+ case CC_AArch64SVEPCS: return llvm::CallingConv::AArch64_SVE_VectorCall;
+ case CC_AMDGPUKernelCall: return llvm::CallingConv::AMDGPU_KERNEL;
case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
@@ -228,6 +230,12 @@ static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D,
if (D->hasAttr<AArch64VectorPcsAttr>())
return CC_AArch64VectorCall;
+ if (D->hasAttr<AArch64SVEPcsAttr>())
+ return CC_AArch64SVEPCS;
+
+ if (D->hasAttr<AMDGPUKernelCallAttr>())
+ return CC_AMDGPUKernelCall;
+
if (D->hasAttr<IntelOclBiccAttr>())
return CC_IntelOclBicc;
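Source-level spellings that reach the two new cases (a sketch; attribute
spellings taken as assumptions from the corresponding Attr definitions):

    void f(void) __attribute__((aarch64_sve_pcs)); // -> CC_AArch64SVEPCS
    void g(void) __attribute__((amdgpu_kernel));   // -> CC_AMDGPUKernelCall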
@@ -833,6 +841,7 @@ CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
FI->NumArgs = argTypes.size();
FI->HasExtParameterInfos = !paramInfos.empty();
FI->getArgsBuffer()[0].type = resultType;
+ FI->MaxVectorWidth = 0;
for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
FI->getArgsBuffer()[i + 1].type = argTypes[i];
for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
@@ -942,8 +951,7 @@ getTypeExpansion(QualType Ty, const ASTContext &Context) {
if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
assert(!CXXRD->isDynamicClass() &&
"cannot expand vtable pointers in dynamic classes");
- for (const CXXBaseSpecifier &BS : CXXRD->bases())
- Bases.push_back(&BS);
+ llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases()));
}
for (const auto *FD : RD->fields()) {
@@ -1012,11 +1020,12 @@ static void forConstantArrayExpansion(CodeGenFunction &CGF,
CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
CharUnits EltAlign =
BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
+ llvm::Type *EltTy = CGF.ConvertTypeForMem(CAE->EltTy);
for (int i = 0, n = CAE->NumElts; i < n; i++) {
llvm::Value *EltAddr = CGF.Builder.CreateConstGEP2_32(
BaseAddr.getElementType(), BaseAddr.getPointer(), 0, i);
- Fn(Address(EltAddr, EltAlign));
+ Fn(Address(EltAddr, EltTy, EltAlign));
}
}
@@ -1276,8 +1285,8 @@ static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
}
// If coercing a fixed vector to a scalable vector for ABI compatibility, and
- // the types match, use the llvm.experimental.vector.insert intrinsic to
- // perform the conversion.
+ // the types match, use the llvm.vector.insert intrinsic to perform the
+ // conversion.
if (auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(Ty)) {
if (auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
// If we are casting a fixed i8 vector to a scalable 16 x i1 predicate
@@ -1804,6 +1813,8 @@ void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
if (AttrOnCallSite) {
// Attributes that should go on the call site only.
+ // FIXME: Look for 'BuiltinAttr' on the function rather than re-checking
+ // the -fno-builtin-foo list.
if (!CodeGenOpts.SimplifyLibCalls || LangOpts.isNoBuiltinFunc(Name))
FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
if (!CodeGenOpts.TrapFuncName.empty())
@@ -1838,7 +1849,7 @@ void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
CodeGenOpts.FP32DenormalMode.str());
}
- if (LangOpts.getFPExceptionMode() == LangOptions::FPE_Ignore)
+ if (LangOpts.getDefaultExceptionMode() == LangOptions::FPE_Ignore)
FuncAttrs.addAttribute("no-trapping-math", "true");
// TODO: Are these all needed?
@@ -1878,6 +1889,37 @@ void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
if (CodeGenOpts.SpeculativeLoadHardening)
FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
+
+ // Add zero-call-used-regs attribute.
+ switch (CodeGenOpts.getZeroCallUsedRegs()) {
+ case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip:
+ FuncAttrs.removeAttribute("zero-call-used-regs");
+ break;
+ case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg:
+ FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr-arg");
+ break;
+ case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR:
+ FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr");
+ break;
+ case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg:
+ FuncAttrs.addAttribute("zero-call-used-regs", "used-arg");
+ break;
+ case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used:
+ FuncAttrs.addAttribute("zero-call-used-regs", "used");
+ break;
+ case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg:
+ FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr-arg");
+ break;
+ case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR:
+ FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr");
+ break;
+ case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg:
+ FuncAttrs.addAttribute("zero-call-used-regs", "all-arg");
+ break;
+ case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All:
+ FuncAttrs.addAttribute("zero-call-used-regs", "all");
+ break;
+ }
}
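As a sketch of the result, compiling with -fzero-call-used-regs=used-gpr stamps
functions with a string attribute, which would appear in the IR roughly as:

    // clang -fzero-call-used-regs=used-gpr foo.c emits, per function:
    //   attributes #N = { ... "zero-call-used-regs"="used-gpr" ... }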
if (getLangOpts().assumeFunctionsAreConvergent()) {
@@ -2166,6 +2208,15 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
if (TargetDecl->hasAttr<NoSplitStackAttr>())
FuncAttrs.removeAttribute("split-stack");
+ if (TargetDecl->hasAttr<ZeroCallUsedRegsAttr>()) {
+ // A function "__attribute__((...))" overrides the command-line flag.
+ auto Kind =
+ TargetDecl->getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs();
+ FuncAttrs.removeAttribute("zero-call-used-regs");
+ FuncAttrs.addAttribute(
+ "zero-call-used-regs",
+ ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Kind));
+ }
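And the per-function override this block implements, sketched at the source
level (the argument strings match the kind names handled above):

    // The attribute wins over any -fzero-call-used-regs= flag:
    __attribute__((zero_call_used_regs("all")))
    void wipe_regs_on_return(void);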
// Add NonLazyBind attribute to function declarations when -fno-plt
// is used.
@@ -2253,7 +2304,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
getLangOpts().Sanitize.has(SanitizerKind::Return);
// Determine if the return type could be partially undef
- if (!CodeGenOpts.DisableNoundefAttrs && HasStrictReturn) {
+ if (CodeGenOpts.EnableNoundefAttrs && HasStrictReturn) {
if (!RetTy->isVoidType() && RetAI.getKind() != ABIArgInfo::Indirect &&
DetermineNoUndef(RetTy, getTypes(), DL, RetAI))
RetAttrs.addAttribute(llvm::Attribute::NoUndef);
@@ -2387,7 +2438,7 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
}
// Decide whether the argument we're handling could be partially undef
- if (!CodeGenOpts.DisableNoundefAttrs &&
+ if (CodeGenOpts.EnableNoundefAttrs &&
DetermineNoUndef(ParamType, getTypes(), DL, AI)) {
Attrs.addAttribute(llvm::Attribute::NoUndef);
}
@@ -2485,6 +2536,20 @@ void CodeGenModule::ConstructAttributeList(StringRef Name,
}
}
+ // From OpenCL spec v3.0.10 section 6.3.5 Alignment of Types:
+ // > For arguments to a __kernel function declared to be a pointer to a
+ // > data type, the OpenCL compiler can assume that the pointee is always
+ // > appropriately aligned as required by the data type.
+ if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>() &&
+ ParamType->isPointerType()) {
+ QualType PTy = ParamType->getPointeeType();
+ if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
+ llvm::Align Alignment =
+ getNaturalPointeeTypeAlignment(ParamType).getAsAlign();
+ Attrs.addAlignmentAttr(Alignment);
+ }
+ }
+
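For instance (a sketch; `align 16` assumes a 16-byte float4 on the target):

    // __kernel void k(__global float4 *p) { ... }
    // The IR parameter for p now carries an `align 16` attribute.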
switch (FI.getExtParameterInfo(ArgNo).getABI()) {
case ParameterABI::Ordinary:
break;
@@ -2632,7 +2697,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
Address ArgStruct = Address::invalid();
if (IRFunctionArgs.hasInallocaArg()) {
ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
- FI.getArgStructAlignment());
+ FI.getArgStruct(), FI.getArgStructAlignment());
assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
}
@@ -2682,7 +2747,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
Address V =
Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
if (ArgI.getInAllocaIndirect())
- V = Address(Builder.CreateLoad(V),
+ V = Address(Builder.CreateLoad(V), ConvertTypeForMem(Ty),
getContext().getTypeAlignInChars(Ty));
ArgVals.push_back(ParamValue::forIndirect(V));
break;
@@ -2831,7 +2896,8 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
assert(pointeeTy->isPointerType());
Address temp =
CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
- Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
+ Address arg(V, ConvertTypeForMem(pointeeTy),
+ getContext().getTypeAlignInChars(pointeeTy));
llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
Builder.CreateStore(incomingErrorValue, temp);
V = temp.getPointer();
@@ -2864,8 +2930,7 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// VLST arguments are coerced to VLATs at the function boundary for
// ABI consistency. If this is a VLST that was coerced to
// a VLAT at the function boundary and the types match up, use
- // llvm.experimental.vector.extract to convert back to the original
- // VLST.
+ // llvm.vector.extract to convert back to the original VLST.
if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) {
llvm::Value *Coerced = Fn->getArg(FirstIRArg);
if (auto *VecTyFrom =
@@ -3172,7 +3237,8 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
// ReturnValue to some other location.
auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
auto *SI = dyn_cast<llvm::StoreInst>(U);
- if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
+ if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer() ||
+ SI->getValueOperand()->getType() != CGF.ReturnValue.getElementType())
return nullptr;
// These aren't actually possible for non-coerced returns, and we
// only care about non-coerced returns on this code path.
@@ -3186,28 +3252,19 @@ static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
if (IP->empty()) return nullptr;
- llvm::Instruction *I = &IP->back();
-
- // Skip lifetime markers
- for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
- IE = IP->rend();
- II != IE; ++II) {
- if (llvm::IntrinsicInst *Intrinsic =
- dyn_cast<llvm::IntrinsicInst>(&*II)) {
- if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
- const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
- ++II;
- if (II == IE)
- break;
- if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
- continue;
- }
- }
- I = &*II;
- break;
- }
- return GetStoreIfValid(I);
+ // Look at the directly preceding instruction, skipping bitcasts and
+ // lifetime markers.
+ for (llvm::Instruction &I : make_range(IP->rbegin(), IP->rend())) {
+ if (isa<llvm::BitCastInst>(&I))
+ continue;
+ if (auto *II = dyn_cast<llvm::IntrinsicInst>(&I))
+ if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end)
+ continue;
+
+ return GetStoreIfValid(&I);
+ }
+ return nullptr;
}
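For example (IR shown as comments, illustrative), given a block that ends with:

    //   store i32 %v, ptr %retval
    //   call void @llvm.lifetime.end.p0(i64 4, ptr %tmp)
    // the backwards walk skips the lifetime.end (and any bitcasts) and still
    // finds the store to %retval.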
llvm::StoreInst *store =
@@ -3408,7 +3465,7 @@ llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
int CharsPerElt =
ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
int MaskIndex = 0;
- llvm::Value *R = llvm::UndefValue::get(ATy);
+ llvm::Value *R = llvm::PoisonValue::get(ATy);
for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth,
DataLayout.isBigEndian());
@@ -3457,8 +3514,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
--EI;
llvm::Value *ArgStruct = &*EI;
llvm::Value *SRet = Builder.CreateStructGEP(
- EI->getType()->getPointerElementType(), ArgStruct,
- RetAI.getInAllocaFieldIndex());
+ FI.getArgStruct(), ArgStruct, RetAI.getInAllocaFieldIndex());
llvm::Type *Ty =
cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();
RV = Builder.CreateAlignedLoad(Ty, SRet, getPointerAlign(), "sret");
@@ -3583,7 +3639,7 @@ void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
// Construct a return type that lacks padding elements.
llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
- RV = llvm::UndefValue::get(returnType);
+ RV = llvm::PoisonValue::get(returnType);
for (unsigned i = 0, e = results.size(); i != e; ++i) {
RV = Builder.CreateInsertValue(RV, results[i], i);
}
@@ -3690,14 +3746,14 @@ static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
// placeholders.
llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
llvm::Type *IRPtrTy = IRTy->getPointerTo();
- llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
+ llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy->getPointerTo());
// FIXME: When we generate this IR in one pass, we shouldn't need
// this win32-specific alignment hack.
CharUnits Align = CharUnits::fromQuantity(4);
Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
- return AggValueSlot::forAddr(Address(Placeholder, Align),
+ return AggValueSlot::forAddr(Address(Placeholder, IRTy, Align),
Ty.getQualifiers(),
AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
@@ -3880,7 +3936,9 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
// because of the crazy ObjC compatibility rules.
llvm::PointerType *destType =
- cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
+ cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
+ llvm::Type *destElemType =
+ CGF.ConvertTypeForMem(CRE->getType()->getPointeeType());
// If the address is a constant null, just pass the appropriate null.
if (isProvablyNull(srcAddr.getPointer())) {
@@ -3890,8 +3948,8 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
}
// Create the temporary.
- Address temp = CGF.CreateTempAlloca(destType->getPointerElementType(),
- CGF.getPointerAlign(), "icr.temp");
+ Address temp =
+ CGF.CreateTempAlloca(destElemType, CGF.getPointerAlign(), "icr.temp");
// Loading an l-value can introduce a cleanup if the l-value is __weak,
// and that cleanup will be conditional if we can't prove that the l-value
// isn't null, so we need to register a dominating point so that the cleanups
@@ -3901,8 +3959,8 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
// Zero-initialize it if we're not doing a copy-initialization.
bool shouldCopy = CRE->shouldCopy();
if (!shouldCopy) {
- llvm::Value *null = llvm::ConstantPointerNull::get(
- cast<llvm::PointerType>(destType->getPointerElementType()));
+ llvm::Value *null =
+ llvm::ConstantPointerNull::get(cast<llvm::PointerType>(destElemType));
CGF.Builder.CreateStore(null, temp);
}
@@ -3944,8 +4002,7 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
assert(srcRV.isScalar());
llvm::Value *src = srcRV.getScalarVal();
- src = CGF.Builder.CreateBitCast(src, destType->getPointerElementType(),
- "icr.cast");
+ src = CGF.Builder.CreateBitCast(src, destElemType, "icr.cast");
// Use an ordinary store, not a store-to-lvalue.
CGF.Builder.CreateStore(src, temp);
@@ -4341,7 +4398,7 @@ void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
type);
// This unreachable is a temporary marker which will be removed later.
llvm::Instruction *IsActive = Builder.CreateUnreachable();
- args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
+ args.addArgCleanupDeactivation(EHStack.stable_begin(), IsActive);
}
return;
}
@@ -4613,6 +4670,19 @@ public:
} // namespace
+static unsigned getMaxVectorWidth(const llvm::Type *Ty) {
+ if (auto *VT = dyn_cast<llvm::VectorType>(Ty))
+ return VT->getPrimitiveSizeInBits().getKnownMinSize();
+ if (auto *AT = dyn_cast<llvm::ArrayType>(Ty))
+ return getMaxVectorWidth(AT->getElementType());
+
+ unsigned MaxVectorWidth = 0;
+ if (auto *ST = dyn_cast<llvm::StructType>(Ty))
+ for (auto *I : ST->elements())
+ MaxVectorWidth = std::max(MaxVectorWidth, getMaxVectorWidth(I));
+ return MaxVectorWidth;
+}
+
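getMaxVectorWidth recurses through aggregates; a worked example (not from the
patch):

    // For a type lowered as { <4 x float>, [2 x <8 x i32>] }:
    //   <4 x float> is 128 bits and <8 x i32> is 256 bits, so the result
    //   is 256.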
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
const CGCallee &Callee,
ReturnValueSlot ReturnValue,
@@ -4687,7 +4757,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
AI->setAlignment(Align.getAsAlign());
AI->setUsedWithInAlloca(true);
assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
- ArgMemory = Address(AI, Align);
+ ArgMemory = Address(AI, ArgStruct, Align);
}
ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
@@ -4785,13 +4855,10 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Store the RValue into the argument struct.
Address Addr =
Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
- unsigned AS = Addr.getType()->getPointerAddressSpace();
- llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
// There are some cases where a trivial bitcast is not avoidable. The
  // definition of a type later in a translation unit may change its type
// from {}* to (%struct.foo*)*.
- if (Addr.getType() != MemType)
- Addr = Builder.CreateBitCast(Addr, MemType);
+ Addr = Builder.CreateElementBitCast(Addr, ConvertTypeForMem(I->Ty));
I->copyInto(*this, Addr);
}
break;
@@ -4914,8 +4981,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
assert(!swiftErrorTemp.isValid() && "multiple swifterror args");
QualType pointeeTy = I->Ty->getPointeeType();
- swiftErrorArg =
- Address(V, getContext().getTypeAlignInChars(pointeeTy));
+ swiftErrorArg = Address(V, ConvertTypeForMem(pointeeTy),
+ getContext().getTypeAlignInChars(pointeeTy));
swiftErrorTemp =
CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
@@ -5083,14 +5150,16 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
#ifndef NDEBUG
// Assert that these structs have equivalent element types.
llvm::StructType *FullTy = CallInfo.getArgStruct();
- llvm::StructType *DeclaredTy =
- cast<llvm::StructType>(LastParamTy->getPointerElementType());
- assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
- for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
- DE = DeclaredTy->element_end(),
- FI = FullTy->element_begin();
- DI != DE; ++DI, ++FI)
- assert(*DI == *FI);
+ if (!LastParamTy->isOpaquePointerTy()) {
+ llvm::StructType *DeclaredTy = cast<llvm::StructType>(
+ LastParamTy->getNonOpaquePointerElementType());
+ assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
+ for (auto DI = DeclaredTy->element_begin(),
+ DE = DeclaredTy->element_end(),
+ FI = FullTy->element_begin();
+ DI != DE; ++DI, ++FI)
+ assert(*DI == *FI);
+ }
#endif
Arg = Builder.CreateBitCast(Arg, LastParamTy);
}
@@ -5167,12 +5236,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
#endif
// Update the largest vector width if any arguments have vector types.
- for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
- if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
- LargestVectorWidth =
- std::max((uint64_t)LargestVectorWidth,
- VT->getPrimitiveSizeInBits().getKnownMinSize());
- }
+ for (unsigned i = 0; i < IRCallArgs.size(); ++i)
+ LargestVectorWidth = std::max(LargestVectorWidth,
+ getMaxVectorWidth(IRCallArgs[i]->getType()));
// Compute the calling convention and attributes.
unsigned CallingConv;
@@ -5191,12 +5257,22 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (InNoMergeAttributedStmt)
Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoMerge);
+ // Add call-site noinline attribute if exists.
+ if (InNoInlineAttributedStmt)
+ Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
+
+ // Add call-site always_inline attribute if exists.
+ if (InAlwaysInlineAttributedStmt)
+ Attrs =
+ Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
+
// Apply some call-site-specific attributes.
// TODO: work this into building the attribute set.
// Apply always_inline to all calls within flatten functions.
// FIXME: should this really take priority over __try, below?
if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
+ !InNoInlineAttributedStmt &&
!(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
Attrs =
Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
@@ -5284,10 +5360,8 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
CI->setName("call");
// Update largest vector width from the return type.
- if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
- LargestVectorWidth =
- std::max((uint64_t)LargestVectorWidth,
- VT->getPrimitiveSizeInBits().getKnownMinSize());
+ LargestVectorWidth =
+ std::max(LargestVectorWidth, getMaxVectorWidth(CI->getType()));
// Insert instrumentation or attach profile metadata at indirect call sites.
// For more details, see the comment before the definition of
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
index 76b90924750c..153f299a1c4b 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGClass.cpp
@@ -1649,92 +1649,73 @@ namespace {
}
};
- static void EmitSanitizerDtorCallback(CodeGenFunction &CGF, llvm::Value *Ptr,
- CharUnits::QuantityType PoisonSize) {
- CodeGenFunction::SanitizerScope SanScope(&CGF);
- // Pass in void pointer and size of region as arguments to runtime
- // function
- llvm::Value *Args[] = {CGF.Builder.CreateBitCast(Ptr, CGF.VoidPtrTy),
- llvm::ConstantInt::get(CGF.SizeTy, PoisonSize)};
-
- llvm::Type *ArgTypes[] = {CGF.VoidPtrTy, CGF.SizeTy};
-
- llvm::FunctionType *FnType =
- llvm::FunctionType::get(CGF.VoidTy, ArgTypes, false);
- llvm::FunctionCallee Fn =
- CGF.CGM.CreateRuntimeFunction(FnType, "__sanitizer_dtor_callback");
- CGF.EmitNounwindRuntimeCall(Fn, Args);
- }
-
- class SanitizeDtorMembers final : public EHScopeStack::Cleanup {
- const CXXDestructorDecl *Dtor;
+ static void EmitSanitizerDtorCallback(CodeGenFunction &CGF, llvm::Value *Ptr,
+ CharUnits::QuantityType PoisonSize) {
+ CodeGenFunction::SanitizerScope SanScope(&CGF);
+ // Pass in void pointer and size of region as arguments to runtime
+ // function
+ llvm::Value *Args[] = {CGF.Builder.CreateBitCast(Ptr, CGF.VoidPtrTy),
+ llvm::ConstantInt::get(CGF.SizeTy, PoisonSize)};
- public:
- SanitizeDtorMembers(const CXXDestructorDecl *Dtor) : Dtor(Dtor) {}
+ llvm::Type *ArgTypes[] = {CGF.VoidPtrTy, CGF.SizeTy};
+
+ llvm::FunctionType *FnType =
+ llvm::FunctionType::get(CGF.VoidTy, ArgTypes, false);
+ llvm::FunctionCallee Fn =
+ CGF.CGM.CreateRuntimeFunction(FnType, "__sanitizer_dtor_callback");
+ CGF.EmitNounwindRuntimeCall(Fn, Args);
+ }
+
+ /// Poison base class with a trivial destructor.
+ struct SanitizeDtorTrivialBase final : EHScopeStack::Cleanup {
+ const CXXRecordDecl *BaseClass;
+ bool BaseIsVirtual;
+ SanitizeDtorTrivialBase(const CXXRecordDecl *Base, bool BaseIsVirtual)
+ : BaseClass(Base), BaseIsVirtual(BaseIsVirtual) {}
- // Generate function call for handling object poisoning.
- // Disables tail call elimination, to prevent the current stack frame
- // from disappearing from the stack trace.
void Emit(CodeGenFunction &CGF, Flags flags) override {
- const ASTRecordLayout &Layout =
- CGF.getContext().getASTRecordLayout(Dtor->getParent());
+ const CXXRecordDecl *DerivedClass =
+ cast<CXXMethodDecl>(CGF.CurCodeDecl)->getParent();
- // Nothing to poison.
- if (Layout.getFieldCount() == 0)
+ Address Addr = CGF.GetAddressOfDirectBaseInCompleteClass(
+ CGF.LoadCXXThisAddress(), DerivedClass, BaseClass, BaseIsVirtual);
+
+ const ASTRecordLayout &BaseLayout =
+ CGF.getContext().getASTRecordLayout(BaseClass);
+ CharUnits BaseSize = BaseLayout.getSize();
+
+ if (!BaseSize.isPositive())
return;
+ EmitSanitizerDtorCallback(CGF, Addr.getPointer(), BaseSize.getQuantity());
+
// Prevent the current stack frame from disappearing from the stack trace.
CGF.CurFn->addFnAttr("disable-tail-calls", "true");
+ }
+ };
- // Construct pointer to region to begin poisoning, and calculate poison
- // size, so that only members declared in this class are poisoned.
- ASTContext &Context = CGF.getContext();
+ class SanitizeDtorFieldRange final : public EHScopeStack::Cleanup {
+ const CXXDestructorDecl *Dtor;
+ unsigned StartIndex;
+ unsigned EndIndex;
- const RecordDecl *Decl = Dtor->getParent();
- auto Fields = Decl->fields();
- auto IsTrivial = [&](const FieldDecl *F) {
- return FieldHasTrivialDestructorBody(Context, F);
- };
-
- auto IsZeroSize = [&](const FieldDecl *F) {
- return F->isZeroSize(Context);
- };
-
- // Poison blocks of fields with trivial destructors making sure that block
- // begin and end do not point to zero-sized fields. They don't have
- // correct offsets so can't be used to calculate poisoning range.
- for (auto It = Fields.begin(); It != Fields.end();) {
- It = std::find_if(It, Fields.end(), [&](const FieldDecl *F) {
- return IsTrivial(F) && !IsZeroSize(F);
- });
- if (It == Fields.end())
- break;
- auto Start = It++;
- It = std::find_if(It, Fields.end(), [&](const FieldDecl *F) {
- return !IsTrivial(F) && !IsZeroSize(F);
- });
-
- PoisonMembers(CGF, (*Start)->getFieldIndex(),
- It == Fields.end() ? -1 : (*It)->getFieldIndex());
- }
- }
+ public:
+ SanitizeDtorFieldRange(const CXXDestructorDecl *Dtor, unsigned StartIndex,
+ unsigned EndIndex)
+ : Dtor(Dtor), StartIndex(StartIndex), EndIndex(EndIndex) {}
- private:
- /// \param layoutStartOffset index of the ASTRecordLayout field to
- /// start poisoning (inclusive)
- /// \param layoutEndOffset index of the ASTRecordLayout field to
- /// end poisoning (exclusive)
- void PoisonMembers(CodeGenFunction &CGF, unsigned layoutStartOffset,
- unsigned layoutEndOffset) {
- ASTContext &Context = CGF.getContext();
+ // Generate function call for handling object poisoning.
+ // Disables tail call elimination to prevent the current stack frame
+ // from disappearing from the stack trace.
+ void Emit(CodeGenFunction &CGF, Flags flags) override {
+ const ASTContext &Context = CGF.getContext();
const ASTRecordLayout &Layout =
Context.getASTRecordLayout(Dtor->getParent());
- // It's a first trivia field so it should be at the begining of char,
+ // It's the first trivial field so it should be at the beginning of a char,
// still round up start offset just in case.
- CharUnits PoisonStart =
- Context.toCharUnitsFromBits(Layout.getFieldOffset(layoutStartOffset) +
- Context.getCharWidth() - 1);
+ CharUnits PoisonStart = Context.toCharUnitsFromBits(
+ Layout.getFieldOffset(StartIndex) + Context.getCharWidth() - 1);
llvm::ConstantInt *OffsetSizePtr =
llvm::ConstantInt::get(CGF.SizeTy, PoisonStart.getQuantity());
@@ -1744,17 +1725,20 @@ namespace {
OffsetSizePtr);
CharUnits PoisonEnd;
- if (layoutEndOffset >= Layout.getFieldCount()) {
+ if (EndIndex >= Layout.getFieldCount()) {
PoisonEnd = Layout.getNonVirtualSize();
} else {
PoisonEnd =
- Context.toCharUnitsFromBits(Layout.getFieldOffset(layoutEndOffset));
+ Context.toCharUnitsFromBits(Layout.getFieldOffset(EndIndex));
}
CharUnits PoisonSize = PoisonEnd - PoisonStart;
if (!PoisonSize.isPositive())
return;
EmitSanitizerDtorCallback(CGF, OffsetPtr, PoisonSize.getQuantity());
+
+ // Prevent the current stack frame from disappearing from the stack trace.
+ CGF.CurFn->addFnAttr("disable-tail-calls", "true");
}
};
@@ -1779,6 +1763,36 @@ namespace {
EmitSanitizerDtorCallback(CGF, VTablePtr, PoisonSize);
}
};
+
+ class SanitizeDtorCleanupBuilder {
+ ASTContext &Context;
+ EHScopeStack &EHStack;
+ const CXXDestructorDecl *DD;
+ llvm::Optional<unsigned> StartIndex;
+
+ public:
+ SanitizeDtorCleanupBuilder(ASTContext &Context, EHScopeStack &EHStack,
+ const CXXDestructorDecl *DD)
+ : Context(Context), EHStack(EHStack), DD(DD), StartIndex(llvm::None) {}
+ void PushCleanupForField(const FieldDecl *Field) {
+ if (Field->isZeroSize(Context))
+ return;
+ unsigned FieldIndex = Field->getFieldIndex();
+ if (FieldHasTrivialDestructorBody(Context, Field)) {
+ if (!StartIndex)
+ StartIndex = FieldIndex;
+ } else if (StartIndex) {
+ EHStack.pushCleanup<SanitizeDtorFieldRange>(
+ NormalAndEHCleanup, DD, StartIndex.getValue(), FieldIndex);
+ StartIndex = None;
+ }
+ }
+ void End() {
+ if (StartIndex)
+ EHStack.pushCleanup<SanitizeDtorFieldRange>(NormalAndEHCleanup, DD,
+ StartIndex.getValue(), -1);
+ }
+ };
} // end anonymous namespace
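How the builder coalesces fields, on a hypothetical class (indices are
getFieldIndex values):

    // struct S { int a; int b; std::string s; int c; }; with MSan
    // use-after-dtor enabled, ~S() gets:
    //   a, b -> one SanitizeDtorFieldRange(DD, 0, 2), pushed when s is seen
    //   s    -> destroyed by its own cleanup, which ends the trivial run
    //   c    -> SanitizeDtorFieldRange(DD, 3, -1), pushed by End()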
/// Emit all code that comes at the end of class's
@@ -1841,13 +1855,19 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
auto *BaseClassDecl =
cast<CXXRecordDecl>(Base.getType()->castAs<RecordType>()->getDecl());
- // Ignore trivial destructors.
- if (BaseClassDecl->hasTrivialDestructor())
- continue;
-
- EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
- BaseClassDecl,
- /*BaseIsVirtual*/ true);
+ if (BaseClassDecl->hasTrivialDestructor()) {
+ // Under SanitizeMemoryUseAfterDtor, poison the trivial base class
+ // memory. For non-trivial base classes the same is done in the class
+ // destructor.
+ if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
+ SanOpts.has(SanitizerKind::Memory) && !BaseClassDecl->isEmpty())
+ EHStack.pushCleanup<SanitizeDtorTrivialBase>(NormalAndEHCleanup,
+ BaseClassDecl,
+ /*BaseIsVirtual*/ true);
+ } else {
+ EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup, BaseClassDecl,
+ /*BaseIsVirtual*/ true);
+ }
}
return;
@@ -1869,36 +1889,46 @@ void CodeGenFunction::EnterDtorCleanups(const CXXDestructorDecl *DD,
CXXRecordDecl *BaseClassDecl = Base.getType()->getAsCXXRecordDecl();
- // Ignore trivial destructors.
- if (BaseClassDecl->hasTrivialDestructor())
- continue;
-
- EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup,
- BaseClassDecl,
- /*BaseIsVirtual*/ false);
+ if (BaseClassDecl->hasTrivialDestructor()) {
+ if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
+ SanOpts.has(SanitizerKind::Memory) && !BaseClassDecl->isEmpty())
+ EHStack.pushCleanup<SanitizeDtorTrivialBase>(NormalAndEHCleanup,
+ BaseClassDecl,
+ /*BaseIsVirtual*/ false);
+ } else {
+ EHStack.pushCleanup<CallBaseDtor>(NormalAndEHCleanup, BaseClassDecl,
+ /*BaseIsVirtual*/ false);
+ }
}
// Poison fields such that access after their destructors are
// invoked, and before the base class destructor runs, is invalid.
- if (CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
- SanOpts.has(SanitizerKind::Memory))
- EHStack.pushCleanup<SanitizeDtorMembers>(NormalAndEHCleanup, DD);
+ bool SanitizeFields = CGM.getCodeGenOpts().SanitizeMemoryUseAfterDtor &&
+ SanOpts.has(SanitizerKind::Memory);
+ SanitizeDtorCleanupBuilder SanitizeBuilder(getContext(), EHStack, DD);
// Destroy direct fields.
for (const auto *Field : ClassDecl->fields()) {
+ if (SanitizeFields)
+ SanitizeBuilder.PushCleanupForField(Field);
+
QualType type = Field->getType();
QualType::DestructionKind dtorKind = type.isDestructedType();
- if (!dtorKind) continue;
+ if (!dtorKind)
+ continue;
// Anonymous union members do not have their destructors called.
const RecordType *RT = type->getAsUnionType();
- if (RT && RT->getDecl()->isAnonymousStructOrUnion()) continue;
+ if (RT && RT->getDecl()->isAnonymousStructOrUnion())
+ continue;
CleanupKind cleanupKind = getCleanupKind(dtorKind);
- EHStack.pushCleanup<DestroyField>(cleanupKind, Field,
- getDestroyer(dtorKind),
- cleanupKind & EHCleanup);
+ EHStack.pushCleanup<DestroyField>(
+ cleanupKind, Field, getDestroyer(dtorKind), cleanupKind & EHCleanup);
}
+
+ if (SanitizeFields)
+ SanitizeBuilder.End();
}
/// EmitCXXAggrConstructorCall - Emit a loop to call a particular
@@ -2148,8 +2178,8 @@ void CodeGenFunction::EmitCXXConstructorCall(const CXXConstructorDecl *D,
assert(Args.size() == 2 && "unexpected argcount for trivial ctor");
QualType SrcTy = D->getParamDecl(0)->getType().getNonReferenceType();
- Address Src(Args[1].getRValue(*this).getScalarVal(),
- CGM.getNaturalTypeAlignment(SrcTy));
+ Address Src = Address(Args[1].getRValue(*this).getScalarVal(), ConvertTypeForMem(SrcTy),
+ CGM.getNaturalTypeAlignment(SrcTy));
LValue SrcLVal = MakeAddrLValue(Src, SrcTy);
QualType DestTy = getContext().getTypeDeclType(ClassDecl);
LValue DestLVal = MakeAddrLValue(This, DestTy);
@@ -2332,8 +2362,8 @@ CodeGenFunction::EmitSynthesizedCXXCopyCtorCall(const CXXConstructorDecl *D,
// Push the src ptr.
QualType QT = *(FPT->param_type_begin());
llvm::Type *t = CGM.getTypes().ConvertType(QT);
- Src = Builder.CreateBitCast(Src, t);
- Args.add(RValue::get(Src.getPointer()), QT);
+ llvm::Value *SrcVal = Builder.CreateBitCast(Src.getPointer(), t);
+ Args.add(RValue::get(SrcVal), QT);
// Skip over first argument (Src).
EmitCallArgs(Args, FPT, drop_begin(E->arguments(), 1), E->getConstructor(),
@@ -2665,9 +2695,9 @@ void CodeGenFunction::EmitTypeMetadataCodeForVCall(const CXXRecordDecl *RD,
if (SanOpts.has(SanitizerKind::CFIVCall))
EmitVTablePtrCheckForCall(RD, VTable, CodeGenFunction::CFITCK_VCall, Loc);
else if (CGM.getCodeGenOpts().WholeProgramVTables &&
- // Don't insert type test assumes if we are forcing public std
+ // Don't insert type test assumes if we are forcing public
// visibility.
- !CGM.HasLTOVisibilityPublicStd(RD)) {
+ !CGM.AlwaysHasLTOVisibilityPublic(RD)) {
llvm::Metadata *MD =
CGM.CreateMetadataIdentifierForType(QualType(RD->getTypeForDecl(), 0));
llvm::Value *TypeId =
@@ -2691,8 +2721,7 @@ void CodeGenFunction::EmitVTablePtrCheckForCall(const CXXRecordDecl *RD,
EmitVTablePtrCheck(RD, VTable, TCK, Loc);
}
-void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T,
- llvm::Value *Derived,
+void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T, Address Derived,
bool MayBeNull,
CFITypeCheckKind TCK,
SourceLocation Loc) {
@@ -2715,7 +2744,7 @@ void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T,
if (MayBeNull) {
llvm::Value *DerivedNotNull =
- Builder.CreateIsNotNull(Derived, "cast.nonnull");
+ Builder.CreateIsNotNull(Derived.getPointer(), "cast.nonnull");
llvm::BasicBlock *CheckBlock = createBasicBlock("cast.check");
ContBlock = createBasicBlock("cast.cont");
@@ -2726,8 +2755,8 @@ void CodeGenFunction::EmitVTablePtrCheckForCast(QualType T,
}
llvm::Value *VTable;
- std::tie(VTable, ClassDecl) = CGM.getCXXABI().LoadVTablePtr(
- *this, Address(Derived, getPointerAlign()), ClassDecl);
+ std::tie(VTable, ClassDecl) =
+ CGM.getCXXABI().LoadVTablePtr(*this, Derived, ClassDecl);
EmitVTablePtrCheck(ClassDecl, VTable, TCK, Loc);
@@ -2829,7 +2858,8 @@ bool CodeGenFunction::ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD) {
}
llvm::Value *CodeGenFunction::EmitVTableTypeCheckedLoad(
- const CXXRecordDecl *RD, llvm::Value *VTable, uint64_t VTableByteOffset) {
+ const CXXRecordDecl *RD, llvm::Value *VTable, llvm::Type *VTableTy,
+ uint64_t VTableByteOffset) {
SanitizerScope SanScope(this);
EmitSanitizerStatReport(llvm::SanStat_CFI_VCall);
@@ -2854,7 +2884,7 @@ llvm::Value *CodeGenFunction::EmitVTableTypeCheckedLoad(
}
return Builder.CreateBitCast(Builder.CreateExtractValue(CheckedLoad, 0),
- VTable->getType()->getPointerElementType());
+ VTableTy);
}
void CodeGenFunction::EmitForwardingCallToLambda(
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.cpp
index b9364fcd2231..5035ed34358d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCleanup.cpp
@@ -38,13 +38,13 @@ DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
// These automatically dominate and don't need to be saved.
if (!DominatingLLVMValue::needsSaving(V))
- return saved_type(V, ScalarLiteral);
+ return saved_type(V, nullptr, ScalarLiteral);
// Everything else needs an alloca.
Address addr =
CGF.CreateDefaultAlignTempAlloca(V->getType(), "saved-rvalue");
CGF.Builder.CreateStore(V, addr);
- return saved_type(addr.getPointer(), ScalarAddress);
+ return saved_type(addr.getPointer(), nullptr, ScalarAddress);
}
if (rv.isComplex()) {
@@ -54,19 +54,19 @@ DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
Address addr = CGF.CreateDefaultAlignTempAlloca(ComplexTy, "saved-complex");
CGF.Builder.CreateStore(V.first, CGF.Builder.CreateStructGEP(addr, 0));
CGF.Builder.CreateStore(V.second, CGF.Builder.CreateStructGEP(addr, 1));
- return saved_type(addr.getPointer(), ComplexAddress);
+ return saved_type(addr.getPointer(), nullptr, ComplexAddress);
}
assert(rv.isAggregate());
Address V = rv.getAggregateAddress(); // TODO: volatile?
if (!DominatingLLVMValue::needsSaving(V.getPointer()))
- return saved_type(V.getPointer(), AggregateLiteral,
+ return saved_type(V.getPointer(), V.getElementType(), AggregateLiteral,
V.getAlignment().getQuantity());
Address addr =
CGF.CreateTempAlloca(V.getType(), CGF.getPointerAlign(), "saved-rvalue");
CGF.Builder.CreateStore(V.getPointer(), addr);
- return saved_type(addr.getPointer(), AggregateAddress,
+ return saved_type(addr.getPointer(), V.getElementType(), AggregateAddress,
V.getAlignment().getQuantity());
}
@@ -75,8 +75,9 @@ DominatingValue<RValue>::saved_type::save(CodeGenFunction &CGF, RValue rv) {
/// point.
RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
auto getSavingAddress = [&](llvm::Value *value) {
- auto alignment = cast<llvm::AllocaInst>(value)->getAlignment();
- return Address(value, CharUnits::fromQuantity(alignment));
+ auto *AI = cast<llvm::AllocaInst>(value);
+ return Address(value, AI->getAllocatedType(),
+ CharUnits::fromQuantity(AI->getAlign().value()));
};
switch (K) {
case ScalarLiteral:
@@ -84,10 +85,12 @@ RValue DominatingValue<RValue>::saved_type::restore(CodeGenFunction &CGF) {
case ScalarAddress:
return RValue::get(CGF.Builder.CreateLoad(getSavingAddress(Value)));
case AggregateLiteral:
- return RValue::getAggregate(Address(Value, CharUnits::fromQuantity(Align)));
+ return RValue::getAggregate(
+ Address(Value, ElementType, CharUnits::fromQuantity(Align)));
case AggregateAddress: {
auto addr = CGF.Builder.CreateLoad(getSavingAddress(Value));
- return RValue::getAggregate(Address(addr, CharUnits::fromQuantity(Align)));
+ return RValue::getAggregate(
+ Address(addr, ElementType, CharUnits::fromQuantity(Align)));
}
case ComplexAddress: {
Address address = getSavingAddress(Value);
@@ -180,6 +183,15 @@ void *EHScopeStack::pushCleanup(CleanupKind Kind, size_t Size) {
bool IsNormalCleanup = Kind & NormalCleanup;
bool IsEHCleanup = Kind & EHCleanup;
bool IsLifetimeMarker = Kind & LifetimeMarker;
+
+ // Per C++ [except.terminate], it is implementation-defined whether none,
+ // some, or all cleanups are called before std::terminate. Thus, when
+ // terminate is the current EH scope, we may skip adding any EH cleanup
+ // scopes.
+ if (InnermostEHScope != stable_end() &&
+ find(InnermostEHScope)->getKind() == EHScope::Terminate)
+ IsEHCleanup = false;
+
EHCleanupScope *Scope =
new (Buffer) EHCleanupScope(IsNormalCleanup,
IsEHCleanup,
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp
index c1763cbbc5a0..594c7d49df3c 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGCoroutine.cpp
@@ -238,8 +238,8 @@ static LValueOrRValue emitSuspendExpression(CodeGenFunction &CGF, CGCoroData &Co
auto Loc = S.getResumeExpr()->getExprLoc();
auto *Catch = new (CGF.getContext())
CXXCatchStmt(Loc, /*exDecl=*/nullptr, Coro.ExceptionHandler);
- auto *TryBody =
- CompoundStmt::Create(CGF.getContext(), S.getResumeExpr(), Loc, Loc);
+ auto *TryBody = CompoundStmt::Create(CGF.getContext(), S.getResumeExpr(),
+ FPOptionsOverride(), Loc, Loc);
TryStmt = CXXTryStmt::Create(CGF.getContext(), Loc, TryBody, Catch);
CGF.EnterCXXTryStmt(*TryStmt);
}
@@ -465,72 +465,6 @@ struct CallCoroDelete final : public EHScopeStack::Cleanup {
};
}
-namespace {
-struct GetReturnObjectManager {
- CodeGenFunction &CGF;
- CGBuilderTy &Builder;
- const CoroutineBodyStmt &S;
-
- Address GroActiveFlag;
- CodeGenFunction::AutoVarEmission GroEmission;
-
- GetReturnObjectManager(CodeGenFunction &CGF, const CoroutineBodyStmt &S)
- : CGF(CGF), Builder(CGF.Builder), S(S), GroActiveFlag(Address::invalid()),
- GroEmission(CodeGenFunction::AutoVarEmission::invalid()) {}
-
- // The gro variable has to outlive coroutine frame and coroutine promise, but,
- // it can only be initialized after coroutine promise was created, thus, we
- // split its emission in two parts. EmitGroAlloca emits an alloca and sets up
- // cleanups. Later when coroutine promise is available we initialize the gro
- // and sets the flag that the cleanup is now active.
-
- void EmitGroAlloca() {
- auto *GroDeclStmt = dyn_cast<DeclStmt>(S.getResultDecl());
- if (!GroDeclStmt) {
- // If get_return_object returns void, no need to do an alloca.
- return;
- }
-
- auto *GroVarDecl = cast<VarDecl>(GroDeclStmt->getSingleDecl());
-
- // Set GRO flag that it is not initialized yet
- GroActiveFlag =
- CGF.CreateTempAlloca(Builder.getInt1Ty(), CharUnits::One(), "gro.active");
- Builder.CreateStore(Builder.getFalse(), GroActiveFlag);
-
- GroEmission = CGF.EmitAutoVarAlloca(*GroVarDecl);
-
- // Remember the top of EHStack before emitting the cleanup.
- auto old_top = CGF.EHStack.stable_begin();
- CGF.EmitAutoVarCleanups(GroEmission);
- auto top = CGF.EHStack.stable_begin();
-
- // Make the cleanup conditional on gro.active
- for (auto b = CGF.EHStack.find(top), e = CGF.EHStack.find(old_top);
- b != e; b++) {
- if (auto *Cleanup = dyn_cast<EHCleanupScope>(&*b)) {
- assert(!Cleanup->hasActiveFlag() && "cleanup already has active flag?");
- Cleanup->setActiveFlag(GroActiveFlag);
- Cleanup->setTestFlagInEHCleanup();
- Cleanup->setTestFlagInNormalCleanup();
- }
- }
- }
-
- void EmitGroInit() {
- if (!GroActiveFlag.isValid()) {
- // No Gro variable was allocated. Simply emit the call to
- // get_return_object.
- CGF.EmitStmt(S.getResultDecl());
- return;
- }
-
- CGF.EmitAutoVarInit(GroEmission);
- Builder.CreateStore(Builder.getTrue(), GroActiveFlag);
- }
-};
-}
-
static void emitBodyAndFallthrough(CodeGenFunction &CGF,
const CoroutineBodyStmt &S, Stmt *Body) {
CGF.EmitStmt(Body);
@@ -597,13 +531,6 @@ void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
CGM.getIntrinsic(llvm::Intrinsic::coro_begin), {CoroId, Phi});
CurCoro.Data->CoroBegin = CoroBegin;
-  // We need to emit `get_return_object` first. According to:
-  // [dcl.fct.def.coroutine]p7
-  // The call to get_return_object is sequenced before the call to
-  // initial_suspend and is invoked at most once.
- GetReturnObjectManager GroManager(*this, S);
- GroManager.EmitGroAlloca();
-
CurCoro.Data->CleanupJD = getJumpDestInCurrentScope(RetBB);
{
CGDebugInfo *DI = getDebugInfo();
@@ -641,8 +568,23 @@ void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
// promise local variable was not emitted yet.
CoroId->setArgOperand(1, PromiseAddrVoidPtr);
- // Now we have the promise, initialize the GRO
- GroManager.EmitGroInit();
+ // ReturnValue should be valid as long as the coroutine's return type
+ // is not void; the assertion lets later code rely on that invariant.
+ assert(ReturnValue.isValid() == (bool)S.getReturnStmt());
+ // Now we have the promise, initialize the GRO.
+ // We need to emit `get_return_object` first. According to:
+ // [dcl.fct.def.coroutine]p7
+ // The call to get_return_object is sequenced before the call to
+ // initial_suspend and is invoked at most once.
+ //
+ // So we can't emit the return value when we emit the return statement;
+ // otherwise the call to get_return_object wouldn't come before
+ // initial_suspend.
+ if (ReturnValue.isValid()) {
+ EmitAnyExprToMem(S.getReturnValue(), ReturnValue,
+ S.getReturnValue()->getType().getQualifiers(),
+ /*IsInit*/ true);
+ }
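Net effect, sketched on a hypothetical coroutine type `task`:

    // task f() { co_return 42; }
    // Emission order is now:
    //   1. construct the promise
    //   2. ReturnValue = promise.get_return_object();  // emitted here
    //   3. co_await promise.initial_suspend();
    //   4. the body; the trailing ReturnStmt no longer re-emits the value
    //      (its retvalue is cleared below)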
EHStack.pushCleanup<CallCoroEnd>(EHCleanup);
@@ -705,12 +647,15 @@ void CodeGenFunction::EmitCoroutineBody(const CoroutineBodyStmt &S) {
llvm::Function *CoroEnd = CGM.getIntrinsic(llvm::Intrinsic::coro_end);
Builder.CreateCall(CoroEnd, {NullPtr, Builder.getFalse()});
- if (Stmt *Ret = S.getReturnStmt())
+ if (Stmt *Ret = S.getReturnStmt()) {
+ // Since we already emitted the return value above, we shouldn't
+ // emit it again here.
+ cast<ReturnStmt>(Ret)->setRetValue(nullptr);
EmitStmt(Ret);
+ }
- // LLVM require the frontend to add the function attribute. See
- // Coroutines.rst.
- CurFn->addFnAttr("coroutine.presplit", "0");
+ // LLVM requires the frontend to mark the coroutine.
+ CurFn->setPresplitCoroutine();
}
// Emit coroutine intrinsic and patch up arguments of the token type.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
index 1a9080604a79..7e9e86763af9 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.cpp
@@ -248,6 +248,7 @@ PrintingPolicy CGDebugInfo::getPrintingPolicy() const {
PP.PrintCanonicalTypes = true;
PP.UsePreferredNames = false;
PP.AlwaysIncludeTypeForTemplateArgument = true;
+ PP.UseEnumerators = false;
// Apply -fdebug-prefix-map.
PP.Callbacks = &PrintCB;
@@ -424,16 +425,16 @@ CGDebugInfo::createFile(StringRef FileName,
SmallString<128> DirBuf;
SmallString<128> FileBuf;
if (llvm::sys::path::is_absolute(RemappedFile)) {
- // Strip the common prefix (if it is more than just "/") from current
- // directory and FileName for a more space-efficient encoding.
+ // Strip the common prefix (if it is more than just "/" or "C:\") from
+ // current directory and FileName for a more space-efficient encoding.
auto FileIt = llvm::sys::path::begin(RemappedFile);
auto FileE = llvm::sys::path::end(RemappedFile);
auto CurDirIt = llvm::sys::path::begin(CurDir);
auto CurDirE = llvm::sys::path::end(CurDir);
for (; CurDirIt != CurDirE && *CurDirIt == *FileIt; ++CurDirIt, ++FileIt)
llvm::sys::path::append(DirBuf, *CurDirIt);
- if (std::distance(llvm::sys::path::begin(CurDir), CurDirIt) == 1) {
- // Don't strip the common prefix if it is only the root "/"
+ if (llvm::sys::path::root_path(DirBuf) == DirBuf) {
+ // Don't strip the common prefix if it is only the root ("/" or "C:\")
// since that would make LLVM diagnostic locations confusing.
Dir = {};
File = RemappedFile;
@@ -444,7 +445,8 @@ CGDebugInfo::createFile(StringRef FileName,
File = FileBuf;
}
} else {
- Dir = CurDir;
+ if (!llvm::sys::path::is_absolute(FileName))
+ Dir = CurDir;
File = RemappedFile;
}
llvm::DIFile *F = DBuilder.createFile(File, Dir, CSInfo, Source);
@@ -517,8 +519,9 @@ void CGDebugInfo::CreateCompileUnit() {
// a relative path, so we look into the actual file entry for the main
// file to determine the real absolute path for the file.
std::string MainFileDir;
- if (const FileEntry *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
- MainFileDir = std::string(MainFile->getDir()->getName());
+ if (Optional<FileEntryRef> MainFile =
+ SM.getFileEntryRefForID(SM.getMainFileID())) {
+ MainFileDir = std::string(MainFile->getDir().getName());
if (!llvm::sys::path::is_absolute(MainFileName)) {
llvm::SmallString<1024> MainFileDirSS(MainFileDir);
llvm::sys::path::append(MainFileDirSS, MainFileName);
@@ -927,28 +930,8 @@ static llvm::dwarf::Tag getNextQualifier(Qualifiers &Q) {
return (llvm::dwarf::Tag)0;
}
-// Strip MacroQualifiedTypeLoc and AttributedTypeLoc
-// as their corresponding types will be ignored
-// during code generation. Stripping them allows
-// to maintain proper TypeLoc for a given type
-// during code generation.
-static TypeLoc StripMacroAttributed(TypeLoc TL) {
- if (!TL)
- return TL;
-
- while (true) {
- if (auto MTL = TL.getAs<MacroQualifiedTypeLoc>())
- TL = MTL.getInnerLoc();
- else if (auto ATL = TL.getAs<AttributedTypeLoc>())
- TL = ATL.getModifiedLoc();
- else
- break;
- }
- return TL;
-}
-
-llvm::DIType *CGDebugInfo::CreateQualifiedType(QualType Ty, llvm::DIFile *Unit,
- TypeLoc TL) {
+llvm::DIType *CGDebugInfo::CreateQualifiedType(QualType Ty,
+ llvm::DIFile *Unit) {
QualifierCollector Qc;
const Type *T = Qc.strip(Ty);
@@ -962,15 +945,7 @@ llvm::DIType *CGDebugInfo::CreateQualifiedType(QualType Ty, llvm::DIFile *Unit,
return getOrCreateType(QualType(T, 0), Unit);
}
- QualType NextTy = Qc.apply(CGM.getContext(), T);
- TypeLoc NextTL;
- if (NextTy.hasQualifiers())
- NextTL = TL;
- else if (TL) {
- if (auto QTL = TL.getAs<QualifiedTypeLoc>())
- NextTL = StripMacroAttributed(QTL.getNextTypeLoc());
- }
- auto *FromTy = getOrCreateType(NextTy, Unit, NextTL);
+ auto *FromTy = getOrCreateType(Qc.apply(CGM.getContext(), T), Unit);
// No need to fill in the Name, Line, Size, Alignment, Offset in case of
// CVR derived types.
@@ -1014,10 +989,10 @@ llvm::DIType *CGDebugInfo::CreateType(const ObjCObjectPointerType *Ty,
Ty->getPointeeType(), Unit);
}
-llvm::DIType *CGDebugInfo::CreateType(const PointerType *Ty, llvm::DIFile *Unit,
- TypeLoc TL) {
+llvm::DIType *CGDebugInfo::CreateType(const PointerType *Ty,
+ llvm::DIFile *Unit) {
return CreatePointerLikeType(llvm::dwarf::DW_TAG_pointer_type, Ty,
- Ty->getPointeeType(), Unit, TL);
+ Ty->getPointeeType(), Unit);
}
/// \return whether a C++ mangling exists for the type defined by TD.
@@ -1158,8 +1133,7 @@ CGDebugInfo::getOrCreateRecordFwdDecl(const RecordType *Ty,
llvm::DIType *CGDebugInfo::CreatePointerLikeType(llvm::dwarf::Tag Tag,
const Type *Ty,
QualType PointeeTy,
- llvm::DIFile *Unit,
- TypeLoc TL) {
+ llvm::DIFile *Unit) {
// Bit size, align and offset of the type.
// Size is always the size of a pointer. We can't use getTypeSize here
// because that does not return the correct value for references.
@@ -1169,52 +1143,32 @@ llvm::DIType *CGDebugInfo::CreatePointerLikeType(llvm::dwarf::Tag Tag,
Optional<unsigned> DWARFAddressSpace =
CGM.getTarget().getDWARFAddressSpace(AddressSpace);
- llvm::DINodeArray Annotations = nullptr;
- TypeLoc NextTL;
- if (TL) {
- SmallVector<llvm::Metadata *, 4> Annots;
- NextTL = TL.getNextTypeLoc();
- if (NextTL) {
- // Traverse all MacroQualifiedTypeLoc, QualifiedTypeLoc and
- // AttributedTypeLoc type locations so we can collect
- // BTFTypeTag attributes for this pointer.
- while (true) {
- if (auto MTL = NextTL.getAs<MacroQualifiedTypeLoc>()) {
- NextTL = MTL.getInnerLoc();
- } else if (auto QTL = NextTL.getAs<QualifiedTypeLoc>()) {
- NextTL = QTL.getNextTypeLoc();
- } else if (auto ATL = NextTL.getAs<AttributedTypeLoc>()) {
- if (const auto *A = ATL.getAttrAs<BTFTypeTagAttr>()) {
- StringRef BTFTypeTag = A->getBTFTypeTag();
- if (!BTFTypeTag.empty()) {
- llvm::Metadata *Ops[2] = {
- llvm::MDString::get(CGM.getLLVMContext(),
- StringRef("btf_type_tag")),
- llvm::MDString::get(CGM.getLLVMContext(), BTFTypeTag)};
- Annots.insert(Annots.begin(),
- llvm::MDNode::get(CGM.getLLVMContext(), Ops));
- }
- }
- NextTL = ATL.getModifiedLoc();
- } else {
- break;
- }
- }
+ SmallVector<llvm::Metadata *, 4> Annots;
+ auto *BTFAttrTy = dyn_cast<BTFTagAttributedType>(PointeeTy);
+ while (BTFAttrTy) {
+ StringRef Tag = BTFAttrTy->getAttr()->getBTFTypeTag();
+ if (!Tag.empty()) {
+ llvm::Metadata *Ops[2] = {
+ llvm::MDString::get(CGM.getLLVMContext(), StringRef("btf_type_tag")),
+ llvm::MDString::get(CGM.getLLVMContext(), Tag)};
+ Annots.insert(Annots.begin(),
+ llvm::MDNode::get(CGM.getLLVMContext(), Ops));
}
-
- NextTL = StripMacroAttributed(TL.getNextTypeLoc());
- if (Annots.size() > 0)
- Annotations = DBuilder.getOrCreateArray(Annots);
+ BTFAttrTy = dyn_cast<BTFTagAttributedType>(BTFAttrTy->getWrappedType());
}
+ llvm::DINodeArray Annotations = nullptr;
+ if (Annots.size() > 0)
+ Annotations = DBuilder.getOrCreateArray(Annots);
+
if (Tag == llvm::dwarf::DW_TAG_reference_type ||
Tag == llvm::dwarf::DW_TAG_rvalue_reference_type)
return DBuilder.createReferenceType(Tag, getOrCreateType(PointeeTy, Unit),
Size, Align, DWARFAddressSpace);
else
- return DBuilder.createPointerType(getOrCreateType(PointeeTy, Unit, NextTL),
- Size, Align, DWARFAddressSpace,
- StringRef(), Annotations);
+ return DBuilder.createPointerType(getOrCreateType(PointeeTy, Unit), Size,
+ Align, DWARFAddressSpace, StringRef(),
+ Annotations);
}
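
For orientation, a minimal sketch of the source pattern the BTFTagAttributedType walk above handles (the macro names are illustrative, not part of this change):

    #define __tag1 __attribute__((btf_type_tag("tag1")))
    #define __tag2 __attribute__((btf_type_tag("tag2")))
    // Each tag on the pointee becomes one {"btf_type_tag", <tag>} annotation
    // MDNode attached to the pointer's debug type.
    int __tag1 __tag2 *p;
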
llvm::DIType *CGDebugInfo::getOrCreateStructPtrType(StringRef Name,
@@ -1331,11 +1285,8 @@ llvm::DIType *CGDebugInfo::CreateType(const TemplateSpecializationType *Ty,
llvm::DIType *CGDebugInfo::CreateType(const TypedefType *Ty,
llvm::DIFile *Unit) {
- TypeLoc TL;
- if (const TypeSourceInfo *TSI = Ty->getDecl()->getTypeSourceInfo())
- TL = TSI->getTypeLoc();
llvm::DIType *Underlying =
- getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit, TL);
+ getOrCreateType(Ty->getDecl()->getUnderlyingType(), Unit);
if (Ty->getDecl()->hasAttr<NoDebugAttr>())
return Underlying;
@@ -1375,6 +1326,7 @@ static unsigned getDwarfCC(CallingConv CC) {
return llvm::dwarf::DW_CC_LLVM_X86_64SysV;
case CC_AAPCS:
case CC_AArch64VectorCall:
+ case CC_AArch64SVEPCS:
return llvm::dwarf::DW_CC_LLVM_AAPCS;
case CC_AAPCS_VFP:
return llvm::dwarf::DW_CC_LLVM_AAPCS_VFP;
@@ -1383,6 +1335,7 @@ static unsigned getDwarfCC(CallingConv CC) {
case CC_SpirFunction:
return llvm::dwarf::DW_CC_LLVM_SpirFunction;
case CC_OpenCLKernel:
+ case CC_AMDGPUKernelCall:
return llvm::dwarf::DW_CC_LLVM_OpenCLKernel;
case CC_Swift:
return llvm::dwarf::DW_CC_LLVM_Swift;
@@ -1409,7 +1362,7 @@ static llvm::DINode::DIFlags getRefFlags(const FunctionProtoType *Func) {
}
llvm::DIType *CGDebugInfo::CreateType(const FunctionType *Ty,
- llvm::DIFile *Unit, TypeLoc TL) {
+ llvm::DIFile *Unit) {
const auto *FPT = dyn_cast<FunctionProtoType>(Ty);
if (FPT) {
if (llvm::DIType *QTy = CreateQualifiedType(FPT, Unit))
@@ -1421,12 +1374,7 @@ llvm::DIType *CGDebugInfo::CreateType(const FunctionType *Ty,
SmallVector<llvm::Metadata *, 16> EltTys;
// Add the result type at least.
- TypeLoc RetTL;
- if (TL) {
- if (auto FTL = TL.getAs<FunctionTypeLoc>())
- RetTL = FTL.getReturnLoc();
- }
- EltTys.push_back(getOrCreateType(Ty->getReturnType(), Unit, RetTL));
+ EltTys.push_back(getOrCreateType(Ty->getReturnType(), Unit));
llvm::DINode::DIFlags Flags = llvm::DINode::FlagZero;
// Set up remainder of arguments if there is a prototype.
@@ -1435,30 +1383,8 @@ llvm::DIType *CGDebugInfo::CreateType(const FunctionType *Ty,
EltTys.push_back(DBuilder.createUnspecifiedParameter());
} else {
Flags = getRefFlags(FPT);
- bool DoneWithTL = false;
- if (TL) {
- if (auto FTL = TL.getAs<FunctionTypeLoc>()) {
- DoneWithTL = true;
- unsigned Idx = 0;
- unsigned FTL_NumParams = FTL.getNumParams();
- for (const QualType &ParamType : FPT->param_types()) {
- TypeLoc ParamTL;
- if (Idx < FTL_NumParams) {
- if (ParmVarDecl *Param = FTL.getParam(Idx)) {
- if (const TypeSourceInfo *TSI = Param->getTypeSourceInfo())
- ParamTL = TSI->getTypeLoc();
- }
- }
- EltTys.push_back(getOrCreateType(ParamType, Unit, ParamTL));
- Idx++;
- }
- }
- }
-
- if (!DoneWithTL) {
- for (const QualType &ParamType : FPT->param_types())
- EltTys.push_back(getOrCreateType(ParamType, Unit));
- }
+ for (const QualType &ParamType : FPT->param_types())
+ EltTys.push_back(getOrCreateType(ParamType, Unit));
if (FPT->isVariadic())
EltTys.push_back(DBuilder.createUnspecifiedParameter());
}
@@ -1529,13 +1455,11 @@ llvm::DIType *CGDebugInfo::createBitFieldType(const FieldDecl *BitFieldDecl,
Flags, DebugType, Annotations);
}
-llvm::DIType *
-CGDebugInfo::createFieldType(StringRef name, QualType type, SourceLocation loc,
- AccessSpecifier AS, uint64_t offsetInBits,
- uint32_t AlignInBits, llvm::DIFile *tunit,
- llvm::DIScope *scope, const RecordDecl *RD,
- llvm::DINodeArray Annotations, TypeLoc TL) {
- llvm::DIType *debugType = getOrCreateType(type, tunit, TL);
+llvm::DIType *CGDebugInfo::createFieldType(
+ StringRef name, QualType type, SourceLocation loc, AccessSpecifier AS,
+ uint64_t offsetInBits, uint32_t AlignInBits, llvm::DIFile *tunit,
+ llvm::DIScope *scope, const RecordDecl *RD, llvm::DINodeArray Annotations) {
+ llvm::DIType *debugType = getOrCreateType(type, tunit);
// Get the location for the field.
llvm::DIFile *file = getOrCreateFile(loc);
@@ -1643,12 +1567,9 @@ void CGDebugInfo::CollectRecordNormalField(
} else {
auto Align = getDeclAlignIfRequired(field, CGM.getContext());
llvm::DINodeArray Annotations = CollectBTFDeclTagAnnotations(field);
- TypeLoc TL;
- if (const TypeSourceInfo *TSI = field->getTypeSourceInfo())
- TL = TSI->getTypeLoc();
- FieldType = createFieldType(name, type, field->getLocation(),
- field->getAccess(), OffsetInBits, Align, tunit,
- RecordTy, RD, Annotations, TL);
+ FieldType =
+ createFieldType(name, type, field->getLocation(), field->getAccess(),
+ OffsetInBits, Align, tunit, RecordTy, RD, Annotations);
}
elements.push_back(FieldType);
@@ -1725,7 +1646,7 @@ void CGDebugInfo::CollectRecordFields(
llvm::DISubroutineType *
CGDebugInfo::getOrCreateMethodType(const CXXMethodDecl *Method,
llvm::DIFile *Unit, bool decl) {
- const FunctionProtoType *Func = Method->getType()->getAs<FunctionProtoType>();
+ const auto *Func = Method->getType()->castAs<FunctionProtoType>();
if (Method->isStatic())
return cast_or_null<llvm::DISubroutineType>(
getOrCreateType(QualType(Func, 0), Unit));
@@ -1760,9 +1681,17 @@ CGDebugInfo::getOrCreateInstanceMethodType(QualType ThisPtr,
SmallVector<llvm::Metadata *, 16> Elts;
// First element is always return type. For 'void' functions it is NULL.
QualType temp = Func->getReturnType();
- if (temp->getTypeClass() == Type::Auto && decl)
- Elts.push_back(CreateType(cast<AutoType>(temp)));
- else
+ if (temp->getTypeClass() == Type::Auto && decl) {
+ const AutoType *AT = cast<AutoType>(temp);
+
+ // It may be tricky in some cases to link the specification back to the
+ // lambda's call operator, so we skip emitting "auto" for lambdas. This is
+ // consistent with GCC.
+ if (AT->isDeduced() && ThisPtr->getPointeeCXXRecordDecl()->isLambda())
+ Elts.push_back(getOrCreateType(AT->getDeducedType(), Unit));
+ else
+ Elts.push_back(CreateType(AT));
+ } else
Elts.push_back(Args[0]);
// "this" pointer is always first argument.
@@ -3035,6 +2964,23 @@ llvm::DIType *CGDebugInfo::CreateTypeDefinition(const ObjCInterfaceType *Ty,
llvm::DIType *CGDebugInfo::CreateType(const VectorType *Ty,
llvm::DIFile *Unit) {
+ if (Ty->isExtVectorBoolType()) {
+ // Boolean ext_vector_type(N) is special: its real element type (a single
+ // bit) is not its Clang element type (_Bool, which is a full byte). For
+ // now, we pretend the boolean vector is actually a vector of bytes, where
+ // each byte represents 8 bits of the actual vector.
+ // FIXME: Debug info should represent this properly as a vector mask type.
+ auto &Ctx = CGM.getContext();
+ uint64_t Size = CGM.getContext().getTypeSize(Ty);
+ uint64_t NumVectorBytes = Size / Ctx.getCharWidth();
+
+ // Construct the vector of 'char' type.
+ QualType CharVecTy = Ctx.getVectorType(Ctx.CharTy, NumVectorBytes,
+ VectorType::GenericVector);
+ return CreateType(CharVecTy->getAs<VectorType>(), Unit);
+ }
+
llvm::DIType *ElementTy = getOrCreateType(Ty->getElementType(), Unit);
int64_t Count = Ty->getNumElements();
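
A worked example of the boolean-vector size computation above, assuming 8-bit chars:

    typedef bool bool32 __attribute__((ext_vector_type(32)));
    // getTypeSize(bool32) == 32 bits, so NumVectorBytes == 32 / 8 == 4 and
    // the emitted debug type is equivalent to a 4-element 'char' vector.
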
@@ -3348,6 +3294,9 @@ static QualType UnwrapTypeForDebugInfo(QualType T, const ASTContext &C) {
case Type::Attributed:
T = cast<AttributedType>(T)->getEquivalentType();
break;
+ case Type::BTFTagAttributed:
+ T = cast<BTFTagAttributedType>(T)->getWrappedType();
+ break;
case Type::Elaborated:
T = cast<ElaboratedType>(T)->getNamedType();
break;
@@ -3409,8 +3358,7 @@ void CGDebugInfo::completeUnusedClass(const CXXRecordDecl &D) {
RetainedTypes.push_back(CGM.getContext().getRecordType(&D).getAsOpaquePtr());
}
-llvm::DIType *CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile *Unit,
- TypeLoc TL) {
+llvm::DIType *CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile *Unit) {
if (Ty.isNull())
return nullptr;
@@ -3427,7 +3375,7 @@ llvm::DIType *CGDebugInfo::getOrCreateType(QualType Ty, llvm::DIFile *Unit,
if (auto *T = getTypeOrNull(Ty))
return T;
- llvm::DIType *Res = CreateTypeNode(Ty, Unit, TL);
+ llvm::DIType *Res = CreateTypeNode(Ty, Unit);
void *TyPtr = Ty.getAsOpaquePtr();
// And update the type cache.
@@ -3471,11 +3419,10 @@ llvm::DIModule *CGDebugInfo::getParentModuleOrNull(const Decl *D) {
return nullptr;
}
-llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit,
- TypeLoc TL) {
+llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit) {
// Handle qualifiers, which recursively handles what they refer to.
if (Ty.hasLocalQualifiers())
- return CreateQualifiedType(Ty, Unit, TL);
+ return CreateQualifiedType(Ty, Unit);
// Work out details of type.
switch (Ty->getTypeClass()) {
@@ -3504,7 +3451,7 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit,
case Type::Complex:
return CreateType(cast<ComplexType>(Ty));
case Type::Pointer:
- return CreateType(cast<PointerType>(Ty), Unit, TL);
+ return CreateType(cast<PointerType>(Ty), Unit);
case Type::BlockPointer:
return CreateType(cast<BlockPointerType>(Ty), Unit);
case Type::Typedef:
@@ -3515,7 +3462,7 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit,
return CreateEnumType(cast<EnumType>(Ty));
case Type::FunctionProto:
case Type::FunctionNoProto:
- return CreateType(cast<FunctionType>(Ty), Unit, TL);
+ return CreateType(cast<FunctionType>(Ty), Unit);
case Type::ConstantArray:
case Type::VariableArray:
case Type::IncompleteArray:
@@ -3542,6 +3489,7 @@ llvm::DIType *CGDebugInfo::CreateTypeNode(QualType Ty, llvm::DIFile *Unit,
case Type::Auto:
case Type::Attributed:
+ case Type::BTFTagAttributed:
case Type::Adjusted:
case Type::Decayed:
case Type::DeducedTemplateSpecialization:
@@ -3615,7 +3563,11 @@ llvm::DICompositeType *CGDebugInfo::CreateLimitedType(const RecordType *Ty) {
return getOrCreateRecordFwdDecl(Ty, RDContext);
uint64_t Size = CGM.getContext().getTypeSize(Ty);
- auto Align = getDeclAlignIfRequired(D, CGM.getContext());
+ // __attribute__((aligned)) can increase or decrease alignment *except* on
+ // a struct or struct member, where it only increases alignment unless
+ // 'packed' is also specified. To handle that case, use
+ // getTypeAlignIfRequired here rather than the declaration's alignment.
+ auto Align = getTypeAlignIfRequired(Ty, CGM.getContext());
SmallString<256> Identifier = getTypeIdentifier(Ty, CGM, TheCU);
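
A worked example of the alignment rule described above, assuming a 4-byte int:

    struct __attribute__((aligned(2))) S1 { int x; };         // alignof == 4
    struct __attribute__((packed, aligned(2))) S2 { int x; }; // alignof == 2
    // On a struct, 'aligned' alone can only raise alignment; combined with
    // 'packed' it can lower it, which is why the type's alignment is
    // queried here instead of the declaration's.
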
@@ -3908,6 +3860,17 @@ llvm::DINode *CGDebugInfo::getDeclarationOrDefinition(const Decl *D) {
auto N = I->second;
if (auto *GVE = dyn_cast_or_null<llvm::DIGlobalVariableExpression>(N))
return GVE->getVariable();
+ return cast<llvm::DINode>(N);
+ }
+
+ // Search imported declaration cache if it is already defined
+ // as imported declaration.
+ auto IE = ImportedDeclCache.find(D->getCanonicalDecl());
+
+ if (IE != ImportedDeclCache.end()) {
+ auto N = IE->second;
+ if (auto *GVE = dyn_cast_or_null<llvm::DIImportedEntity>(N))
+ return cast<llvm::DINode>(GVE);
return dyn_cast_or_null<llvm::DINode>(N);
}
@@ -4064,12 +4027,7 @@ llvm::DISubroutineType *CGDebugInfo::getOrCreateFunctionType(const Decl *D,
getDwarfCC(CC));
}
- TypeLoc TL;
- if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
- if (const TypeSourceInfo *TSI = FD->getTypeSourceInfo())
- TL = TSI->getTypeLoc();
- }
- return cast<llvm::DISubroutineType>(getOrCreateType(FnType, F, TL));
+ return cast<llvm::DISubroutineType>(getOrCreateType(FnType, F));
}
QualType
@@ -4328,7 +4286,7 @@ void CGDebugInfo::AppendAddressSpaceXDeref(
return;
Expr.push_back(llvm::dwarf::DW_OP_constu);
- Expr.push_back(DWARFAddressSpace.getValue());
+ Expr.push_back(*DWARFAddressSpace);
Expr.push_back(llvm::dwarf::DW_OP_swap);
Expr.push_back(llvm::dwarf::DW_OP_xderef);
}
@@ -4471,12 +4429,8 @@ llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD,
uint64_t XOffset = 0;
if (VD->hasAttr<BlocksAttr>())
Ty = EmitTypeForVarWithBlocksAttr(VD, &XOffset).WrappedType;
- else {
- TypeLoc TL;
- if (const TypeSourceInfo *TSI = VD->getTypeSourceInfo())
- TL = TSI->getTypeLoc();
- Ty = getOrCreateType(VD->getType(), Unit, TL);
- }
+ else
+ Ty = getOrCreateType(VD->getType(), Unit);
// If there is no debug info for this type then do not emit debug info
// for this variable.
@@ -4635,11 +4589,103 @@ llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const VarDecl *VD,
return D;
}
+llvm::DILocalVariable *CGDebugInfo::EmitDeclare(const BindingDecl *BD,
+ llvm::Value *Storage,
+ llvm::Optional<unsigned> ArgNo,
+ CGBuilderTy &Builder,
+ const bool UsePointerValue) {
+ assert(CGM.getCodeGenOpts().hasReducedDebugInfo());
+ assert(!LexicalBlockStack.empty() && "Region stack mismatch, stack empty!");
+ if (BD->hasAttr<NoDebugAttr>())
+ return nullptr;
+
+ // Skip the tuple-like case; we don't handle that here.
+ if (isa<DeclRefExpr>(BD->getBinding()))
+ return nullptr;
+
+ llvm::DIFile *Unit = getOrCreateFile(BD->getLocation());
+ llvm::DIType *Ty = getOrCreateType(BD->getType(), Unit);
+
+ // If there is no debug info for this type then do not emit debug info
+ // for this variable.
+ if (!Ty)
+ return nullptr;
+
+ auto Align = getDeclAlignIfRequired(BD, CGM.getContext());
+ unsigned AddressSpace = CGM.getContext().getTargetAddressSpace(BD->getType());
+
+ SmallVector<uint64_t, 3> Expr;
+ AppendAddressSpaceXDeref(AddressSpace, Expr);
+
+ // Clang stores the sret pointer provided by the caller in a static alloca.
+ // Use DW_OP_deref to tell the debugger to load the pointer and treat it as
+ // the address of the variable.
+ if (UsePointerValue) {
+ assert(!llvm::is_contained(Expr, llvm::dwarf::DW_OP_deref) &&
+ "Debug info already contains DW_OP_deref.");
+ Expr.push_back(llvm::dwarf::DW_OP_deref);
+ }
+
+ unsigned Line = getLineNumber(BD->getLocation());
+ unsigned Column = getColumnNumber(BD->getLocation());
+ StringRef Name = BD->getName();
+ auto *Scope = cast<llvm::DIScope>(LexicalBlockStack.back());
+ // Create the descriptor for the variable.
+ llvm::DILocalVariable *D = DBuilder.createAutoVariable(
+ Scope, Name, Unit, Line, Ty, CGM.getLangOpts().Optimize,
+ llvm::DINode::FlagZero, Align);
+
+ if (const MemberExpr *ME = dyn_cast<MemberExpr>(BD->getBinding())) {
+ if (const FieldDecl *FD = dyn_cast<FieldDecl>(ME->getMemberDecl())) {
+ const unsigned fieldIndex = FD->getFieldIndex();
+ const clang::CXXRecordDecl *parent =
+ (const CXXRecordDecl *)FD->getParent();
+ const ASTRecordLayout &layout =
+ CGM.getContext().getASTRecordLayout(parent);
+ const uint64_t fieldOffset = layout.getFieldOffset(fieldIndex);
+
+ if (fieldOffset != 0) {
+ Expr.push_back(llvm::dwarf::DW_OP_plus_uconst);
+ Expr.push_back(
+ CGM.getContext().toCharUnitsFromBits(fieldOffset).getQuantity());
+ }
+ }
+ } else if (const ArraySubscriptExpr *ASE =
+ dyn_cast<ArraySubscriptExpr>(BD->getBinding())) {
+ if (const IntegerLiteral *IL = dyn_cast<IntegerLiteral>(ASE->getIdx())) {
+ const uint64_t value = IL->getValue().getZExtValue();
+ const uint64_t typeSize = CGM.getContext().getTypeSize(BD->getType());
+
+ if (value != 0) {
+ Expr.push_back(llvm::dwarf::DW_OP_plus_uconst);
+ Expr.push_back(CGM.getContext()
+ .toCharUnitsFromBits(value * typeSize)
+ .getQuantity());
+ }
+ }
+ }
+
+ // Insert an llvm.dbg.declare into the current block.
+ DBuilder.insertDeclare(Storage, D, DBuilder.createExpression(Expr),
+ llvm::DILocation::get(CGM.getLLVMContext(), Line,
+ Column, Scope, CurInlinedAt),
+ Builder.GetInsertBlock());
+
+ return D;
+}
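
A sketch of the two binding shapes the offset logic above handles (hypothetical declarations):

    struct Pair { int a, b; };
    void f(Pair P, int (&Arr)[2]) {
      auto [x, y] = P;   // MemberExpr binding: 'y' gets DW_OP_plus_uconst
                         // with the byte offset of field 'b'.
      auto [u, v] = Arr; // ArraySubscriptExpr binding: 'v' gets
                         // DW_OP_plus_uconst with 1 * sizeof(int) bytes.
    }
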
+
llvm::DILocalVariable *
CGDebugInfo::EmitDeclareOfAutoVariable(const VarDecl *VD, llvm::Value *Storage,
CGBuilderTy &Builder,
const bool UsePointerValue) {
assert(CGM.getCodeGenOpts().hasReducedDebugInfo());
+
+ if (auto *DD = dyn_cast<DecompositionDecl>(VD))
+ for (auto *B : DD->bindings()) {
+ EmitDeclare(B, Storage, llvm::None, Builder,
+ VD->getType()->isReferenceType());
+ }
+
return EmitDeclare(VD, Storage, llvm::None, Builder, UsePointerValue);
}
@@ -4975,6 +5021,52 @@ llvm::DIGlobalVariableExpression *CGDebugInfo::CollectAnonRecordDecls(
return GVE;
}
+static bool ReferencesAnonymousEntity(ArrayRef<TemplateArgument> Args);
+static bool ReferencesAnonymousEntity(RecordType *RT) {
+ // Unnamed classes/lambdas can't be reconstituted due to a lack of column
+ // info we produce in the DWARF, so we can't get Clang's full name back.
+ // But so long as it's not one of those, it doesn't matter if some sub-type
+ // of the record (a template parameter) can't be reconstituted - because the
+ // un-reconstitutable type itself will carry its own name.
+ const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
+ if (!RD)
+ return false;
+ if (!RD->getIdentifier())
+ return true;
+ auto *TSpecial = dyn_cast<ClassTemplateSpecializationDecl>(RD);
+ if (!TSpecial)
+ return false;
+ return ReferencesAnonymousEntity(TSpecial->getTemplateArgs().asArray());
+}
+static bool ReferencesAnonymousEntity(ArrayRef<TemplateArgument> Args) {
+ return llvm::any_of(Args, [&](const TemplateArgument &TA) {
+ switch (TA.getKind()) {
+ case TemplateArgument::Pack:
+ return ReferencesAnonymousEntity(TA.getPackAsArray());
+ case TemplateArgument::Type: {
+ struct ReferencesAnonymous
+ : public RecursiveASTVisitor<ReferencesAnonymous> {
+ bool RefAnon = false;
+ bool VisitRecordType(RecordType *RT) {
+ if (ReferencesAnonymousEntity(RT)) {
+ RefAnon = true;
+ return false;
+ }
+ return true;
+ }
+ };
+ ReferencesAnonymous RT;
+ RT.TraverseType(TA.getAsType());
+ if (RT.RefAnon)
+ return true;
+ break;
+ }
+ default:
+ break;
+ }
+ return false;
+ });
+}
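
A hypothetical instance of what ReferencesAnonymousEntity flags:

    template <typename T> struct S {};
    auto Lam = [] {};
    S<decltype(Lam)> V; // 'S' is named, but the lambda argument has no
                        // reconstitutable name, so the full name is kept
                        // rather than a simplified one.
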
namespace {
struct ReconstitutableType : public RecursiveASTVisitor<ReconstitutableType> {
bool Reconstitutable = true;
@@ -4986,6 +5078,15 @@ struct ReconstitutableType : public RecursiveASTVisitor<ReconstitutableType> {
Reconstitutable = false;
return false;
}
+ bool VisitType(Type *T) {
+ // _BitInt(N) isn't reconstitutable because the bit width isn't encoded in
+ // the DWARF, only the byte width.
+ if (T->isBitIntType()) {
+ Reconstitutable = false;
+ return false;
+ }
+ return true;
+ }
bool TraverseEnumType(EnumType *ET) {
// Unnamed enums can't be reconstituted due to a lack of column info we
// produce in the DWARF, so we can't get Clang's full name back.
@@ -4994,24 +5095,21 @@ struct ReconstitutableType : public RecursiveASTVisitor<ReconstitutableType> {
Reconstitutable = false;
return false;
}
+ if (!ED->isExternallyVisible()) {
+ Reconstitutable = false;
+ return false;
+ }
}
return true;
}
bool VisitFunctionProtoType(FunctionProtoType *FT) {
// noexcept is not encoded in DWARF, so the reconstituted signature can't preserve it.
Reconstitutable &= !isNoexceptExceptionSpec(FT->getExceptionSpecType());
+ Reconstitutable &= !FT->getNoReturnAttr();
return Reconstitutable;
}
- bool TraverseRecordType(RecordType *RT) {
- // Unnamed classes/lambdas can't be reconstituted due to a lack of column
- // info we produce in the DWARF, so we can't get Clang's full name back.
- // But so long as it's not one of those, it doesn't matter if some sub-type
- // of the record (a template parameter) can't be reconstituted - because the
- // un-reconstitutable type itself will carry its own name.
- const auto *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
- if (!RD)
- return true;
- if (RD->isLambda() || !RD->getIdentifier()) {
+ bool VisitRecordType(RecordType *RT) {
+ if (ReferencesAnonymousEntity(RT)) {
Reconstitutable = false;
return false;
}
@@ -5035,6 +5133,10 @@ std::string CGDebugInfo::GetName(const Decl *D, bool Qualified) const {
return Name;
codegenoptions::DebugTemplateNamesKind TemplateNamesKind =
CGM.getCodeGenOpts().getDebugSimpleTemplateNames();
+
+ if (!CGM.getCodeGenOpts().hasReducedDebugInfo())
+ TemplateNamesKind = codegenoptions::DebugTemplateNamesKind::Full;
+
Optional<TemplateArgs> Args;
bool IsOperatorOverload = false; // isa<CXXConversionDecl>(ND);
@@ -5080,7 +5182,8 @@ std::string CGDebugInfo::GetName(const Decl *D, bool Qualified) const {
// harder to parse back into a large integer, etc - so punting on
// this for now. Re-parsing the integers back into APInt is probably
// feasible some day.
- return TA.getAsIntegral().getBitWidth() <= 64;
+ return TA.getAsIntegral().getBitWidth() <= 64 &&
+ IsReconstitutableType(TA.getIntegralType());
case TemplateArgument::Type:
return IsReconstitutableType(TA.getAsType());
default:
@@ -5123,7 +5226,7 @@ std::string CGDebugInfo::GetName(const Decl *D, bool Qualified) const {
TemplateNamesKind == codegenoptions::DebugTemplateNamesKind::Mangled;
// check if it's a template
if (Mangled)
- OS << "_STN";
+ OS << "_STN|";
OS << ND->getDeclName();
std::string EncodedOriginalName;
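
With the '|' separator added above, a simple-template-names encoding looks roughly like this (a sketch):

    template <typename T> struct t1 {};
    t1<int> v; // DW_AT_name becomes "_STN|t1|<int>" instead of "t1<int>",
               // so tooling can rebuild the full name and compare the two.
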
@@ -5200,14 +5303,10 @@ void CGDebugInfo::EmitGlobalVariable(llvm::GlobalVariable *Var,
}
AppendAddressSpaceXDeref(AddressSpace, Expr);
- TypeLoc TL;
- if (const TypeSourceInfo *TSI = D->getTypeSourceInfo())
- TL = TSI->getTypeLoc();
-
llvm::DINodeArray Annotations = CollectBTFDeclTagAnnotations(D);
GVE = DBuilder.createGlobalVariableExpression(
- DContext, DeclName, LinkageName, Unit, LineNo,
- getOrCreateType(T, Unit, TL), Var->hasLocalLinkage(), true,
+ DContext, DeclName, LinkageName, Unit, LineNo, getOrCreateType(T, Unit),
+ Var->hasLocalLinkage(), true,
Expr.empty() ? nullptr : DBuilder.createExpression(Expr),
getOrCreateStaticDataMemberDeclarationOrNull(D), TemplateParameters,
Align, Annotations);
@@ -5320,6 +5419,62 @@ void CGDebugInfo::EmitExternalVariable(llvm::GlobalVariable *Var,
Var->addDebugInfo(GVE);
}
+void CGDebugInfo::EmitGlobalAlias(const llvm::GlobalValue *GV,
+ const GlobalDecl GD) {
+
+ assert(GV);
+
+ if (!CGM.getCodeGenOpts().hasReducedDebugInfo())
+ return;
+
+ const auto *D = cast<ValueDecl>(GD.getDecl());
+ if (D->hasAttr<NoDebugAttr>())
+ return;
+
+ auto AliaseeDecl = CGM.getMangledNameDecl(GV->getName());
+ llvm::DINode *DI;
+
+ if (!AliaseeDecl)
+ // FIXME: Aliasee not declared yet - possibly declared later
+ // For example,
+ //
+ // 1 extern int newname __attribute__((alias("oldname")));
+ // 2 int oldname = 1;
+ //
+ // No debug info would be generated for 'newname' in this case.
+ //
+ // Fix compiler to generate "newname" as imported_declaration
+ // pointing to the DIE of "oldname".
+ return;
+ if (!(DI = getDeclarationOrDefinition(
+ AliaseeDecl.getCanonicalDecl().getDecl())))
+ return;
+
+ llvm::DIScope *DContext = getDeclContextDescriptor(D);
+ auto Loc = D->getLocation();
+
+ llvm::DIImportedEntity *ImportDI = DBuilder.createImportedDeclaration(
+ DContext, DI, getOrCreateFile(Loc), getLineNumber(Loc), D->getName());
+
+ // Record this DIE in the cache for nested declaration reference.
+ ImportedDeclCache[GD.getCanonicalDecl().getDecl()].reset(ImportDI);
+}
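
The case that now receives debug info; per the FIXME above, the aliasee must already be emitted:

    int oldname = 1;
    extern int newname __attribute__((alias("oldname")));
    // 'newname' becomes a DW_TAG_imported_declaration referring to the DIE
    // of 'oldname', cached in ImportedDeclCache for later lookups.
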
+
+void CGDebugInfo::AddStringLiteralDebugInfo(llvm::GlobalVariable *GV,
+ const StringLiteral *S) {
+ SourceLocation Loc = S->getStrTokenLoc(0);
+ PresumedLoc PLoc = CGM.getContext().getSourceManager().getPresumedLoc(Loc);
+ if (!PLoc.isValid())
+ return;
+
+ llvm::DIFile *File = getOrCreateFile(Loc);
+ llvm::DIGlobalVariableExpression *Debug =
+ DBuilder.createGlobalVariableExpression(
+ nullptr, StringRef(), StringRef(), getOrCreateFile(Loc),
+ getLineNumber(Loc), getOrCreateType(S->getType(), File), true);
+ GV->addDebugInfo(Debug);
+}
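
Illustrative effect of AddStringLiteralDebugInfo (a sketch):

    const char *Msg = "boom";
    // The backing constant global now carries an unnamed
    // DIGlobalVariableExpression with this line and column, so sanitizer
    // reports that touch the literal can be symbolized.
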
+
llvm::DIScope *CGDebugInfo::getCurrentContextDescriptor(const Decl *D) {
if (!LexicalBlockStack.empty())
return LexicalBlockStack.back();
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h
index a76426e585c8..38e3fa5b2fa9 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDebugInfo.h
@@ -152,8 +152,10 @@ class CGDebugInfo {
llvm::DenseMap<const char *, llvm::TrackingMDRef> DIFileCache;
llvm::DenseMap<const FunctionDecl *, llvm::TrackingMDRef> SPCache;
/// Cache declarations relevant to DW_TAG_imported_declarations (C++
- /// using declarations) that aren't covered by other more specific caches.
+ /// using declarations and global alias variables) that aren't covered
+ /// by other more specific caches.
llvm::DenseMap<const Decl *, llvm::TrackingMDRef> DeclCache;
+ llvm::DenseMap<const Decl *, llvm::TrackingMDRef> ImportedDeclCache;
llvm::DenseMap<const NamespaceDecl *, llvm::TrackingMDRef> NamespaceCache;
llvm::DenseMap<const NamespaceAliasDecl *, llvm::TrackingMDRef>
NamespaceAliasCache;
@@ -177,19 +179,16 @@ class CGDebugInfo {
llvm::DIType *CreateType(const ComplexType *Ty);
llvm::DIType *CreateType(const AutoType *Ty);
llvm::DIType *CreateType(const BitIntType *Ty);
- llvm::DIType *CreateQualifiedType(QualType Ty, llvm::DIFile *Fg,
- TypeLoc TL = TypeLoc());
+ llvm::DIType *CreateQualifiedType(QualType Ty, llvm::DIFile *Fg);
llvm::DIType *CreateQualifiedType(const FunctionProtoType *Ty,
llvm::DIFile *Fg);
llvm::DIType *CreateType(const TypedefType *Ty, llvm::DIFile *Fg);
llvm::DIType *CreateType(const TemplateSpecializationType *Ty,
llvm::DIFile *Fg);
llvm::DIType *CreateType(const ObjCObjectPointerType *Ty, llvm::DIFile *F);
- llvm::DIType *CreateType(const PointerType *Ty, llvm::DIFile *F,
- TypeLoc TL = TypeLoc());
+ llvm::DIType *CreateType(const PointerType *Ty, llvm::DIFile *F);
llvm::DIType *CreateType(const BlockPointerType *Ty, llvm::DIFile *F);
- llvm::DIType *CreateType(const FunctionType *Ty, llvm::DIFile *F,
- TypeLoc TL = TypeLoc());
+ llvm::DIType *CreateType(const FunctionType *Ty, llvm::DIFile *F);
/// Get structure or union type.
llvm::DIType *CreateType(const RecordType *Tyg);
llvm::DIType *CreateTypeDefinition(const RecordType *Ty);
@@ -244,8 +243,7 @@ class CGDebugInfo {
/// \return namespace descriptor for the given namespace decl.
llvm::DINamespace *getOrCreateNamespace(const NamespaceDecl *N);
llvm::DIType *CreatePointerLikeType(llvm::dwarf::Tag Tag, const Type *Ty,
- QualType PointeeTy, llvm::DIFile *F,
- TypeLoc TL = TypeLoc());
+ QualType PointeeTy, llvm::DIFile *F);
llvm::DIType *getOrCreateStructPtrType(StringRef Name, llvm::DIType *&Cache);
/// A helper function to create a subprogram for a single member
@@ -311,8 +309,7 @@ class CGDebugInfo {
uint64_t offsetInBits, uint32_t AlignInBits,
llvm::DIFile *tunit, llvm::DIScope *scope,
const RecordDecl *RD = nullptr,
- llvm::DINodeArray Annotations = nullptr,
- TypeLoc TL = TypeLoc());
+ llvm::DINodeArray Annotations = nullptr);
llvm::DIType *createFieldType(StringRef name, QualType type,
SourceLocation loc, AccessSpecifier AS,
@@ -512,6 +509,9 @@ public:
/// Emit information about an external variable.
void EmitExternalVariable(llvm::GlobalVariable *GV, const VarDecl *Decl);
+ /// Emit information about global variable alias.
+ void EmitGlobalAlias(const llvm::GlobalValue *GV, const GlobalDecl Decl);
+
/// Emit C++ using directive.
void EmitUsingDirective(const UsingDirectiveDecl &UD);
@@ -533,6 +533,14 @@ public:
/// Emit an @import declaration.
void EmitImportDecl(const ImportDecl &ID);
+ /// DebugInfo isn't attached to string literals by default. While certain
+ /// aspects of debuginfo aren't useful for string literals (like a name), it's
+ /// nice to be able to symbolize the line and column information. This is
+ /// especially useful for sanitizers, as it allows symbolization of
+ /// heap-buffer-overflows on constant strings.
+ void AddStringLiteralDebugInfo(llvm::GlobalVariable *GV,
+ const StringLiteral *S);
+
/// Emit C++ namespace alias.
llvm::DIImportedEntity *EmitNamespaceAlias(const NamespaceAliasDecl &NA);
@@ -583,6 +591,14 @@ private:
CGBuilderTy &Builder,
const bool UsePointerValue = false);
+ /// Emit call to llvm.dbg.declare for a binding declaration.
+ /// Returns a pointer to the DILocalVariable associated with the
+ /// llvm.dbg.declare, or nullptr otherwise.
+ llvm::DILocalVariable *EmitDeclare(const BindingDecl *decl, llvm::Value *AI,
+ llvm::Optional<unsigned> ArgNo,
+ CGBuilderTy &Builder,
+ const bool UsePointerValue = false);
+
struct BlockByRefType {
/// The wrapper struct used inside the __block_literal struct.
llvm::DIType *BlockByRefWrapper;
@@ -632,8 +648,7 @@ private:
Optional<StringRef> Source);
/// Get the type from the cache or create a new type if necessary.
- llvm::DIType *getOrCreateType(QualType Ty, llvm::DIFile *Fg,
- TypeLoc TL = TypeLoc());
+ llvm::DIType *getOrCreateType(QualType Ty, llvm::DIFile *Fg);
/// Get a reference to a clang module. If \p CreateSkeletonCU is true,
/// this also creates a split dwarf skeleton compile unit.
@@ -648,8 +663,7 @@ private:
llvm::DICompositeType *getOrCreateLimitedType(const RecordType *Ty);
/// Create type metadata for a source language type.
- llvm::DIType *CreateTypeNode(QualType Ty, llvm::DIFile *Fg,
- TypeLoc TL = TypeLoc());
+ llvm::DIType *CreateTypeNode(QualType Ty, llvm::DIFile *Fg);
/// Create new member and increase Offset by FType's size.
llvm::DIType *CreateMemberType(llvm::DIFile *Unit, QualType FType,
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp
index 18d658436086..f04af0d2cdf8 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDecl.cpp
@@ -118,6 +118,7 @@ void CodeGenFunction::EmitDecl(const Decl &D) {
case Decl::Label: // __label__ x;
case Decl::Import:
case Decl::MSGuid: // __declspec(uuid("..."))
+ case Decl::UnnamedGlobalConstant:
case Decl::TemplateParamObject:
case Decl::OMPThreadPrivate:
case Decl::OMPAllocate:
@@ -341,6 +342,8 @@ CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
if (!Init) {
if (!getLangOpts().CPlusPlus)
CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
+ else if (D.hasFlexibleArrayInit(getContext()))
+ CGM.ErrorUnsupported(D.getInit(), "flexible array initializer");
else if (HaveInsertPoint()) {
// Since we have a static initializer, this global variable can't
// be constant.
@@ -351,6 +354,14 @@ CodeGenFunction::AddInitializerToStaticVarDecl(const VarDecl &D,
return GV;
}
+#ifndef NDEBUG
+ CharUnits VarSize = CGM.getContext().getTypeSizeInChars(D.getType()) +
+ D.getFlexibleArrayInitChars(getContext());
+ CharUnits CstSize = CharUnits::fromQuantity(
+ CGM.getDataLayout().getTypeAllocSize(Init->getType()));
+ assert(VarSize == CstSize && "Emitted constant has unexpected size");
+#endif
+
// The initializer may differ in type from the global. Rewrite
// the global to match the initializer. (We have to do this
// because some types, like unions, can't be completely represented
@@ -462,7 +473,7 @@ void CodeGenFunction::EmitStaticVarDecl(const VarDecl &D,
LocalDeclMap.find(&D)->second = Address(castedAddr, elemTy, alignment);
CGM.setStaticLocalDeclAddress(&D, castedAddr);
- CGM.getSanitizerMetadata()->reportGlobalToASan(var, D);
+ CGM.getSanitizerMetadata()->reportGlobal(var, D);
// Emit global variable debug descriptor for static vars.
CGDebugInfo *DI = getDebugInfo();
@@ -1155,11 +1166,7 @@ static Address createUnnamedGlobalForMemcpyFrom(CodeGenModule &CGM,
llvm::Constant *Constant,
CharUnits Align) {
Address SrcPtr = CGM.createUnnamedGlobalFrom(D, Constant, Align);
- llvm::Type *BP = llvm::PointerType::getInt8PtrTy(CGM.getLLVMContext(),
- SrcPtr.getAddressSpace());
- if (SrcPtr.getType() != BP)
- SrcPtr = Builder.CreateBitCast(SrcPtr, BP);
- return SrcPtr;
+ return Builder.CreateElementBitCast(SrcPtr, CGM.Int8Ty);
}
static void emitStoresForConstant(CodeGenModule &CGM, const VarDecl &D,
@@ -1786,7 +1793,7 @@ void CodeGenFunction::emitZeroOrPatternForAutoVarInit(QualType type,
Cur->addIncoming(Begin.getPointer(), OriginBB);
CharUnits CurAlign = Loc.getAlignment().alignmentOfArrayElement(EltSize);
auto *I =
- Builder.CreateMemCpy(Address(Cur, CurAlign),
+ Builder.CreateMemCpy(Address(Cur, Int8Ty, CurAlign),
createUnnamedGlobalForMemcpyFrom(
CGM, D, Builder, Constant, ConstantAlign),
BaseSizeInChars, isVolatile);
@@ -1908,10 +1915,9 @@ void CodeGenFunction::EmitAutoVarInit(const AutoVarEmission &emission) {
return EmitStoreThroughLValue(RValue::get(constant), lv, true);
}
- llvm::Type *BP = CGM.Int8Ty->getPointerTo(Loc.getAddressSpace());
- emitStoresForConstant(
- CGM, D, (Loc.getType() == BP) ? Loc : Builder.CreateBitCast(Loc, BP),
- type.isVolatileQualified(), Builder, constant, /*IsAutoInit=*/false);
+ emitStoresForConstant(CGM, D, Builder.CreateElementBitCast(Loc, CGM.Int8Ty),
+ type.isVolatileQualified(), Builder, constant,
+ /*IsAutoInit=*/false);
}
/// Emit an expression as an initializer for an object (variable, field, etc.)
@@ -2282,6 +2288,8 @@ static void emitPartialArrayDestroy(CodeGenFunction &CGF,
llvm::Value *begin, llvm::Value *end,
QualType type, CharUnits elementAlign,
CodeGenFunction::Destroyer *destroyer) {
+ llvm::Type *elemTy = CGF.ConvertTypeForMem(type);
+
// If the element type is itself an array, drill down.
unsigned arrayDepth = 0;
while (const ArrayType *arrayType = CGF.getContext().getAsArrayType(type)) {
@@ -2295,7 +2303,6 @@ static void emitPartialArrayDestroy(CodeGenFunction &CGF,
llvm::Value *zero = llvm::ConstantInt::get(CGF.SizeTy, 0);
SmallVector<llvm::Value*,4> gepIndices(arrayDepth+1, zero);
- llvm::Type *elemTy = begin->getType()->getPointerElementType();
begin = CGF.Builder.CreateInBoundsGEP(
elemTy, begin, gepIndices, "pad.arraybegin");
end = CGF.Builder.CreateInBoundsGEP(
@@ -2435,6 +2442,7 @@ namespace {
/// for the specified parameter and set up LocalDeclMap.
void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
unsigned ArgNo) {
+ bool NoDebugInfo = false;
// FIXME: Why isn't ImplicitParamDecl a ParmVarDecl?
assert((isa<ParmVarDecl>(D) || isa<ImplicitParamDecl>(D)) &&
"Invalid argument to EmitParmDecl");
@@ -2454,6 +2462,10 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
setBlockContextParameter(IPD, ArgNo, V);
return;
}
+ // Suppress debug info for ThreadPrivateVar parameters; otherwise it hides
+ // the debug info of TLS variables.
+ NoDebugInfo =
+ (IPD->getParameterKind() == ImplicitParamDecl::ThreadPrivateVar);
}
Address DeclPtr = Address::invalid();
@@ -2462,12 +2474,10 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
bool IsScalar = hasScalarEvaluationKind(Ty);
// If we already have a pointer to the argument, reuse the input pointer.
if (Arg.isIndirect()) {
- DeclPtr = Arg.getIndirectAddress();
// If we have a prettier pointer type at this point, bitcast to that.
- unsigned AS = DeclPtr.getType()->getAddressSpace();
- llvm::Type *IRTy = ConvertTypeForMem(Ty)->getPointerTo(AS);
- if (DeclPtr.getType() != IRTy)
- DeclPtr = Builder.CreateBitCast(DeclPtr, IRTy, D.getName());
+ DeclPtr = Arg.getIndirectAddress();
+ DeclPtr = Builder.CreateElementBitCast(DeclPtr, ConvertTypeForMem(Ty),
+ D.getName());
// Indirect argument is in alloca address space, which may be different
// from the default address space.
auto AllocaAS = CGM.getASTAllocaAddressSpace();
@@ -2480,10 +2490,9 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
assert(getContext().getTargetAddressSpace(SrcLangAS) ==
CGM.getDataLayout().getAllocaAddrSpace());
auto DestAS = getContext().getTargetAddressSpace(DestLangAS);
- auto *T = V->getType()->getPointerElementType()->getPointerTo(DestAS);
- DeclPtr = Address(getTargetHooks().performAddrSpaceCast(
- *this, V, SrcLangAS, DestLangAS, T, true),
- DeclPtr.getAlignment());
+ auto *T = DeclPtr.getElementType()->getPointerTo(DestAS);
+ DeclPtr = DeclPtr.withPointer(getTargetHooks().performAddrSpaceCast(
+ *this, V, SrcLangAS, DestLangAS, T, true));
}
// Push a destructor cleanup for this parameter if the ABI requires it.
@@ -2587,7 +2596,8 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
// Emit debug info for param declarations in non-thunk functions.
if (CGDebugInfo *DI = getDebugInfo()) {
- if (CGM.getCodeGenOpts().hasReducedDebugInfo() && !CurFuncIsThunk) {
+ if (CGM.getCodeGenOpts().hasReducedDebugInfo() && !CurFuncIsThunk &&
+ !NoDebugInfo) {
llvm::DILocalVariable *DILocalVar = DI->EmitDeclareOfArgVariable(
&D, AllocaPtr.getPointer(), ArgNo, Builder);
if (const auto *Var = dyn_cast_or_null<ParmVarDecl>(&D))
@@ -2684,3 +2694,22 @@ void CodeGenModule::EmitOMPAllocateDecl(const OMPAllocateDecl *D) {
DummyGV->eraseFromParent();
}
}
+
+llvm::Optional<CharUnits>
+CodeGenModule::getOMPAllocateAlignment(const VarDecl *VD) {
+ if (const auto *AA = VD->getAttr<OMPAllocateDeclAttr>()) {
+ if (Expr *Alignment = AA->getAlignment()) {
+ unsigned UserAlign =
+ Alignment->EvaluateKnownConstInt(getContext()).getExtValue();
+ CharUnits NaturalAlign =
+ getNaturalTypeAlignment(VD->getType().getNonReferenceType());
+
+ // OpenMP 5.1, p. 185, lines 7-10:
+ // Each item in the align modifier list must be aligned to the maximum
+ // of the specified alignment and the type's natural alignment.
+ return CharUnits::fromQuantity(
+ std::max<unsigned>(UserAlign, NaturalAlign.getQuantity()));
+ }
+ }
+ return llvm::None;
+}
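
Hypothetical usage of the alignment rule implemented above:

    int x; // natural alignment assumed to be 4
    #pragma omp allocate(x) align(16)
    // getOMPAllocateAlignment returns CharUnits of max(16, 4) == 16.
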
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp
index 7b880c1354e1..de5cb913220a 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -425,9 +425,8 @@ void CodeGenFunction::EmitCXXGuardedInitBranch(llvm::Value *NeedsInit,
llvm::Function *CodeGenModule::CreateGlobalInitOrCleanUpFunction(
llvm::FunctionType *FTy, const Twine &Name, const CGFunctionInfo &FI,
- SourceLocation Loc, bool TLS) {
- llvm::Function *Fn = llvm::Function::Create(
- FTy, llvm::GlobalValue::InternalLinkage, Name, &getModule());
+ SourceLocation Loc, bool TLS, llvm::GlobalVariable::LinkageTypes Linkage) {
+ llvm::Function *Fn = llvm::Function::Create(FTy, Linkage, Name, &getModule());
if (!getLangOpts().AppleKext && !TLS) {
// Set the section if needed.
@@ -435,7 +434,8 @@ llvm::Function *CodeGenModule::CreateGlobalInitOrCleanUpFunction(
Fn->setSection(Section);
}
- SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
+ if (Linkage == llvm::GlobalVariable::InternalLinkage)
+ SetInternalFunctionAttributes(GlobalDecl(), Fn, FI);
Fn->setCallingConv(getRuntimeCC());
@@ -458,8 +458,8 @@ llvm::Function *CodeGenModule::CreateGlobalInitOrCleanUpFunction(
!isInNoSanitizeList(SanitizerKind::KernelHWAddress, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
- if (getLangOpts().Sanitize.has(SanitizerKind::MemTag) &&
- !isInNoSanitizeList(SanitizerKind::MemTag, Fn, Loc))
+ if (getLangOpts().Sanitize.has(SanitizerKind::MemtagStack) &&
+ !isInNoSanitizeList(SanitizerKind::MemtagStack, Fn, Loc))
Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
if (getLangOpts().Sanitize.has(SanitizerKind::Thread) &&
@@ -707,7 +707,7 @@ CodeGenModule::EmitCXXGlobalInitFunc() {
// dynamic resource allocation on the device and program scope variables are
// destroyed by the runtime when program is released.
if (getLangOpts().OpenCL) {
- GenOpenCLArgMetadata(Fn);
+ GenKernelArgMetadata(Fn);
Fn->setCallingConv(llvm::CallingConv::SPIR_KERNEL);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGException.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGException.cpp
index 91ecbecc843f..76c6beb090a9 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGException.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGException.cpp
@@ -1845,7 +1845,7 @@ Address CodeGenFunction::recoverAddrOfEscapedLocal(CodeGenFunction &ParentCGF,
llvm::Value *ChildVar =
Builder.CreateBitCast(RecoverCall, ParentVar.getType());
ChildVar->setName(ParentVar.getName());
- return Address(ChildVar, ParentVar.getAlignment());
+ return ParentVar.withPointer(ChildVar);
}
void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
@@ -1931,7 +1931,8 @@ void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
FrameRecoverFn, {ParentI8Fn, ParentFP,
llvm::ConstantInt::get(Int32Ty, FrameEscapeIdx)});
ParentFP = Builder.CreateBitCast(ParentFP, CGM.VoidPtrPtrTy);
- ParentFP = Builder.CreateLoad(Address(ParentFP, getPointerAlign()));
+ ParentFP = Builder.CreateLoad(
+ Address(ParentFP, CGM.VoidPtrTy, getPointerAlign()));
}
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
index 2a9b108c31bc..cbeb6c938bee 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExpr.cpp
@@ -189,7 +189,17 @@ llvm::Value *CodeGenFunction::EvaluateExprAsBool(const Expr *E) {
/// ignoring the result.
void CodeGenFunction::EmitIgnoredExpr(const Expr *E) {
if (E->isPRValue())
- return (void) EmitAnyExpr(E, AggValueSlot::ignored(), true);
+ return (void)EmitAnyExpr(E, AggValueSlot::ignored(), true);
+
+ // If this is a conditional operator whose result is a bitfield, we can
+ // special-case its emission. The normal 'EmitLValue' path is particularly
+ // difficult to codegen for, since creating a single "LValue" for two
+ // differently sized arguments is not really doable.
+ if (const auto *CondOp = dyn_cast<AbstractConditionalOperator>(
+ E->IgnoreParenNoopCasts(getContext()))) {
+ if (CondOp->getObjectKind() == OK_BitField)
+ return EmitIgnoredConditionalOperator(CondOp);
+ }
// Just emit it as an l-value and drop the result.
EmitLValue(E);
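
A minimal example of the bitfield conditional special case above:

    struct B { int f : 3; };
    void g(bool c, B &a, B &b) {
      c ? a.f : b.f; // ignored bitfield glvalue: routed through
                     // EmitIgnoredConditionalOperator.
    }
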
@@ -407,7 +417,7 @@ static Address createReferenceTemporary(CodeGenFunction &CGF,
GV->getValueType()->getPointerTo(
CGF.getContext().getTargetAddressSpace(LangAS::Default)));
// FIXME: Should we put the new global into a COMDAT?
- return Address(C, alignment);
+ return Address(C, GV->getValueType(), alignment);
}
return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
}
@@ -441,10 +451,10 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
ownership != Qualifiers::OCL_ExplicitNone) {
Address Object = createReferenceTemporary(*this, M, E);
if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
- Object = Address(llvm::ConstantExpr::getBitCast(Var,
- ConvertTypeForMem(E->getType())
- ->getPointerTo(Object.getAddressSpace())),
- Object.getAlignment());
+ llvm::Type *Ty = ConvertTypeForMem(E->getType());
+ Object = Address(llvm::ConstantExpr::getBitCast(
+ Var, Ty->getPointerTo(Object.getAddressSpace())),
+ Ty, Object.getAlignment());
// createReferenceTemporary will promote the temporary to a global with a
// constant initializer if it can. It can only do this to a value of
@@ -499,9 +509,11 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
Address Object = createReferenceTemporary(*this, M, E, &Alloca);
if (auto *Var = dyn_cast<llvm::GlobalVariable>(
Object.getPointer()->stripPointerCasts())) {
+ llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
Object = Address(llvm::ConstantExpr::getBitCast(
cast<llvm::Constant>(Object.getPointer()),
- ConvertTypeForMem(E->getType())->getPointerTo()),
+ TemporaryType->getPointerTo()),
+ TemporaryType,
Object.getAlignment());
// If the temporary is a global and has a constant initializer or is a
// constant temporary that we promoted to a global, we may have already
@@ -745,23 +757,23 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
}
}
- uint64_t AlignVal = 0;
+ llvm::MaybeAlign AlignVal;
llvm::Value *PtrAsInt = nullptr;
if (SanOpts.has(SanitizerKind::Alignment) &&
!SkippedChecks.has(SanitizerKind::Alignment)) {
- AlignVal = Alignment.getQuantity();
+ AlignVal = Alignment.getAsMaybeAlign();
if (!Ty->isIncompleteType() && !AlignVal)
AlignVal = CGM.getNaturalTypeAlignment(Ty, nullptr, nullptr,
/*ForPointeeType=*/true)
- .getQuantity();
+ .getAsMaybeAlign();
// The glvalue must be suitably aligned.
- if (AlignVal > 1 &&
- (!PtrToAlloca || PtrToAlloca->getAlignment() < AlignVal)) {
+ if (AlignVal && *AlignVal > llvm::Align(1) &&
+ (!PtrToAlloca || PtrToAlloca->getAlign() < *AlignVal)) {
PtrAsInt = Builder.CreatePtrToInt(Ptr, IntPtrTy);
llvm::Value *Align = Builder.CreateAnd(
- PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal - 1));
+ PtrAsInt, llvm::ConstantInt::get(IntPtrTy, AlignVal->value() - 1));
llvm::Value *Aligned =
Builder.CreateICmpEQ(Align, llvm::ConstantInt::get(IntPtrTy, 0));
if (Aligned != True)
@@ -770,12 +782,9 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
}
if (Checks.size() > 0) {
- // Make sure we're not losing information. Alignment needs to be a power of
- // 2
- assert(!AlignVal || (uint64_t)1 << llvm::Log2_64(AlignVal) == AlignVal);
llvm::Constant *StaticData[] = {
EmitCheckSourceLocation(Loc), EmitCheckTypeDescriptor(Ty),
- llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2_64(AlignVal) : 1),
+ llvm::ConstantInt::get(Int8Ty, AlignVal ? llvm::Log2(*AlignVal) : 1),
llvm::ConstantInt::get(Int8Ty, TCK)};
EmitCheck(Checks, SanitizerHandler::TypeMismatch, StaticData,
PtrAsInt ? PtrAsInt : Ptr);
@@ -821,7 +830,8 @@ void CodeGenFunction::EmitTypeCheck(TypeCheckKind TCK, SourceLocation Loc,
// Load the vptr, and compute hash_16_bytes(TypeHash, vptr).
llvm::Value *Low = llvm::ConstantInt::get(Int64Ty, TypeHash);
llvm::Type *VPtrTy = llvm::PointerType::get(IntPtrTy, 0);
- Address VPtrAddr(Builder.CreateBitCast(Ptr, VPtrTy), getPointerAlign());
+ Address VPtrAddr(Builder.CreateBitCast(Ptr, VPtrTy), IntPtrTy,
+ getPointerAlign());
llvm::Value *VPtrVal = Builder.CreateLoad(VPtrAddr);
llvm::Value *High = Builder.CreateZExt(VPtrVal, Int64Ty);
@@ -1106,17 +1116,17 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
if (SanOpts.has(SanitizerKind::CFIUnrelatedCast) &&
CE->getCastKind() == CK_BitCast) {
if (auto PT = E->getType()->getAs<PointerType>())
- EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr.getPointer(),
+ EmitVTablePtrCheckForCast(PT->getPointeeType(), Addr,
/*MayBeNull=*/true,
CodeGenFunction::CFITCK_UnrelatedCast,
CE->getBeginLoc());
}
- if (CE->getCastKind() == CK_AddressSpaceConversion)
- return Builder.CreateAddrSpaceCast(Addr, ConvertType(E->getType()));
-
llvm::Type *ElemTy = ConvertTypeForMem(E->getType()->getPointeeType());
- return Builder.CreateElementBitCast(Addr, ElemTy);
+ Addr = Builder.CreateElementBitCast(Addr, ElemTy);
+ if (CE->getCastKind() == CK_AddressSpaceConversion)
+ Addr = Builder.CreateAddrSpaceCast(Addr, ConvertType(E->getType()));
+ return Addr;
}
break;
@@ -1157,6 +1167,22 @@ Address CodeGenFunction::EmitPointerWithAlignment(const Expr *E,
}
}
+ // std::addressof and variants.
+ if (auto *Call = dyn_cast<CallExpr>(E)) {
+ switch (Call->getBuiltinCallee()) {
+ default:
+ break;
+ case Builtin::BIaddressof:
+ case Builtin::BI__addressof:
+ case Builtin::BI__builtin_addressof: {
+ LValue LV = EmitLValue(Call->getArg(0));
+ if (BaseInfo) *BaseInfo = LV.getBaseInfo();
+ if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
+ return LV.getAddress(*this);
+ }
+ }
+ }
+
// TODO: conditional operators, comma.
// Otherwise, use the alignment of the type.
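
A sketch of what the new builtin handling above preserves:

    alignas(16) int x;
    int *p = __builtin_addressof(x);
    // EmitPointerWithAlignment forwards the LValue's alignment (16) and
    // TBAA info from 'x' instead of falling back to the pointee type's
    // natural alignment.
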
@@ -1208,9 +1234,10 @@ RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
const char *Name) {
ErrorUnsupported(E, Name);
- llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
- return MakeAddrLValue(Address(llvm::UndefValue::get(Ty), CharUnits::One()),
- E->getType());
+ llvm::Type *ElTy = ConvertType(E->getType());
+ llvm::Type *Ty = llvm::PointerType::getUnqual(ElTy);
+ return MakeAddrLValue(
+ Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
}
bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
@@ -1703,27 +1730,42 @@ llvm::Value *CodeGenFunction::EmitLoadOfScalar(Address Addr, bool Volatile,
LValueBaseInfo BaseInfo,
TBAAAccessInfo TBAAInfo,
bool isNontemporal) {
- if (!CGM.getCodeGenOpts().PreserveVec3Type) {
- // For better performance, handle vector loads differently.
- if (Ty->isVectorType()) {
- const llvm::Type *EltTy = Addr.getElementType();
-
- const auto *VTy = cast<llvm::FixedVectorType>(EltTy);
-
- // Handle vectors of size 3 like size 4 for better performance.
- if (VTy->getNumElements() == 3) {
-
- // Bitcast to vec4 type.
- auto *vec4Ty = llvm::FixedVectorType::get(VTy->getElementType(), 4);
- Address Cast = Builder.CreateElementBitCast(Addr, vec4Ty, "castToVec4");
- // Now load value.
- llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
-
- // Shuffle vector to get vec3.
- V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2},
- "extractVec");
- return EmitFromMemory(V, Ty);
- }
+ if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
+ // Boolean vectors use `iN` as storage type.
+ if (ClangVecTy->isExtVectorBoolType()) {
+ llvm::Type *ValTy = ConvertType(Ty);
+ unsigned ValNumElems =
+ cast<llvm::FixedVectorType>(ValTy)->getNumElements();
+ // Load the `iP` storage object (P is the padded vector size).
+ auto *RawIntV = Builder.CreateLoad(Addr, Volatile, "load_bits");
+ const auto *RawIntTy = RawIntV->getType();
+ assert(RawIntTy->isIntegerTy() && "compressed iN storage for bitvectors");
+ // Bitcast iP --> <P x i1>.
+ auto *PaddedVecTy = llvm::FixedVectorType::get(
+ Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
+ llvm::Value *V = Builder.CreateBitCast(RawIntV, PaddedVecTy);
+ // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
+ V = emitBoolVecConversion(V, ValNumElems, "extractvec");
+
+ return EmitFromMemory(V, Ty);
+ }
+
+ // Handle vectors of size 3 like size 4 for better performance.
+ const llvm::Type *EltTy = Addr.getElementType();
+ const auto *VTy = cast<llvm::FixedVectorType>(EltTy);
+
+ if (!CGM.getCodeGenOpts().PreserveVec3Type && VTy->getNumElements() == 3) {
+
+ // Bitcast to vec4 type.
+ llvm::VectorType *vec4Ty =
+ llvm::FixedVectorType::get(VTy->getElementType(), 4);
+ Address Cast = Builder.CreateElementBitCast(Addr, vec4Ty, "castToVec4");
+ // Now load value.
+ llvm::Value *V = Builder.CreateLoad(Cast, Volatile, "loadVec4");
+
+ // Shuffle vector to get vec3.
+ V = Builder.CreateShuffleVector(V, ArrayRef<int>{0, 1, 2}, "extractVec");
+ return EmitFromMemory(V, Ty);
}
}
@@ -1774,6 +1816,17 @@ llvm::Value *CodeGenFunction::EmitFromMemory(llvm::Value *Value, QualType Ty) {
"wrong value rep of bool");
return Builder.CreateTrunc(Value, Builder.getInt1Ty(), "tobool");
}
+ if (Ty->isExtVectorBoolType()) {
+ const auto *RawIntTy = Value->getType();
+ // Bitcast iP --> <P x i1>.
+ auto *PaddedVecTy = llvm::FixedVectorType::get(
+ Builder.getInt1Ty(), RawIntTy->getPrimitiveSizeInBits());
+ auto *V = Builder.CreateBitCast(Value, PaddedVecTy);
+ // Shuffle <P x i1> --> <N x i1> (N is the actual bit size).
+ llvm::Type *ValTy = ConvertType(Ty);
+ unsigned ValNumElems = cast<llvm::FixedVectorType>(ValTy)->getNumElements();
+ return emitBoolVecConversion(V, ValNumElems, "extractvec");
+ }
return Value;
}
@@ -1818,11 +1871,18 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *Value, Address Addr,
LValueBaseInfo BaseInfo,
TBAAAccessInfo TBAAInfo,
bool isInit, bool isNontemporal) {
- if (!CGM.getCodeGenOpts().PreserveVec3Type) {
- // Handle vectors differently to get better performance.
- if (Ty->isVectorType()) {
- llvm::Type *SrcTy = Value->getType();
- auto *VecTy = dyn_cast<llvm::VectorType>(SrcTy);
+ llvm::Type *SrcTy = Value->getType();
+ if (const auto *ClangVecTy = Ty->getAs<VectorType>()) {
+ auto *VecTy = dyn_cast<llvm::FixedVectorType>(SrcTy);
+ if (VecTy && ClangVecTy->isExtVectorBoolType()) {
+ auto *MemIntTy = cast<llvm::IntegerType>(Addr.getElementType());
+ // Expand to the memory bit width.
+ unsigned MemNumElems = MemIntTy->getPrimitiveSizeInBits();
+ // <N x i1> --> <P x i1>.
+ Value = emitBoolVecConversion(Value, MemNumElems, "insertvec");
+ // <P x i1> --> iP.
+ Value = Builder.CreateBitCast(Value, MemIntTy);
+ } else if (!CGM.getCodeGenOpts().PreserveVec3Type) {
// Handle vec3 special.
if (VecTy && cast<llvm::FixedVectorType>(VecTy)->getNumElements() == 3) {
// Our source is a vec3, do a shuffle vector to make it a vec4.
@@ -1932,7 +1992,7 @@ RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
llvm::Value *Idx = LV.getMatrixIdx();
if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
const auto *const MatTy = LV.getType()->castAs<ConstantMatrixType>();
- llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
+ llvm::MatrixBuilder MB(Builder);
MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
}
llvm::LoadInst *Load =
@@ -2059,8 +2119,19 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
// Read/modify/write the vector, inserting the new element.
llvm::Value *Vec = Builder.CreateLoad(Dst.getVectorAddress(),
Dst.isVolatileQualified());
+ auto *IRStoreTy = dyn_cast<llvm::IntegerType>(Vec->getType());
+ if (IRStoreTy) {
+ auto *IRVecTy = llvm::FixedVectorType::get(
+ Builder.getInt1Ty(), IRStoreTy->getPrimitiveSizeInBits());
+ Vec = Builder.CreateBitCast(Vec, IRVecTy);
+ // iN --> <N x i1>.
+ }
Vec = Builder.CreateInsertElement(Vec, Src.getScalarVal(),
Dst.getVectorIdx(), "vecins");
+ if (IRStoreTy) {
+ // <N x i1> --> iN.
+ Vec = Builder.CreateBitCast(Vec, IRStoreTy);
+ }
Builder.CreateStore(Vec, Dst.getVectorAddress(),
Dst.isVolatileQualified());
return;
@@ -2078,7 +2149,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
llvm::Value *Idx = Dst.getMatrixIdx();
if (CGM.getCodeGenOpts().OptimizationLevel > 0) {
const auto *const MatTy = Dst.getType()->castAs<ConstantMatrixType>();
- llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
+ llvm::MatrixBuilder MB(Builder);
MB.CreateIndexAssumption(Idx, MatTy->getNumElementsFlattened());
}
llvm::Instruction *Load = Builder.CreateLoad(Dst.getMatrixAddress());
@@ -2499,9 +2570,10 @@ Address CodeGenFunction::EmitLoadOfPointer(Address Ptr,
LValueBaseInfo *BaseInfo,
TBAAAccessInfo *TBAAInfo) {
llvm::Value *Addr = Builder.CreateLoad(Ptr);
- return Address(Addr, CGM.getNaturalTypeAlignment(PtrTy->getPointeeType(),
- BaseInfo, TBAAInfo,
- /*forPointeeType=*/true));
+ return Address(Addr, ConvertTypeForMem(PtrTy->getPointeeType()),
+ CGM.getNaturalTypeAlignment(PtrTy->getPointeeType(), BaseInfo,
+ TBAAInfo,
+ /*forPointeeType=*/true));
}
LValue CodeGenFunction::EmitLoadOfPointerLValue(Address PtrAddr,
@@ -2701,8 +2773,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
llvm::Type *VarTy = getTypes().ConvertTypeForMem(VD->getType());
auto *PTy = llvm::PointerType::get(
VarTy, getContext().getTargetAddressSpace(VD->getType()));
- if (PTy != Addr.getType())
- Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy);
+ Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, PTy, VarTy);
} else {
// Should we be using the alignment of the constant pointer we emitted?
CharUnits Alignment =
@@ -2741,8 +2812,10 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
LValue CapLVal =
EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
CapturedStmtInfo->getContextValue());
+ Address LValueAddress = CapLVal.getAddress(*this);
CapLVal = MakeAddrLValue(
- Address(CapLVal.getPointer(*this), getContext().getDeclAlign(VD)),
+ Address(LValueAddress.getPointer(), LValueAddress.getElementType(),
+ getContext().getDeclAlign(VD)),
CapLVal.getType(), LValueBaseInfo(AlignmentSource::Decl),
CapLVal.getTBAAInfo());
// Mark lvalue as nontemporal if the variable is marked as nontemporal
@@ -3183,7 +3256,7 @@ static void emitCheckHandlerCall(CodeGenFunction &CGF,
B.addAttribute(llvm::Attribute::NoReturn)
.addAttribute(llvm::Attribute::NoUnwind);
}
- B.addAttribute(llvm::Attribute::UWTable);
+ B.addUWTableAttr(llvm::UWTableKind::Default);
llvm::FunctionCallee Fn = CGF.CGM.CreateRuntimeFunction(
FnType, FnName,
@@ -3431,7 +3504,8 @@ void CodeGenFunction::EmitCfiCheckFail() {
CfiCheckFailDataTy,
Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0,
0);
- Address CheckKindAddr(V, getIntAlign());
+
+ Address CheckKindAddr(V, Int8Ty, getIntAlign());
llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
llvm::Value *AllVtables = llvm::MetadataAsValue::get(
@@ -3808,7 +3882,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
// interfaces, so we can't rely on GEP to do this scaling
// correctly, so we need to cast to i8*. FIXME: is this actually
// true? A lot of other things in the fragile ABI would break...
- llvm::Type *OrigBaseTy = Addr.getType();
+ llvm::Type *OrigBaseElemTy = Addr.getElementType();
Addr = Builder.CreateElementBitCast(Addr, Int8Ty);
// Do the GEP.
@@ -3817,10 +3891,10 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
llvm::Value *EltPtr =
emitArraySubscriptGEP(*this, Addr.getElementType(), Addr.getPointer(),
ScaledIdx, false, SignedIndices, E->getExprLoc());
- Addr = Address(EltPtr, EltAlign);
+ Addr = Address(EltPtr, Addr.getElementType(), EltAlign);
// Cast back.
- Addr = Builder.CreateBitCast(Addr, OrigBaseTy);
+ Addr = Builder.CreateElementBitCast(Addr, OrigBaseElemTy);
} else if (const Expr *Array = isSimpleArrayDecayOperand(E->getBase())) {
// If this is A[i] where A is an array, the frontend will have decayed the
// base to be a ArrayToPointerDecay implicit cast. While correct, it is
@@ -3917,7 +3991,8 @@ static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
BaseInfo.mergeForCast(TypeBaseInfo);
TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
- return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)), Align);
+ return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)),
+ CGF.ConvertTypeForMem(ElTy), Align);
}
return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
}
@@ -4374,7 +4449,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
// fields may leak the real address of dynamic object, which could result
// in miscompilation when leaked pointer would be compared.
auto *stripped = Builder.CreateStripInvariantGroup(addr.getPointer());
- addr = Address(stripped, addr.getAlignment());
+ addr = Address(stripped, addr.getElementType(), addr.getAlignment());
}
}
@@ -4395,7 +4470,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
addr = Address(
Builder.CreatePreserveUnionAccessIndex(
addr.getPointer(), getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
- addr.getAlignment());
+ addr.getElementType(), addr.getAlignment());
}
if (FieldType->isReferenceType())
@@ -4518,94 +4593,140 @@ static Optional<LValue> EmitLValueOrThrowExpression(CodeGenFunction &CGF,
return CGF.EmitLValue(Operand);
}
-LValue CodeGenFunction::
-EmitConditionalOperatorLValue(const AbstractConditionalOperator *expr) {
- if (!expr->isGLValue()) {
- // ?: here should be an aggregate.
- assert(hasAggregateEvaluationKind(expr->getType()) &&
- "Unexpected conditional operator!");
- return EmitAggExprToLValue(expr);
- }
-
- OpaqueValueMapping binding(*this, expr);
-
- const Expr *condExpr = expr->getCond();
+namespace {
+// Handle the case where the condition constant-folds to a simple integer,
+// which means we don't have to emit separate true/false blocks.
+llvm::Optional<LValue> HandleConditionalOperatorLValueSimpleCase(
+ CodeGenFunction &CGF, const AbstractConditionalOperator *E) {
+ const Expr *condExpr = E->getCond();
bool CondExprBool;
- if (ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
- const Expr *live = expr->getTrueExpr(), *dead = expr->getFalseExpr();
- if (!CondExprBool) std::swap(live, dead);
+ if (CGF.ConstantFoldsToSimpleInteger(condExpr, CondExprBool)) {
+ const Expr *Live = E->getTrueExpr(), *Dead = E->getFalseExpr();
+ if (!CondExprBool)
+ std::swap(Live, Dead);
- if (!ContainsLabel(dead)) {
+ if (!CGF.ContainsLabel(Dead)) {
// If the true case is live, we need to track its region.
if (CondExprBool)
- incrementProfileCounter(expr);
+ CGF.incrementProfileCounter(E);
      // If it is a throw expression, we emit it and return an undefined lvalue
// because it can't be used.
- if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(live->IgnoreParens())) {
- EmitCXXThrowExpr(ThrowExpr);
- llvm::Type *ElemTy = ConvertType(dead->getType());
+ if (auto *ThrowExpr = dyn_cast<CXXThrowExpr>(Live->IgnoreParens())) {
+ CGF.EmitCXXThrowExpr(ThrowExpr);
+ llvm::Type *ElemTy = CGF.ConvertType(Dead->getType());
llvm::Type *Ty = llvm::PointerType::getUnqual(ElemTy);
- return MakeAddrLValue(
+ return CGF.MakeAddrLValue(
Address(llvm::UndefValue::get(Ty), ElemTy, CharUnits::One()),
- dead->getType());
+ Dead->getType());
}
- return EmitLValue(live);
+ return CGF.EmitLValue(Live);
}
}
+ return llvm::None;
+}
+struct ConditionalInfo {
+ llvm::BasicBlock *lhsBlock, *rhsBlock;
+ Optional<LValue> LHS, RHS;
+};
- llvm::BasicBlock *lhsBlock = createBasicBlock("cond.true");
- llvm::BasicBlock *rhsBlock = createBasicBlock("cond.false");
- llvm::BasicBlock *contBlock = createBasicBlock("cond.end");
-
- ConditionalEvaluation eval(*this);
- EmitBranchOnBoolExpr(condExpr, lhsBlock, rhsBlock, getProfileCount(expr));
+// Create and generate the 3 blocks for a conditional operator.
+// Leaves the 'current block' in the continuation basic block.
+template<typename FuncTy>
+ConditionalInfo EmitConditionalBlocks(CodeGenFunction &CGF,
+ const AbstractConditionalOperator *E,
+ const FuncTy &BranchGenFunc) {
+ ConditionalInfo Info{CGF.createBasicBlock("cond.true"),
+ CGF.createBasicBlock("cond.false"), llvm::None,
+ llvm::None};
+ llvm::BasicBlock *endBlock = CGF.createBasicBlock("cond.end");
+
+ CodeGenFunction::ConditionalEvaluation eval(CGF);
+ CGF.EmitBranchOnBoolExpr(E->getCond(), Info.lhsBlock, Info.rhsBlock,
+ CGF.getProfileCount(E));
// Any temporaries created here are conditional.
- EmitBlock(lhsBlock);
- incrementProfileCounter(expr);
- eval.begin(*this);
- Optional<LValue> lhs =
- EmitLValueOrThrowExpression(*this, expr->getTrueExpr());
- eval.end(*this);
-
- if (lhs && !lhs->isSimple())
- return EmitUnsupportedLValue(expr, "conditional operator");
+ CGF.EmitBlock(Info.lhsBlock);
+ CGF.incrementProfileCounter(E);
+ eval.begin(CGF);
+ Info.LHS = BranchGenFunc(CGF, E->getTrueExpr());
+ eval.end(CGF);
+ Info.lhsBlock = CGF.Builder.GetInsertBlock();
- lhsBlock = Builder.GetInsertBlock();
- if (lhs)
- Builder.CreateBr(contBlock);
+ if (Info.LHS)
+ CGF.Builder.CreateBr(endBlock);
// Any temporaries created here are conditional.
- EmitBlock(rhsBlock);
- eval.begin(*this);
- Optional<LValue> rhs =
- EmitLValueOrThrowExpression(*this, expr->getFalseExpr());
- eval.end(*this);
- if (rhs && !rhs->isSimple())
- return EmitUnsupportedLValue(expr, "conditional operator");
- rhsBlock = Builder.GetInsertBlock();
+ CGF.EmitBlock(Info.rhsBlock);
+ eval.begin(CGF);
+ Info.RHS = BranchGenFunc(CGF, E->getFalseExpr());
+ eval.end(CGF);
+ Info.rhsBlock = CGF.Builder.GetInsertBlock();
+ CGF.EmitBlock(endBlock);
- EmitBlock(contBlock);
+ return Info;
+}
+} // namespace
+
+void CodeGenFunction::EmitIgnoredConditionalOperator(
+ const AbstractConditionalOperator *E) {
+ if (!E->isGLValue()) {
+ // ?: here should be an aggregate.
+ assert(hasAggregateEvaluationKind(E->getType()) &&
+ "Unexpected conditional operator!");
+ return (void)EmitAggExprToLValue(E);
+ }
+
+ OpaqueValueMapping binding(*this, E);
+ if (HandleConditionalOperatorLValueSimpleCase(*this, E))
+ return;
- if (lhs && rhs) {
- Address lhsAddr = lhs->getAddress(*this);
- Address rhsAddr = rhs->getAddress(*this);
+ EmitConditionalBlocks(*this, E, [](CodeGenFunction &CGF, const Expr *E) {
+ CGF.EmitIgnoredExpr(E);
+ return LValue{};
+ });
+}
+LValue CodeGenFunction::EmitConditionalOperatorLValue(
+ const AbstractConditionalOperator *expr) {
+ if (!expr->isGLValue()) {
+ // ?: here should be an aggregate.
+ assert(hasAggregateEvaluationKind(expr->getType()) &&
+ "Unexpected conditional operator!");
+ return EmitAggExprToLValue(expr);
+ }
+
+ OpaqueValueMapping binding(*this, expr);
+ if (llvm::Optional<LValue> Res =
+ HandleConditionalOperatorLValueSimpleCase(*this, expr))
+ return *Res;
+
+ ConditionalInfo Info = EmitConditionalBlocks(
+ *this, expr, [](CodeGenFunction &CGF, const Expr *E) {
+ return EmitLValueOrThrowExpression(CGF, E);
+ });
+
+ if ((Info.LHS && !Info.LHS->isSimple()) ||
+ (Info.RHS && !Info.RHS->isSimple()))
+ return EmitUnsupportedLValue(expr, "conditional operator");
+
+ if (Info.LHS && Info.RHS) {
+ Address lhsAddr = Info.LHS->getAddress(*this);
+ Address rhsAddr = Info.RHS->getAddress(*this);
llvm::PHINode *phi = Builder.CreatePHI(lhsAddr.getType(), 2, "cond-lvalue");
- phi->addIncoming(lhsAddr.getPointer(), lhsBlock);
- phi->addIncoming(rhsAddr.getPointer(), rhsBlock);
+ phi->addIncoming(lhsAddr.getPointer(), Info.lhsBlock);
+ phi->addIncoming(rhsAddr.getPointer(), Info.rhsBlock);
Address result(phi, lhsAddr.getElementType(),
std::min(lhsAddr.getAlignment(), rhsAddr.getAlignment()));
AlignmentSource alignSource =
- std::max(lhs->getBaseInfo().getAlignmentSource(),
- rhs->getBaseInfo().getAlignmentSource());
+ std::max(Info.LHS->getBaseInfo().getAlignmentSource(),
+ Info.RHS->getBaseInfo().getAlignmentSource());
TBAAAccessInfo TBAAInfo = CGM.mergeTBAAInfoForConditionalOperator(
- lhs->getTBAAInfo(), rhs->getTBAAInfo());
+ Info.LHS->getTBAAInfo(), Info.RHS->getTBAAInfo());
return MakeAddrLValue(result, expr->getType(), LValueBaseInfo(alignSource),
TBAAInfo);
} else {
- assert((lhs || rhs) &&
+ assert((Info.LHS || Info.RHS) &&
"both operands of glvalue conditional are throw-expressions?");
- return lhs ? *lhs : *rhs;
+ return Info.LHS ? *Info.LHS : *Info.RHS;
}
}
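The extracted HandleConditionalOperatorLValueSimpleCase covers conditions that constant-fold, so only one arm is ever emitted. A source-level illustration of that fast path:

    // The condition folds to true, so codegen emits only the `a` arm; the
    // dead `b` arm is skipped entirely (unless it contains labels).
    int a, b;
    int &r = 1 ? a : b;   // lowered as if it were `int &r = a;`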
@@ -4747,7 +4868,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
Derived.getPointer(), E->getType());
if (SanOpts.has(SanitizerKind::CFIDerivedCast))
- EmitVTablePtrCheckForCast(E->getType(), Derived.getPointer(),
+ EmitVTablePtrCheckForCast(E->getType(), Derived,
/*MayBeNull=*/false, CFITCK_DerivedCast,
E->getBeginLoc());
@@ -4765,7 +4886,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));
if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
- EmitVTablePtrCheckForCast(E->getType(), V.getPointer(),
+ EmitVTablePtrCheckForCast(E->getType(), V,
/*MayBeNull=*/false, CFITCK_UnrelatedCast,
E->getBeginLoc());
@@ -4779,7 +4900,8 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
*this, LV.getPointer(*this),
E->getSubExpr()->getType().getAddressSpace(),
E->getType().getAddressSpace(), ConvertType(DestTy));
- return MakeAddrLValue(Address(V, LV.getAddress(*this).getAlignment()),
+ return MakeAddrLValue(Address(V, ConvertTypeForMem(E->getType()),
+ LV.getAddress(*this).getAlignment()),
E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
}
case CK_ObjCObjectLValueCast: {
@@ -4909,7 +5031,16 @@ static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) {
const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
if (auto builtinID = FD->getBuiltinID()) {
+ std::string NoBuiltinFD = ("no-builtin-" + FD->getName()).str();
+ std::string NoBuiltins = "no-builtins";
std::string FDInlineName = (FD->getName() + ".inline").str();
+
+ bool IsPredefinedLibFunction =
+ CGF.getContext().BuiltinInfo.isPredefinedLibFunction(builtinID);
+ bool HasAttributeNoBuiltin =
+ CGF.CurFn->getAttributes().hasFnAttr(NoBuiltinFD) ||
+ CGF.CurFn->getAttributes().hasFnAttr(NoBuiltins);
+
  // When directly calling an inline builtin, call it through its mangled
// name to make it clear it's not the actual builtin.
if (CGF.CurFn->getName() != FDInlineName &&
@@ -4929,8 +5060,11 @@ static CGCallee EmitDirectCallee(CodeGenFunction &CGF, GlobalDecl GD) {
// Replaceable builtins provide their own implementation of a builtin. If we
// are in an inline builtin implementation, avoid trivial infinite
- // recursion.
- else
+ // recursion. Honor __attribute__((no_builtin("foo"))) or
+ // __attribute__((no_builtin)) on the current function unless foo is
+ // not a predefined library function, in which case we must generate
+ // the builtin no matter what.
+ else if (!IsPredefinedLibFunction || !HasAttributeNoBuiltin)
return CGCallee::forBuiltin(builtinID, FD);
}
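The new guard honors the no_builtin function attribute when deciding whether to lower a call through the builtin machinery. An illustrative source-level use (the function name is hypothetical):

    #include <string.h>
    // Calls to memcpy inside copy_bytes are not treated as the builtin, so
    // an inline builtin implementation won't be entered recursively.
    __attribute__((no_builtin("memcpy")))
    void *copy_bytes(void *d, const void *s, size_t n) {
      return memcpy(d, s, n);   // emitted as a plain library call
    }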
@@ -5343,7 +5477,8 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
llvm::Value *Handle = Callee.getFunctionPointer();
auto *Cast =
Builder.CreateBitCast(Handle, Handle->getType()->getPointerTo());
- auto *Stub = Builder.CreateLoad(Address(Cast, CGM.getPointerAlign()));
+ auto *Stub = Builder.CreateLoad(
+ Address(Cast, Handle->getType(), CGM.getPointerAlign()));
Callee.setFunctionPointer(Stub);
}
llvm::CallBase *CallOrInvoke = nullptr;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp
index 3a3acf98f309..3cc144361542 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprCXX.cpp
@@ -548,11 +548,12 @@ static void EmitNullBaseClassInitialization(CodeGenFunction &CGF,
/*isConstant=*/true, llvm::GlobalVariable::PrivateLinkage,
NullConstantForBase, Twine());
- CharUnits Align = std::max(Layout.getNonVirtualAlignment(),
- DestPtr.getAlignment());
+ CharUnits Align =
+ std::max(Layout.getNonVirtualAlignment(), DestPtr.getAlignment());
NullVariable->setAlignment(Align.getAsAlign());
- Address SrcPtr = Address(CGF.EmitCastToVoidPtr(NullVariable), Align);
+ Address SrcPtr =
+ Address(CGF.EmitCastToVoidPtr(NullVariable), CGF.Int8Ty, Align);
// Get and call the appropriate llvm.memcpy overload.
for (std::pair<CharUnits, CharUnits> Store : Stores) {
@@ -1244,10 +1245,10 @@ void CodeGenFunction::EmitNewArrayInitializer(
// Set up the current-element phi.
llvm::PHINode *CurPtrPhi =
- Builder.CreatePHI(CurPtr.getType(), 2, "array.cur");
+ Builder.CreatePHI(CurPtr.getType(), 2, "array.cur");
CurPtrPhi->addIncoming(CurPtr.getPointer(), EntryBB);
- CurPtr = Address(CurPtrPhi, ElementAlign);
+ CurPtr = Address(CurPtrPhi, CurPtr.getElementType(), ElementAlign);
// Store the new Cleanup position for irregular Cleanups.
if (EndOfInit.isValid())
@@ -1736,13 +1737,14 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
EmitNewInitializer(*this, E, allocType, elementTy, result, numElements,
allocSizeWithoutCookie);
+ llvm::Value *resultPtr = result.getPointer();
if (E->isArray()) {
// NewPtr is a pointer to the base element type. If we're
// allocating an array of arrays, we'll need to cast back to the
// array pointer type.
llvm::Type *resultType = ConvertTypeForMem(E->getType());
- if (result.getType() != resultType)
- result = Builder.CreateBitCast(result, resultType);
+ if (resultPtr->getType() != resultType)
+ resultPtr = Builder.CreateBitCast(resultPtr, resultType);
}
// Deactivate the 'operator delete' cleanup if we finished
@@ -1752,7 +1754,6 @@ llvm::Value *CodeGenFunction::EmitCXXNewExpr(const CXXNewExpr *E) {
cleanupDominator->eraseFromParent();
}
- llvm::Value *resultPtr = result.getPointer();
if (nullCheck) {
conditional.end(*this);
@@ -1796,7 +1797,8 @@ void CodeGenFunction::EmitDeleteCall(const FunctionDecl *DeleteFD,
CharUnits Align = CGM.getNaturalTypeAlignment(DDTag);
DestroyingDeleteTag = CreateTempAlloca(Ty, "destroying.delete.tag");
DestroyingDeleteTag->setAlignment(Align.getAsAlign());
- DeleteArgs.add(RValue::getAggregate(Address(DestroyingDeleteTag, Align)), DDTag);
+ DeleteArgs.add(
+ RValue::getAggregate(Address(DestroyingDeleteTag, Ty, Align)), DDTag);
}
// Pass the size if the delete function has a size_t parameter.
@@ -2101,7 +2103,7 @@ void CodeGenFunction::EmitCXXDeleteExpr(const CXXDeleteExpr *E) {
Ptr = Address(Builder.CreateInBoundsGEP(Ptr.getElementType(),
Ptr.getPointer(), GEP, "del.first"),
- Ptr.getAlignment());
+ ConvertTypeForMem(DeleteTy), Ptr.getAlignment());
}
assert(ConvertTypeForMem(DeleteTy) == Ptr.getElementType());
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp
index dd2f8ac3eca4..7ff7c1102eab 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprConstant.cpp
@@ -439,22 +439,33 @@ llvm::Constant *ConstantAggregateBuilder::buildFrom(
// Can't emit as an array, carry on to emit as a struct.
}
+ // The size of the constant we plan to generate. This is usually just
+ // the size of the initialized type, but in AllowOversized mode (i.e.
+ // flexible array init), it can be larger.
CharUnits DesiredSize = Utils.getSize(DesiredTy);
+ if (Size > DesiredSize) {
+ assert(AllowOversized && "Elems are oversized");
+ DesiredSize = Size;
+ }
+
+ // The natural alignment of an unpacked LLVM struct with the given elements.
CharUnits Align = CharUnits::One();
for (llvm::Constant *C : Elems)
Align = std::max(Align, Utils.getAlignment(C));
+
+ // The natural size of an unpacked LLVM struct with the given elements.
CharUnits AlignedSize = Size.alignTo(Align);
bool Packed = false;
ArrayRef<llvm::Constant*> UnpackedElems = Elems;
llvm::SmallVector<llvm::Constant*, 32> UnpackedElemStorage;
- if ((DesiredSize < AlignedSize && !AllowOversized) ||
- DesiredSize.alignTo(Align) != DesiredSize) {
- // The natural layout would be the wrong size; force use of a packed layout.
+ if (DesiredSize < AlignedSize || DesiredSize.alignTo(Align) != DesiredSize) {
+ // The natural layout would be too big; force use of a packed layout.
NaturalLayout = false;
Packed = true;
} else if (DesiredSize > AlignedSize) {
- // The constant would be too small. Add padding to fix it.
+ // The natural layout would be too small. Add padding to fix it. (This
+ // is ignored if we choose a packed layout.)
UnpackedElemStorage.assign(Elems.begin(), Elems.end());
UnpackedElemStorage.push_back(Utils.getPadding(DesiredSize - Size));
UnpackedElems = UnpackedElemStorage;
@@ -482,7 +493,7 @@ llvm::Constant *ConstantAggregateBuilder::buildFrom(
// If we're using the packed layout, pad it out to the desired size if
// necessary.
if (Packed) {
- assert((SizeSoFar <= DesiredSize || AllowOversized) &&
+ assert(SizeSoFar <= DesiredSize &&
"requested size is too small for contents");
if (SizeSoFar < DesiredSize)
PackedElems.push_back(Utils.getPadding(DesiredSize - SizeSoFar));
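AllowOversized corresponds to flexible-array-member initialization, where the emitted constant is legitimately larger than the declared type; the tightened assertions make that the only way Size may exceed DesiredSize. A source-level example of such an oversized constant:

    struct S { int n; char tail[]; };   // flexible array member
    static struct S s = { 3, "abc" };   // constant occupies more than sizeof(struct S)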
@@ -692,8 +703,8 @@ bool ConstStructBuilder::Build(InitListExpr *ILE, bool AllowOverwrite) {
!declaresSameEntity(ILE->getInitializedFieldInUnion(), Field))
continue;
- // Don't emit anonymous bitfields or zero-sized fields.
- if (Field->isUnnamedBitfield() || Field->isZeroSize(CGM.getContext()))
+ // Don't emit anonymous bitfields.
+ if (Field->isUnnamedBitfield())
continue;
// Get the initializer. A struct can include fields without initializers,
@@ -704,6 +715,14 @@ bool ConstStructBuilder::Build(InitListExpr *ILE, bool AllowOverwrite) {
if (Init && isa<NoInitExpr>(Init))
continue;
+ // Zero-sized fields are not emitted, but their initializers may still
+ // prevent emission of this struct as a constant.
+ if (Field->isZeroSize(CGM.getContext())) {
+ if (Init->HasSideEffects(CGM.getContext()))
+ return false;
+ continue;
+ }
+
// When emitting a DesignatedInitUpdateExpr, a nested InitListExpr
// represents additional overwriting of our current constant value, and not
// a new constant to emit independently.
@@ -1092,7 +1111,16 @@ public:
destAS, destTy);
}
- case CK_LValueToRValue:
+ case CK_LValueToRValue: {
+ // We don't really support doing lvalue-to-rvalue conversions here; any
+ // interesting conversions should be done in Evaluate(). But as a
+ // special case, allow compound literals to support the gcc extension
+ // allowing "struct x {int x;} x = (struct x) {};".
+ if (auto *E = dyn_cast<CompoundLiteralExpr>(subExpr->IgnoreParens()))
+ return Visit(E->getInitializer(), destType);
+ return nullptr;
+ }
+
case CK_AtomicToNonAtomic:
case CK_NonAtomicToAtomic:
case CK_NoOp:
@@ -1909,6 +1937,9 @@ ConstantLValueEmitter::tryEmitBase(const APValue::LValueBase &base) {
if (auto *GD = dyn_cast<MSGuidDecl>(D))
return CGM.GetAddrOfMSGuidDecl(GD);
+ if (auto *GCD = dyn_cast<UnnamedGlobalConstantDecl>(D))
+ return CGM.GetAddrOfUnnamedGlobalConstantDecl(GCD);
+
if (auto *TPO = dyn_cast<TemplateParamObjectDecl>(D))
return CGM.GetAddrOfTemplateParamObject(TPO);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp
index 4e8933fffe03..b150aaa376eb 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGExprScalar.cpp
@@ -32,6 +32,7 @@
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/FixedPointBuilder.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
@@ -40,6 +41,7 @@
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/MatrixBuilder.h"
#include "llvm/IR/Module.h"
+#include "llvm/Support/TypeSize.h"
#include <cstdarg>
using namespace clang;
@@ -65,20 +67,14 @@ bool mayHaveIntegerOverflow(llvm::ConstantInt *LHS, llvm::ConstantInt *RHS,
const auto &LHSAP = LHS->getValue();
const auto &RHSAP = RHS->getValue();
if (Opcode == BO_Add) {
- if (Signed)
- Result = LHSAP.sadd_ov(RHSAP, Overflow);
- else
- Result = LHSAP.uadd_ov(RHSAP, Overflow);
+ Result = Signed ? LHSAP.sadd_ov(RHSAP, Overflow)
+ : LHSAP.uadd_ov(RHSAP, Overflow);
} else if (Opcode == BO_Sub) {
- if (Signed)
- Result = LHSAP.ssub_ov(RHSAP, Overflow);
- else
- Result = LHSAP.usub_ov(RHSAP, Overflow);
+ Result = Signed ? LHSAP.ssub_ov(RHSAP, Overflow)
+ : LHSAP.usub_ov(RHSAP, Overflow);
} else if (Opcode == BO_Mul) {
- if (Signed)
- Result = LHSAP.smul_ov(RHSAP, Overflow);
- else
- Result = LHSAP.umul_ov(RHSAP, Overflow);
+ Result = Signed ? LHSAP.smul_ov(RHSAP, Overflow)
+ : LHSAP.umul_ov(RHSAP, Overflow);
} else if (Opcode == BO_Div || Opcode == BO_Rem) {
if (Signed && !RHS->isZero())
Result = LHSAP.sdiv_ov(RHSAP, Overflow);
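Each APInt *_ov helper returns the (possibly wrapped) result and reports overflow through the by-reference flag, which is what lets the three operator cases collapse into ternaries. A small sketch:

    bool Ov = false;
    llvm::APInt Max(32, 0x7fffffff), One(32, 1);
    llvm::APInt R = Max.sadd_ov(One, Ov);   // Ov == true: signed INT_MAX + 1 wraps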
@@ -172,7 +168,7 @@ static llvm::Optional<QualType> getUnwidenedIntegerType(const ASTContext &Ctx,
/// Check if \p E is a widened promoted integer.
static bool IsWidenedIntegerOp(const ASTContext &Ctx, const Expr *E) {
- return getUnwidenedIntegerType(Ctx, E).hasValue();
+ return getUnwidenedIntegerType(Ctx, E).has_value();
}
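The hasValue()/getValueOr() to has_value()/value_or() renames throughout this patch track llvm::Optional converging on the std::optional interface; behavior is unchanged. For example:

    llvm::Optional<int> O;
    bool Present = O.has_value();   // false -- same as the old hasValue()
    int V = O.value_or(0);          // 0 -- same as the old getValueOr(0)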
/// Check if we can skip the overflow check for \p Op.
@@ -427,7 +423,8 @@ public:
if (Value *Result = ConstantEmitter(CGF).tryEmitConstantExpr(E)) {
if (E->isGLValue())
return CGF.Builder.CreateLoad(Address(
- Result, CGF.getContext().getTypeAlignInChars(E->getType())));
+ Result, CGF.ConvertTypeForMem(E->getType()),
+ CGF.getContext().getTypeAlignInChars(E->getType())));
return Result;
}
return Visit(E->getSubExpr());
@@ -731,7 +728,7 @@ public:
}
if (Ops.Ty->isConstantMatrixType()) {
- llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
+ llvm::MatrixBuilder MB(Builder);
// We need to check the types of the operands of the operator to get the
// correct matrix dimensions.
auto *BO = cast<BinaryOperator>(Ops.E);
@@ -1393,8 +1390,8 @@ Value *ScalarExprEmitter::EmitScalarConversion(Value *Src, QualType SrcType,
if (isa<llvm::VectorType>(SrcTy) || isa<llvm::VectorType>(DstTy)) {
// Allow bitcast from vector to integer/fp of the same size.
- unsigned SrcSize = SrcTy->getPrimitiveSizeInBits();
- unsigned DstSize = DstTy->getPrimitiveSizeInBits();
+ llvm::TypeSize SrcSize = SrcTy->getPrimitiveSizeInBits();
+ llvm::TypeSize DstSize = DstTy->getPrimitiveSizeInBits();
if (SrcSize == DstSize)
return Builder.CreateBitCast(Src, DstTy, "conv");
@@ -1606,7 +1603,7 @@ ScalarExprEmitter::VisitSYCLUniqueStableNameExpr(SYCLUniqueStableNameExpr *E) {
Context.getTargetInfo().getConstantAddressSpace();
llvm::Constant *GlobalConstStr = Builder.CreateGlobalStringPtr(
E->ComputeName(Context), "__usn_str",
- static_cast<unsigned>(GlobalAS.getValueOr(LangAS::Default)));
+ static_cast<unsigned>(GlobalAS.value_or(LangAS::Default)));
unsigned ExprAS = Context.getTargetAddressSpace(E->getType());
@@ -1770,7 +1767,8 @@ Value *ScalarExprEmitter::VisitArraySubscriptExpr(ArraySubscriptExpr *E) {
// loads the lvalue formed by the subscript expr. However, we have to be
// careful, because the base of a vector subscript is occasionally an rvalue,
// so we can't get it as an lvalue.
- if (!E->getBase()->getType()->isVectorType())
+ if (!E->getBase()->getType()->isVectorType() &&
+ !E->getBase()->getType()->isVLSTBuiltinType())
return EmitLoadOfLValue(E);
// Handle the vector case. The base must be a vector, the index must be an
@@ -1795,7 +1793,7 @@ Value *ScalarExprEmitter::VisitMatrixSubscriptExpr(MatrixSubscriptExpr *E) {
const auto *MatrixTy = E->getBase()->getType()->castAs<ConstantMatrixType>();
unsigned NumRows = MatrixTy->getNumRows();
- llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
+ llvm::MatrixBuilder MB(Builder);
Value *Idx = MB.CreateIndex(RowIdx, ColumnIdx, NumRows);
if (CGF.CGM.getCodeGenOpts().OptimizationLevel > 0)
MB.CreateIndexAssumption(Idx, MatrixTy->getNumElementsFlattened());
@@ -2045,11 +2043,16 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
}
if (CGF.SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
- if (auto PT = DestTy->getAs<PointerType>())
- CGF.EmitVTablePtrCheckForCast(PT->getPointeeType(), Src,
- /*MayBeNull=*/true,
- CodeGenFunction::CFITCK_UnrelatedCast,
- CE->getBeginLoc());
+ if (auto *PT = DestTy->getAs<PointerType>()) {
+ CGF.EmitVTablePtrCheckForCast(
+ PT->getPointeeType(),
+ Address(Src,
+ CGF.ConvertTypeForMem(
+ E->getType()->castAs<PointerType>()->getPointeeType()),
+ CGF.getPointerAlign()),
+ /*MayBeNull=*/true, CodeGenFunction::CFITCK_UnrelatedCast,
+ CE->getBeginLoc());
+ }
}
if (CGF.CGM.getCodeGenOpts().StrictVTablePointers) {
@@ -2081,8 +2084,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
}
// If Src is a fixed vector and Dst is a scalable vector, and both have the
- // same element type, use the llvm.experimental.vector.insert intrinsic to
- // perform the bitcast.
+ // same element type, use the llvm.vector.insert intrinsic to perform the
+ // bitcast.
if (const auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
if (const auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy)) {
// If we are casting a fixed i8 vector to a scalable 16 x i1 predicate
@@ -2093,7 +2096,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
if (ScalableDst == PredType &&
FixedSrc->getElementType() == Builder.getInt8Ty()) {
DstTy = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2);
- ScalableDst = dyn_cast<llvm::ScalableVectorType>(DstTy);
+ ScalableDst = cast<llvm::ScalableVectorType>(DstTy);
NeedsBitCast = true;
}
if (FixedSrc->getElementType() == ScalableDst->getElementType()) {
@@ -2109,8 +2112,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
}
// If Src is a scalable vector and Dst is a fixed vector, and both have the
- // same element type, use the llvm.experimental.vector.extract intrinsic to
- // perform the bitcast.
+ // same element type, use the llvm.vector.extract intrinsic to perform the
+ // bitcast.
if (const auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy)) {
if (const auto *FixedDst = dyn_cast<llvm::FixedVectorType>(DstTy)) {
// If we are casting a scalable 16 x i1 predicate vector to a fixed i8
@@ -2119,7 +2122,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
if (ScalableSrc == PredType &&
FixedDst->getElementType() == Builder.getInt8Ty()) {
SrcTy = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2);
- ScalableSrc = dyn_cast<llvm::ScalableVectorType>(SrcTy);
+ ScalableSrc = cast<llvm::ScalableVectorType>(SrcTy);
Src = Builder.CreateBitCast(Src, SrcTy);
}
if (ScalableSrc->getElementType() == FixedDst->getElementType()) {
@@ -2148,7 +2151,6 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
return EmitLoadOfLValue(DestLV, CE->getExprLoc());
}
-
return Builder.CreateBitCast(Src, DstTy);
}
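The intrinsic names in the comments above reflect the upstream rename of llvm.experimental.vector.insert/extract to llvm.vector.insert/extract; IRBuilder emits them via CreateInsertVector/CreateExtractVector. A hedged sketch of the fixed-to-scalable direction (names are illustrative):

    // Widen a fixed vector into a scalable one at index 0; both types must
    // share the same element type.
    llvm::Value *widenToScalable(llvm::IRBuilderBase &B, llvm::Value *Fixed,
                                 llvm::ScalableVectorType *DstTy) {
      llvm::Value *Undef = llvm::UndefValue::get(DstTy);
      return B.CreateInsertVector(DstTy, Undef, Fixed, B.getInt64(0));
    }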
case CK_AddressSpaceConversion: {
@@ -2204,10 +2206,10 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
Derived.getPointer(), DestTy->getPointeeType());
if (CGF.SanOpts.has(SanitizerKind::CFIDerivedCast))
- CGF.EmitVTablePtrCheckForCast(
- DestTy->getPointeeType(), Derived.getPointer(),
- /*MayBeNull=*/true, CodeGenFunction::CFITCK_DerivedCast,
- CE->getBeginLoc());
+ CGF.EmitVTablePtrCheckForCast(DestTy->getPointeeType(), Derived,
+ /*MayBeNull=*/true,
+ CodeGenFunction::CFITCK_DerivedCast,
+ CE->getBeginLoc());
return Derived.getPointer();
}
@@ -2331,9 +2333,10 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
}
case CK_VectorSplat: {
llvm::Type *DstTy = ConvertType(DestTy);
- Value *Elt = Visit(const_cast<Expr*>(E));
+ Value *Elt = Visit(const_cast<Expr *>(E));
// Splat the element across to all elements
- unsigned NumElements = cast<llvm::FixedVectorType>(DstTy)->getNumElements();
+ llvm::ElementCount NumElements =
+ cast<llvm::VectorType>(DstTy)->getElementCount();
return Builder.CreateVectorSplat(NumElements, Elt, "splat");
}
@@ -2643,7 +2646,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
= CGF.getContext().getAsVariableArrayType(type)) {
llvm::Value *numElts = CGF.getVLASize(vla).NumElts;
if (!isInc) numElts = Builder.CreateNSWNeg(numElts, "vla.negsize");
- llvm::Type *elemTy = value->getType()->getPointerElementType();
+ llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
if (CGF.getLangOpts().isSignedOverflowDefined())
value = Builder.CreateGEP(elemTy, value, numElts, "vla.inc");
else
@@ -2949,8 +2952,8 @@ Value *ScalarExprEmitter::VisitOffsetOfExpr(OffsetOfExpr *E) {
CurrentType = ON.getBase()->getType();
// Compute the offset to the base.
- const RecordType *BaseRT = CurrentType->getAs<RecordType>();
- CXXRecordDecl *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
+ auto *BaseRT = CurrentType->castAs<RecordType>();
+ auto *BaseRD = cast<CXXRecordDecl>(BaseRT->getDecl());
CharUnits OffsetInt = RL.getBaseClassOffset(BaseRD);
Offset = llvm::ConstantInt::get(ResultType, OffsetInt.getQuantity());
break;
@@ -3263,7 +3266,7 @@ Value *ScalarExprEmitter::EmitDiv(const BinOpInfo &Ops) {
}
if (Ops.Ty->isConstantMatrixType()) {
- llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
+ llvm::MatrixBuilder MB(Builder);
// We need to check the types of the operands of the operator to get the
// correct matrix dimensions.
auto *BO = cast<BinaryOperator>(Ops.E);
@@ -3521,7 +3524,7 @@ static Value *emitPointerArithmetic(CodeGenFunction &CGF,
// GEP indexes are signed, and scaling an index isn't permitted to
// signed-overflow, so we use the same semantics for our explicit
// multiply. We suppress this if overflow is not undefined behavior.
- llvm::Type *elemTy = pointer->getType()->getPointerElementType();
+ llvm::Type *elemTy = CGF.ConvertTypeForMem(vla->getElementType());
if (CGF.getLangOpts().isSignedOverflowDefined()) {
index = CGF.Builder.CreateMul(index, numElements, "vla.index");
pointer = CGF.Builder.CreateGEP(elemTy, pointer, index, "add.ptr");
@@ -3657,7 +3660,7 @@ Value *ScalarExprEmitter::EmitAdd(const BinOpInfo &op) {
}
if (op.Ty->isConstantMatrixType()) {
- llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
+ llvm::MatrixBuilder MB(Builder);
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
return MB.CreateAdd(op.LHS, op.RHS);
}
@@ -3807,7 +3810,7 @@ Value *ScalarExprEmitter::EmitSub(const BinOpInfo &op) {
}
if (op.Ty->isConstantMatrixType()) {
- llvm::MatrixBuilder<CGBuilderTy> MB(Builder);
+ llvm::MatrixBuilder MB(Builder);
CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, op.FPFeatures);
return MB.CreateSub(op.LHS, op.RHS);
}
@@ -4639,7 +4642,8 @@ VisitAbstractConditionalOperator(const AbstractConditionalOperator *E) {
return tmp5;
}
- if (condExpr->getType()->isVectorType()) {
+ if (condExpr->getType()->isVectorType() ||
+ condExpr->getType()->isVLSTBuiltinType()) {
CGF.incrementProfileCounter(E);
llvm::Value *CondV = CGF.EmitScalarExpr(condExpr);
@@ -4819,6 +4823,10 @@ Value *ScalarExprEmitter::VisitAsTypeExpr(AsTypeExpr *E) {
? cast<llvm::FixedVectorType>(DstTy)->getNumElements()
: 0;
+ // Use bit vector expansion for ext_vector_type boolean vectors.
+ if (E->getType()->isExtVectorBoolType())
+ return CGF.emitBoolVecConversion(Src, NumElementsDst, "astype");
+
// Going from vec3 to non-vec3 is a special case and requires a shuffle
// vector to get a vec4, then a bitcast if the target type is different.
if (NumElementsSrc == 3 && NumElementsDst != 3) {
@@ -4902,7 +4910,9 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
Expr *BaseExpr = E->getBase();
Address Addr = Address::invalid();
if (BaseExpr->isPRValue()) {
- Addr = Address(EmitScalarExpr(BaseExpr), getPointerAlign());
+ llvm::Type *BaseTy =
+ ConvertTypeForMem(BaseExpr->getType()->getPointeeType());
+ Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
} else {
Addr = EmitLValue(BaseExpr).getAddress(*this);
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.cpp
new file mode 100644
index 000000000000..7dfcc65969a8
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.cpp
@@ -0,0 +1,52 @@
+//===----- CGHLSLRuntime.cpp - Interface to HLSL Runtimes -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for HLSL code generation. Concrete
+// subclasses of this implement code generation for specific HLSL
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#include "CGHLSLRuntime.h"
+#include "CodeGenModule.h"
+#include "clang/Basic/TargetOptions.h"
+#include "llvm/IR/Metadata.h"
+#include "llvm/IR/Module.h"
+
+using namespace clang;
+using namespace CodeGen;
+using namespace llvm;
+
+namespace {
+void addDxilValVersion(StringRef ValVersionStr, llvm::Module &M) {
+ // The validation of ValVersionStr is done in HLSLToolChain::TranslateArgs.
+ // Assume ValVersionStr is legal here.
+ VersionTuple Version;
+ if (Version.tryParse(ValVersionStr) || Version.getBuild() ||
+ Version.getSubminor() || !Version.getMinor()) {
+ return;
+ }
+
+ uint64_t Major = Version.getMajor();
+ uint64_t Minor = *Version.getMinor();
+
+ auto &Ctx = M.getContext();
+ IRBuilder<> B(M.getContext());
+ MDNode *Val = MDNode::get(Ctx, {ConstantAsMetadata::get(B.getInt32(Major)),
+ ConstantAsMetadata::get(B.getInt32(Minor))});
+ StringRef DxilValKey = "dx.valver";
+ M.addModuleFlag(llvm::Module::ModFlagBehavior::AppendUnique, DxilValKey, Val);
+}
+} // namespace
+
+void CGHLSLRuntime::finishCodeGen() {
+ auto &TargetOpts = CGM.getTarget().getTargetOpts();
+
+ llvm::Module &M = CGM.getModule();
+ addDxilValVersion(TargetOpts.DxilValidatorVersion, M);
+}
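addDxilValVersion above assumes VersionTuple::tryParse returns true on failure and that a valid validator version is exactly "major.minor". A sketch of that contract:

    llvm::VersionTuple V;
    if (!V.tryParse("1.7"))                        // parse succeeded
      assert(V.getMajor() == 1 && *V.getMinor() == 7 &&
             !V.getSubminor() && !V.getBuild());   // the only shape the guard accepts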
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.h b/contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.h
new file mode 100644
index 000000000000..268810f2ec9e
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGHLSLRuntime.h
@@ -0,0 +1,38 @@
+//===----- CGHLSLRuntime.h - Interface to HLSL Runtimes -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This provides an abstract class for HLSL code generation. Concrete
+// subclasses of this implement code generation for specific HLSL
+// runtime libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_CODEGEN_CGHLSLRUNTIME_H
+#define LLVM_CLANG_LIB_CODEGEN_CGHLSLRUNTIME_H
+
+namespace clang {
+
+namespace CodeGen {
+
+class CodeGenModule;
+
+class CGHLSLRuntime {
+protected:
+ CodeGenModule &CGM;
+
+public:
+ CGHLSLRuntime(CodeGenModule &CGM) : CGM(CGM) {}
+ virtual ~CGHLSLRuntime() {}
+
+ void finishCodeGen();
+};
+
+} // namespace CodeGen
+} // namespace clang
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp
index e3b0e069b830..0abf39ad1f28 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGNonTrivialStruct.cpp
@@ -315,8 +315,7 @@ static const CGFunctionInfo &getFunctionInfo(CodeGenModule &CGM,
Ctx, nullptr, SourceLocation(), &Ctx.Idents.get(ValNameStr[I]), ParamTy,
ImplicitParamDecl::Other));
- for (auto &P : Params)
- Args.push_back(P);
+ llvm::append_range(Args, Params);
return CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
}
@@ -326,9 +325,9 @@ static std::array<Address, N> getParamAddrs(std::index_sequence<Ints...> IntSeq,
std::array<CharUnits, N> Alignments,
FunctionArgList Args,
CodeGenFunction *CGF) {
- return std::array<Address, N>{{
- Address(CGF->Builder.CreateLoad(CGF->GetAddrOfLocalVar(Args[Ints])),
- Alignments[Ints])...}};
+ return std::array<Address, N>{
+ {Address(CGF->Builder.CreateLoad(CGF->GetAddrOfLocalVar(Args[Ints])),
+ CGF->VoidPtrTy, Alignments[Ints])...}};
}
// Template classes that are used as bases for classes that emit special
@@ -400,8 +399,9 @@ template <class Derived> struct GenFuncBase {
std::array<Address, N> NewAddrs = Addrs;
for (unsigned I = 0; I < N; ++I)
- NewAddrs[I] = Address(
- PHIs[I], StartAddrs[I].getAlignment().alignmentAtOffset(EltSize));
+ NewAddrs[I] =
+ Address(PHIs[I], CGF.Int8PtrTy,
+ StartAddrs[I].getAlignment().alignmentAtOffset(EltSize));
EltQT = IsVolatile ? EltQT.withVolatile() : EltQT;
this->asDerived().visitWithKind(FK, EltQT, nullptr, CharUnits::Zero(),
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
index 8cc609186f9e..1b6acb2b7212 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjC.cpp
@@ -92,8 +92,9 @@ CodeGenFunction::EmitObjCBoxedExpr(const ObjCBoxedExpr *E) {
// and cast value to correct type
Address Temporary = CreateMemTemp(SubExpr->getType());
EmitAnyExprToMem(SubExpr, Temporary, Qualifiers(), /*isInit*/ true);
- Address BitCast = Builder.CreateBitCast(Temporary, ConvertType(ArgQT));
- Args.add(RValue::get(BitCast.getPointer()), ArgQT);
+ llvm::Value *BitCast =
+ Builder.CreateBitCast(Temporary.getPointer(), ConvertType(ArgQT));
+ Args.add(RValue::get(BitCast), ArgQT);
// Create char array to store type encoding
std::string Str;
@@ -441,7 +442,7 @@ CodeGen::RValue CGObjCRuntime::GeneratePossiblySpecializedMessageSend(
if (Optional<llvm::Value *> SpecializedResult =
tryGenerateSpecializedMessageSend(CGF, ResultType, Receiver, Args,
Sel, Method, isClassMessage)) {
- return RValue::get(SpecializedResult.getValue());
+ return RValue::get(*SpecializedResult);
}
return GenerateMessageSend(CGF, Return, ResultType, Sel, Receiver, Args, OID,
Method);
@@ -816,19 +817,20 @@ static void emitStructGetterCall(CodeGenFunction &CGF, ObjCIvarDecl *ivar,
bool isAtomic, bool hasStrong) {
ASTContext &Context = CGF.getContext();
- Address src =
+ llvm::Value *src =
CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), CGF.LoadObjCSelf(), ivar, 0)
- .getAddress(CGF);
+ .getPointer(CGF);
// objc_copyStruct (ReturnValue, &structIvar,
// sizeof (Type of Ivar), isAtomic, false);
CallArgList args;
- Address dest = CGF.Builder.CreateBitCast(CGF.ReturnValue, CGF.VoidPtrTy);
- args.add(RValue::get(dest.getPointer()), Context.VoidPtrTy);
+ llvm::Value *dest =
+ CGF.Builder.CreateBitCast(CGF.ReturnValue.getPointer(), CGF.VoidPtrTy);
+ args.add(RValue::get(dest), Context.VoidPtrTy);
src = CGF.Builder.CreateBitCast(src, CGF.VoidPtrTy);
- args.add(RValue::get(src.getPointer()), Context.VoidPtrTy);
+ args.add(RValue::get(src), Context.VoidPtrTy);
CharUnits size = CGF.getContext().getTypeSizeInChars(ivar->getType());
args.add(RValue::get(CGF.CGM.getSize(size)), Context.getSizeType());
@@ -1149,11 +1151,10 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
// types, so there's no point in trying to pick a prettier type.
uint64_t ivarSize = getContext().toBits(strategy.getIvarSize());
llvm::Type *bitcastType = llvm::Type::getIntNTy(getLLVMContext(), ivarSize);
- bitcastType = bitcastType->getPointerTo(); // addrspace 0 okay
// Perform an atomic load. This does not impose ordering constraints.
Address ivarAddr = LV.getAddress(*this);
- ivarAddr = Builder.CreateBitCast(ivarAddr, bitcastType);
+ ivarAddr = Builder.CreateElementBitCast(ivarAddr, bitcastType);
llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
load->setAtomic(llvm::AtomicOrdering::Unordered);
@@ -1164,12 +1165,11 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
uint64_t retTySize = CGM.getDataLayout().getTypeSizeInBits(retTy);
llvm::Value *ivarVal = load;
if (ivarSize > retTySize) {
- llvm::Type *newTy = llvm::Type::getIntNTy(getLLVMContext(), retTySize);
- ivarVal = Builder.CreateTrunc(load, newTy);
- bitcastType = newTy->getPointerTo();
+ bitcastType = llvm::Type::getIntNTy(getLLVMContext(), retTySize);
+ ivarVal = Builder.CreateTrunc(load, bitcastType);
}
Builder.CreateStore(ivarVal,
- Builder.CreateBitCast(ReturnValue, bitcastType));
+ Builder.CreateElementBitCast(ReturnValue, bitcastType));
// Make sure we don't do an autorelease.
AutoreleaseResult = false;
@@ -1911,8 +1911,7 @@ void CodeGenFunction::EmitObjCForCollectionStmt(const ObjCForCollectionStmt &S){
// Fetch the value at the current index from the buffer.
llvm::Value *CurrentItemPtr = Builder.CreateGEP(
- EnumStateItems->getType()->getPointerElementType(), EnumStateItems, index,
- "currentitem.ptr");
+ ObjCIdType, EnumStateItems, index, "currentitem.ptr");
llvm::Value *CurrentItem =
Builder.CreateAlignedLoad(ObjCIdType, CurrentItemPtr, getPointerAlign());
@@ -2156,7 +2155,7 @@ static llvm::Value *emitARCLoadOperation(CodeGenFunction &CGF, Address addr,
// Cast the argument to 'id*'.
llvm::Type *origType = addr.getElementType();
- addr = CGF.Builder.CreateBitCast(addr, CGF.Int8PtrPtrTy);
+ addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8PtrTy);
// Call the function.
llvm::Value *result = CGF.EmitNounwindRuntimeCall(fn, addr.getPointer());
@@ -2610,7 +2609,7 @@ void CodeGenFunction::EmitARCDestroyWeak(Address addr) {
fn = getARCIntrinsic(llvm::Intrinsic::objc_destroyWeak, CGM);
// Cast the argument to 'id*'.
- addr = Builder.CreateBitCast(addr, Int8PtrPtrTy);
+ addr = Builder.CreateElementBitCast(addr, Int8PtrTy);
EmitNounwindRuntimeCall(fn, addr.getPointer());
}
@@ -3845,15 +3844,14 @@ CodeGenFunction::GenerateObjCAtomicGetterCopyHelperFunction(
SourceLocation());
RValue DV = EmitAnyExpr(&DstExpr);
- CharUnits Alignment
- = getContext().getTypeAlignInChars(TheCXXConstructExpr->getType());
+ CharUnits Alignment =
+ getContext().getTypeAlignInChars(TheCXXConstructExpr->getType());
EmitAggExpr(TheCXXConstructExpr,
- AggValueSlot::forAddr(Address(DV.getScalarVal(), Alignment),
- Qualifiers(),
- AggValueSlot::IsDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased,
- AggValueSlot::DoesNotOverlap));
+ AggValueSlot::forAddr(
+ Address(DV.getScalarVal(), ConvertTypeForMem(Ty), Alignment),
+ Qualifiers(), AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap));
FinishFunction();
HelperFn = llvm::ConstantExpr::getBitCast(Fn, VoidPtrTy);
@@ -3897,6 +3895,8 @@ static unsigned getBaseMachOPlatformID(const llvm::Triple &TT) {
return llvm::MachO::PLATFORM_TVOS;
case llvm::Triple::WatchOS:
return llvm::MachO::PLATFORM_WATCHOS;
+ case llvm::Triple::DriverKit:
+ return llvm::MachO::PLATFORM_DRIVERKIT;
default:
return /*Unknown platform*/ 0;
}
@@ -3915,8 +3915,8 @@ static llvm::Value *emitIsPlatformVersionAtLeast(CodeGenFunction &CGF,
Args.push_back(
llvm::ConstantInt::get(CGM.Int32Ty, getBaseMachOPlatformID(TT)));
Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Version.getMajor()));
- Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Min.getValueOr(0)));
- Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, SMin.getValueOr(0)));
+ Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, Min.value_or(0)));
+ Args.push_back(llvm::ConstantInt::get(CGM.Int32Ty, SMin.value_or(0)));
};
assert(!Version.empty() && "unexpected empty version");
@@ -3952,9 +3952,8 @@ CodeGenFunction::EmitBuiltinAvailable(const VersionTuple &Version) {
Optional<unsigned> Min = Version.getMinor(), SMin = Version.getSubminor();
llvm::Value *Args[] = {
llvm::ConstantInt::get(CGM.Int32Ty, Version.getMajor()),
- llvm::ConstantInt::get(CGM.Int32Ty, Min.getValueOr(0)),
- llvm::ConstantInt::get(CGM.Int32Ty, SMin.getValueOr(0))
- };
+ llvm::ConstantInt::get(CGM.Int32Ty, Min.value_or(0)),
+ llvm::ConstantInt::get(CGM.Int32Ty, SMin.value_or(0))};
llvm::Value *CallRes =
EmitNounwindRuntimeCall(CGM.IsOSVersionAtLeastFn, Args);
@@ -3977,6 +3976,9 @@ static bool isFoundationNeededForDarwinAvailabilityCheck(
case llvm::Triple::MacOSX:
FoundationDroppedInVersion = VersionTuple(/*Major=*/10, /*Minor=*/15);
break;
+ case llvm::Triple::DriverKit:
+ // DriverKit doesn't need Foundation.
+ return false;
default:
llvm_unreachable("Unexpected OS");
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
index 52b449090868..ec459f07f307 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjCGNU.cpp
@@ -107,6 +107,8 @@ protected:
/// SEL is included in a header somewhere, in which case it will be whatever
/// type is declared in that header, most likely {i8*, i8*}.
llvm::PointerType *SelectorTy;
+ /// Element type of SelectorTy.
+ llvm::Type *SelectorElemTy;
/// LLVM i8 type. Cached here to avoid repeatedly getting it in all of the
/// places where it's used
llvm::IntegerType *Int8Ty;
@@ -128,6 +130,8 @@ protected:
/// but if the runtime header declaring it is included then it may be a
/// pointer to a structure.
llvm::PointerType *IdTy;
+ /// Element type of IdTy.
+ llvm::Type *IdElemTy;
/// Pointer to a pointer to an Objective-C object. Used in the new ABI
/// message lookup function and some GC-related functions.
llvm::PointerType *PtrToIdTy;
@@ -313,12 +317,9 @@ protected:
/// Ensures that the value has the required type, by inserting a bitcast if
/// required. This function lets us avoid inserting bitcasts that are
/// redundant.
- llvm::Value* EnforceType(CGBuilderTy &B, llvm::Value *V, llvm::Type *Ty) {
- if (V->getType() == Ty) return V;
- return B.CreateBitCast(V, Ty);
- }
- Address EnforceType(CGBuilderTy &B, Address V, llvm::Type *Ty) {
- if (V.getType() == Ty) return V;
+ llvm::Value *EnforceType(CGBuilderTy &B, llvm::Value *V, llvm::Type *Ty) {
+ if (V->getType() == Ty)
+ return V;
return B.CreateBitCast(V, Ty);
}
@@ -700,8 +701,8 @@ protected:
llvm::Value *LookupIMPSuper(CodeGenFunction &CGF, Address ObjCSuper,
llvm::Value *cmd, MessageSendInfo &MSI) override {
CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *lookupArgs[] = {EnforceType(Builder, ObjCSuper,
- PtrToObjCSuperTy).getPointer(), cmd};
+ llvm::Value *lookupArgs[] = {
+ EnforceType(Builder, ObjCSuper.getPointer(), PtrToObjCSuperTy), cmd};
return CGF.EmitNounwindRuntimeCall(MsgLookupSuperFn, lookupArgs);
}
@@ -978,9 +979,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
// Look for an existing one
llvm::StringMap<llvm::Constant*>::iterator old = ObjCStrings.find(Str);
if (old != ObjCStrings.end())
- return ConstantAddress(
- old->getValue(), old->getValue()->getType()->getPointerElementType(),
- Align);
+ return ConstantAddress(old->getValue(), IdElemTy, Align);
bool isNonASCII = SL->containsNonAscii();
@@ -1002,7 +1001,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
auto *ObjCStr = llvm::ConstantExpr::getIntToPtr(
llvm::ConstantInt::get(Int64Ty, str), IdTy);
ObjCStrings[Str] = ObjCStr;
- return ConstantAddress(ObjCStr, IdTy->getPointerElementType(), Align);
+ return ConstantAddress(ObjCStr, IdElemTy, Align);
}
StringRef StringClass = CGM.getLangOpts().ObjCConstantStringClass;
@@ -1116,7 +1115,7 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
llvm::Constant *ObjCStr = llvm::ConstantExpr::getBitCast(ObjCStrGV, IdTy);
ObjCStrings[Str] = ObjCStr;
ConstantStrings.push_back(ObjCStr);
- return ConstantAddress(ObjCStr, IdTy->getPointerElementType(), Align);
+ return ConstantAddress(ObjCStr, IdElemTy, Align);
}
void PushProperty(ConstantArrayBuilder &PropertiesArray,
@@ -1208,8 +1207,10 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
llvm::Value *cmd, MessageSendInfo &MSI) override {
// Don't access the slot unless we're trying to cache the result.
CGBuilderTy &Builder = CGF.Builder;
- llvm::Value *lookupArgs[] = {CGObjCGNU::EnforceType(Builder, ObjCSuper,
- PtrToObjCSuperTy).getPointer(), cmd};
+ llvm::Value *lookupArgs[] = {CGObjCGNU::EnforceType(Builder,
+ ObjCSuper.getPointer(),
+ PtrToObjCSuperTy),
+ cmd};
return CGF.EmitNounwindRuntimeCall(MsgLookupSuperFn, lookupArgs);
}
@@ -1263,8 +1264,8 @@ class CGObjCGNUstep2 : public CGObjCGNUstep {
llvm::Value *GetClassNamed(CodeGenFunction &CGF,
const std::string &Name,
bool isWeak) override {
- return CGF.Builder.CreateLoad(Address(GetClassVar(Name, isWeak),
- CGM.getPointerAlign()));
+ return CGF.Builder.CreateLoad(
+ Address(GetClassVar(Name, isWeak), IdTy, CGM.getPointerAlign()));
}
int32_t FlagsForOwnership(Qualifiers::ObjCLifetime Ownership) {
// typedef enum {
@@ -2170,8 +2171,10 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
QualType selTy = CGM.getContext().getObjCSelType();
if (QualType() == selTy) {
SelectorTy = PtrToInt8Ty;
+ SelectorElemTy = Int8Ty;
} else {
SelectorTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(selTy));
+ SelectorElemTy = CGM.getTypes().ConvertTypeForMem(selTy->getPointeeType());
}
PtrToIntTy = llvm::PointerType::getUnqual(IntTy);
@@ -2189,8 +2192,11 @@ CGObjCGNU::CGObjCGNU(CodeGenModule &cgm, unsigned runtimeABIVersion,
if (UnqualIdTy != QualType()) {
ASTIdTy = CGM.getContext().getCanonicalType(UnqualIdTy);
IdTy = cast<llvm::PointerType>(CGM.getTypes().ConvertType(ASTIdTy));
+ IdElemTy = CGM.getTypes().ConvertTypeForMem(
+ ASTIdTy.getTypePtr()->getPointeeType());
} else {
IdTy = PtrToInt8Ty;
+ IdElemTy = Int8Ty;
}
PtrToIdTy = llvm::PointerType::getUnqual(IdTy);
ProtocolTy = llvm::StructType::get(IdTy,
@@ -2347,7 +2353,7 @@ llvm::Value *CGObjCGNU::GetTypedSelector(CodeGenFunction &CGF, Selector Sel,
}
}
if (!SelValue) {
- SelValue = llvm::GlobalAlias::create(SelectorTy->getPointerElementType(), 0,
+ SelValue = llvm::GlobalAlias::create(SelectorElemTy, 0,
llvm::GlobalValue::PrivateLinkage,
".objc_selector_" + Sel.getAsString(),
&TheModule);
@@ -2577,16 +2583,14 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
if (IsClassMessage) {
if (!MetaClassPtrAlias) {
MetaClassPtrAlias = llvm::GlobalAlias::create(
- IdTy->getPointerElementType(), 0,
- llvm::GlobalValue::InternalLinkage,
+ IdElemTy, 0, llvm::GlobalValue::InternalLinkage,
".objc_metaclass_ref" + Class->getNameAsString(), &TheModule);
}
ReceiverClass = MetaClassPtrAlias;
} else {
if (!ClassPtrAlias) {
ClassPtrAlias = llvm::GlobalAlias::create(
- IdTy->getPointerElementType(), 0,
- llvm::GlobalValue::InternalLinkage,
+ IdElemTy, 0, llvm::GlobalValue::InternalLinkage,
".objc_class_ref" + Class->getNameAsString(), &TheModule);
}
ReceiverClass = ClassPtrAlias;
@@ -2612,8 +2616,6 @@ CGObjCGNU::GenerateMessageSendSuper(CodeGenFunction &CGF,
Builder.CreateStore(Receiver, Builder.CreateStructGEP(ObjCSuper, 0));
Builder.CreateStore(ReceiverClass, Builder.CreateStructGEP(ObjCSuper, 1));
- ObjCSuper = EnforceType(Builder, ObjCSuper, PtrToObjCSuperTy);
-
// Get the IMP
llvm::Value *imp = LookupIMPSuper(CGF, ObjCSuper, cmd, MSI);
imp = EnforceType(Builder, imp, MSI.MessengerType);
@@ -3708,8 +3710,7 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
// Add all referenced protocols to a category.
GenerateProtocolHolderCategory();
- llvm::StructType *selStructTy =
- dyn_cast<llvm::StructType>(SelectorTy->getPointerElementType());
+ llvm::StructType *selStructTy = dyn_cast<llvm::StructType>(SelectorElemTy);
llvm::Type *selStructPtrTy = SelectorTy;
if (!selStructTy) {
selStructTy = llvm::StructType::get(CGM.getLLVMContext(),
@@ -3861,9 +3862,10 @@ llvm::Function *CGObjCGNU::ModuleInitFunction() {
// The path to the source file where this module was declared
SourceManager &SM = CGM.getContext().getSourceManager();
- const FileEntry *mainFile = SM.getFileEntryForID(SM.getMainFileID());
+ Optional<FileEntryRef> mainFile =
+ SM.getFileEntryRefForID(SM.getMainFileID());
std::string path =
- (Twine(mainFile->getDir()->getName()) + "/" + mainFile->getName()).str();
+ (mainFile->getDir().getName() + "/" + mainFile->getName()).str();
module.add(MakeConstantString(path, ".objc_source_file_name"));
module.add(symtab);
@@ -4064,16 +4066,16 @@ void CGObjCGNU::EmitThrowStmt(CodeGenFunction &CGF,
llvm::Value * CGObjCGNU::EmitObjCWeakRead(CodeGenFunction &CGF,
Address AddrWeakObj) {
CGBuilderTy &B = CGF.Builder;
- AddrWeakObj = EnforceType(B, AddrWeakObj, PtrToIdTy);
- return B.CreateCall(WeakReadFn, AddrWeakObj.getPointer());
+ return B.CreateCall(WeakReadFn,
+ EnforceType(B, AddrWeakObj.getPointer(), PtrToIdTy));
}
void CGObjCGNU::EmitObjCWeakAssign(CodeGenFunction &CGF,
llvm::Value *src, Address dst) {
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
- dst = EnforceType(B, dst, PtrToIdTy);
- B.CreateCall(WeakAssignFn, {src, dst.getPointer()});
+ llvm::Value *dstVal = EnforceType(B, dst.getPointer(), PtrToIdTy);
+ B.CreateCall(WeakAssignFn, {src, dstVal});
}
void CGObjCGNU::EmitObjCGlobalAssign(CodeGenFunction &CGF,
@@ -4081,10 +4083,10 @@ void CGObjCGNU::EmitObjCGlobalAssign(CodeGenFunction &CGF,
bool threadlocal) {
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
- dst = EnforceType(B, dst, PtrToIdTy);
+ llvm::Value *dstVal = EnforceType(B, dst.getPointer(), PtrToIdTy);
  // FIXME: Add threadlocal assign API.
  assert(!threadlocal && "EmitObjCGlobalAssign - Thread Local API NYI");
- B.CreateCall(GlobalAssignFn, {src, dst.getPointer()});
+ B.CreateCall(GlobalAssignFn, {src, dstVal});
}
void CGObjCGNU::EmitObjCIvarAssign(CodeGenFunction &CGF,
@@ -4092,16 +4094,16 @@ void CGObjCGNU::EmitObjCIvarAssign(CodeGenFunction &CGF,
llvm::Value *ivarOffset) {
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
- dst = EnforceType(B, dst, IdTy);
- B.CreateCall(IvarAssignFn, {src, dst.getPointer(), ivarOffset});
+ llvm::Value *dstVal = EnforceType(B, dst.getPointer(), IdTy);
+ B.CreateCall(IvarAssignFn, {src, dstVal, ivarOffset});
}
void CGObjCGNU::EmitObjCStrongCastAssign(CodeGenFunction &CGF,
llvm::Value *src, Address dst) {
CGBuilderTy &B = CGF.Builder;
src = EnforceType(B, src, IdTy);
- dst = EnforceType(B, dst, PtrToIdTy);
- B.CreateCall(StrongCastAssignFn, {src, dst.getPointer()});
+ llvm::Value *dstVal = EnforceType(B, dst.getPointer(), PtrToIdTy);
+ B.CreateCall(StrongCastAssignFn, {src, dstVal});
}
void CGObjCGNU::EmitGCMemmoveCollectable(CodeGenFunction &CGF,
@@ -4109,10 +4111,10 @@ void CGObjCGNU::EmitGCMemmoveCollectable(CodeGenFunction &CGF,
Address SrcPtr,
llvm::Value *Size) {
CGBuilderTy &B = CGF.Builder;
- DestPtr = EnforceType(B, DestPtr, PtrTy);
- SrcPtr = EnforceType(B, SrcPtr, PtrTy);
+ llvm::Value *DestPtrVal = EnforceType(B, DestPtr.getPointer(), PtrTy);
+ llvm::Value *SrcPtrVal = EnforceType(B, SrcPtr.getPointer(), PtrTy);
- B.CreateCall(MemMoveFn, {DestPtr.getPointer(), SrcPtr.getPointer(), Size});
+ B.CreateCall(MemMoveFn, {DestPtrVal, SrcPtrVal, Size});
}
llvm::GlobalVariable *CGObjCGNU::ObjCIvarOffsetVariable(
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp
index e7dba4c8feab..46e65eb1ed43 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjCMac.cpp
@@ -4915,11 +4915,11 @@ void CGObjCMac::EmitThrowStmt(CodeGen::CodeGenFunction &CGF,
llvm::Value * CGObjCMac::EmitObjCWeakRead(CodeGen::CodeGenFunction &CGF,
Address AddrWeakObj) {
llvm::Type* DestTy = AddrWeakObj.getElementType();
- AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj,
- ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *AddrWeakObjVal = CGF.Builder.CreateBitCast(
+ AddrWeakObj.getPointer(), ObjCTypes.PtrObjectPtrTy);
llvm::Value *read_weak =
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcReadWeakFn(),
- AddrWeakObj.getPointer(), "weakread");
+ AddrWeakObjVal, "weakread");
read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy);
return read_weak;
}
@@ -4938,8 +4938,9 @@ void CGObjCMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst.getPointer() };
+ llvm::Value *dstVal =
+ CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *args[] = { src, dstVal };
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignWeakFn(),
args, "weakassign");
}
@@ -4959,8 +4960,9 @@ void CGObjCMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst.getPointer() };
+ llvm::Value *dstVal =
+ CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *args[] = {src, dstVal};
if (!threadlocal)
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignGlobalFn(),
args, "globalassign");
@@ -4985,8 +4987,9 @@ void CGObjCMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst.getPointer(), ivarOffset };
+ llvm::Value *dstVal =
+ CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *args[] = {src, dstVal, ivarOffset};
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignIvarFn(), args);
}
@@ -5004,8 +5007,9 @@ void CGObjCMac::EmitObjCStrongCastAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst.getPointer() };
+ llvm::Value *dstVal =
+ CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *args[] = {src, dstVal};
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignStrongCastFn(),
args, "strongassign");
}
@@ -5014,8 +5018,8 @@ void CGObjCMac::EmitGCMemmoveCollectable(CodeGen::CodeGenFunction &CGF,
Address DestPtr,
Address SrcPtr,
llvm::Value *size) {
- SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy);
- DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy);
+ SrcPtr = CGF.Builder.CreateElementBitCast(SrcPtr, CGF.Int8Ty);
+ DestPtr = CGF.Builder.CreateElementBitCast(DestPtr, CGF.Int8Ty);
llvm::Value *args[] = { DestPtr.getPointer(), SrcPtr.getPointer(), size };
CGF.EmitNounwindRuntimeCall(ObjCTypes.GcMemmoveCollectableFn(), args);
}
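The switch from CreateBitCast to CreateElementBitCast in EmitGCMemmoveCollectable is worth calling out: an element bitcast keeps the Address's pointer and alignment and swaps only the tracked element type, giving an i8 view of the same memory. A reduced sketch, assuming a simplified Address with an explicit element type (not the real class):

    #include <cstdint>
    #include <string>

    struct Type { std::string Name; };

    struct Address {
      void *Ptr;
      Type *ElemTy;
      uint64_t Align;
    };

    // Sketch of CreateElementBitCast: same pointer, same alignment,
    // different element type (an i8 "view" of the same memory).
    Address createElementBitCast(Address A, Type *NewElemTy) {
      return Address{A.Ptr, NewElemTy, A.Align};
    }

    int main() {
      Type WidgetTy{"%struct.widget"}, Int8Ty{"i8"};
      int Buf = 0;
      Address Src{&Buf, &WidgetTy, 4};
      Address AsBytes = createElementBitCast(Src, &Int8Ty);
      return AsBytes.Ptr == Src.Ptr && AsBytes.ElemTy == &Int8Ty ? 0 : 1;
    }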
@@ -5064,7 +5068,9 @@ std::string CGObjCCommonMac::GetSectionName(StringRef Section,
return ("." + Section.substr(2) + "$B").str();
case llvm::Triple::Wasm:
case llvm::Triple::GOFF:
+ case llvm::Triple::SPIRV:
case llvm::Triple::XCOFF:
+ case llvm::Triple::DXContainer:
llvm::report_fatal_error(
"Objective-C support is unimplemented for object file format");
}
@@ -5268,7 +5274,7 @@ Address CGObjCMac::EmitSelectorAddr(Selector Sel) {
Entry->setExternallyInitialized(true);
}
- return Address(Entry, Align);
+ return Address(Entry, ObjCTypes.SelectorPtrTy, Align);
}
llvm::Constant *CGObjCCommonMac::GetClassName(StringRef RuntimeName) {
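For readers following the opaque-pointer migration: the two-argument Address(pointer, alignment) constructor is going away because the pointee type can no longer be read back off the pointer's LLVM type, so EmitSelectorAddr now passes the element type (ObjCTypes.SelectorPtrTy) explicitly. A stand-alone sketch of the new invariant, with placeholder types in place of the real CodeGen classes:

    #include <cstdint>
    #include <string>

    // Placeholder for llvm::Type; real code uses LLVM IR types.
    struct Type { std::string Name; };

    // Sketch of the post-migration Address: the element type is stored
    // explicitly because an opaque pointer no longer implies it.
    struct Address {
      void *Pointer;
      Type *ElementType;   // newly required third piece of state
      uint64_t Alignment;
      Address(void *P, Type *ElemTy, uint64_t Align)
          : Pointer(P), ElementType(ElemTy), Alignment(Align) {}
      Type *getElementType() const { return ElementType; }
    };

    int main() {
      Type SelectorPtrTy{"SEL"};
      int Slot = 0;
      Address Entry(&Slot, &SelectorPtrTy, 8);  // Address(Entry, SelectorPtrTy, Align)
      return Entry.getElementType() == &SelectorPtrTy ? 0 : 1;
    }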
@@ -7345,7 +7351,7 @@ CGObjCNonFragileABIMac::EmitVTableMessageSend(CodeGenFunction &CGF,
Address mref =
Address(CGF.Builder.CreateBitCast(messageRef, ObjCTypes.MessageRefPtrTy),
- CGF.getPointerAlign());
+ ObjCTypes.MessageRefTy, CGF.getPointerAlign());
// Update the message ref argument.
args[1].setRValue(RValue::get(mref.getPointer()));
@@ -7404,7 +7410,7 @@ CGObjCNonFragileABIMac::GetClassGlobal(StringRef Name,
: llvm::GlobalValue::ExternalLinkage;
llvm::GlobalVariable *GV = CGM.getModule().getGlobalVariable(Name);
- if (!GV || GV->getType() != ObjCTypes.ClassnfABITy->getPointerTo()) {
+ if (!GV || GV->getValueType() != ObjCTypes.ClassnfABITy) {
auto *NewGV = new llvm::GlobalVariable(ObjCTypes.ClassnfABITy, false, L,
nullptr, Name);
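Likewise, once pointers are opaque every global has the same pointer type, so the old guard GV->getType() != ObjCTypes.ClassnfABITy->getPointerTo() could never fire; GetClassGlobal must compare the global's value type instead. A toy model of the distinction (placeholder types, not LLVM's):

    #include <string>

    struct Type { std::string Name; };

    // Sketch of an LLVM global under opaque pointers: its own type is
    // always just "ptr", but the type of the initialized storage survives.
    struct GlobalVariable {
      Type *ValueTy;
      std::string getTypeName() const { return "ptr"; } // opaque pointer
      Type *getValueType() const { return ValueTy; }
    };

    int main() {
      Type ClassnfABITy{"%struct._class_t"};
      GlobalVariable GV{&ClassnfABITy};
      // Pointer types no longer differ, so compare value types instead.
      return GV.getValueType() == &ClassnfABITy ? 0 : 1;
    }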
@@ -7639,7 +7645,7 @@ Address CGObjCNonFragileABIMac::EmitSelectorAddr(Selector Sel) {
CGM.addCompilerUsedGlobal(Entry);
}
- return Address(Entry, Align);
+ return Address(Entry, ObjCTypes.SelectorPtrTy, Align);
}
/// EmitObjCIvarAssign - Code gen for assigning to a __strong object.
@@ -7658,8 +7664,9 @@ void CGObjCNonFragileABIMac::EmitObjCIvarAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst.getPointer(), ivarOffset };
+ llvm::Value *dstVal =
+ CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *args[] = {src, dstVal, ivarOffset};
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignIvarFn(), args);
}
@@ -7678,8 +7685,9 @@ void CGObjCNonFragileABIMac::EmitObjCStrongCastAssign(
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst.getPointer() };
+ llvm::Value *dstVal =
+ CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *args[] = {src, dstVal};
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignStrongCastFn(),
args, "weakassign");
}
@@ -7689,8 +7697,8 @@ void CGObjCNonFragileABIMac::EmitGCMemmoveCollectable(
Address DestPtr,
Address SrcPtr,
llvm::Value *Size) {
- SrcPtr = CGF.Builder.CreateBitCast(SrcPtr, ObjCTypes.Int8PtrTy);
- DestPtr = CGF.Builder.CreateBitCast(DestPtr, ObjCTypes.Int8PtrTy);
+ SrcPtr = CGF.Builder.CreateElementBitCast(SrcPtr, CGF.Int8Ty);
+ DestPtr = CGF.Builder.CreateElementBitCast(DestPtr, CGF.Int8Ty);
llvm::Value *args[] = { DestPtr.getPointer(), SrcPtr.getPointer(), Size };
CGF.EmitNounwindRuntimeCall(ObjCTypes.GcMemmoveCollectableFn(), args);
}
@@ -7702,10 +7710,11 @@ llvm::Value * CGObjCNonFragileABIMac::EmitObjCWeakRead(
CodeGen::CodeGenFunction &CGF,
Address AddrWeakObj) {
llvm::Type *DestTy = AddrWeakObj.getElementType();
- AddrWeakObj = CGF.Builder.CreateBitCast(AddrWeakObj, ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *AddrWeakObjVal = CGF.Builder.CreateBitCast(
+ AddrWeakObj.getPointer(), ObjCTypes.PtrObjectPtrTy);
llvm::Value *read_weak =
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcReadWeakFn(),
- AddrWeakObj.getPointer(), "weakread");
+ AddrWeakObjVal, "weakread");
read_weak = CGF.Builder.CreateBitCast(read_weak, DestTy);
return read_weak;
}
@@ -7724,8 +7733,9 @@ void CGObjCNonFragileABIMac::EmitObjCWeakAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst.getPointer() };
+ llvm::Value *dstVal =
+ CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *args[] = {src, dstVal};
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignWeakFn(),
args, "weakassign");
}
@@ -7745,8 +7755,9 @@ void CGObjCNonFragileABIMac::EmitObjCGlobalAssign(CodeGen::CodeGenFunction &CGF,
src = CGF.Builder.CreateIntToPtr(src, ObjCTypes.Int8PtrTy);
}
src = CGF.Builder.CreateBitCast(src, ObjCTypes.ObjectPtrTy);
- dst = CGF.Builder.CreateBitCast(dst, ObjCTypes.PtrObjectPtrTy);
- llvm::Value *args[] = { src, dst.getPointer() };
+ llvm::Value *dstVal =
+ CGF.Builder.CreateBitCast(dst.getPointer(), ObjCTypes.PtrObjectPtrTy);
+ llvm::Value *args[] = {src, dstVal};
if (!threadlocal)
CGF.EmitNounwindRuntimeCall(ObjCTypes.getGcAssignGlobalFn(),
args, "globalassign");
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.cpp
index 33ae3c7c2b28..550fd3d70bdc 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGObjCRuntime.cpp
@@ -106,7 +106,7 @@ LValue CGObjCRuntime::EmitValueForIvarAtOffset(CodeGen::CodeGenFunction &CGF,
CGF.CGM.getContext().toBits(StorageSize),
CharUnits::fromQuantity(0)));
- Address Addr(V, Alignment);
+ Address Addr = Address(V, CGF.Int8Ty, Alignment);
Addr = CGF.Builder.CreateElementBitCast(Addr,
llvm::Type::getIntNTy(CGF.getLLVMContext(),
Info->StorageSize));
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.cpp
index dbe375294d17..ab8de7ecf50c 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.cpp
@@ -34,41 +34,46 @@ llvm::Type *CGOpenCLRuntime::convertOpenCLSpecificType(const Type *T) {
assert(T->isOpenCLSpecificType() &&
"Not an OpenCL specific type!");
- llvm::LLVMContext& Ctx = CGM.getLLVMContext();
- uint32_t AddrSpc = CGM.getContext().getTargetAddressSpace(
- CGM.getContext().getOpenCLTypeAddrSpace(T));
switch (cast<BuiltinType>(T)->getKind()) {
default:
llvm_unreachable("Unexpected opencl builtin type!");
return nullptr;
-#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
- case BuiltinType::Id: \
- return llvm::PointerType::get( \
- llvm::StructType::create(Ctx, "opencl." #ImgType "_" #Suffix "_t"), \
- AddrSpc);
+#define IMAGE_TYPE(ImgType, Id, SingletonId, Access, Suffix) \
+ case BuiltinType::Id: \
+ return getPointerType(T, "opencl." #ImgType "_" #Suffix "_t");
#include "clang/Basic/OpenCLImageTypes.def"
case BuiltinType::OCLSampler:
return getSamplerType(T);
case BuiltinType::OCLEvent:
- return llvm::PointerType::get(
- llvm::StructType::create(Ctx, "opencl.event_t"), AddrSpc);
+ return getPointerType(T, "opencl.event_t");
case BuiltinType::OCLClkEvent:
- return llvm::PointerType::get(
- llvm::StructType::create(Ctx, "opencl.clk_event_t"), AddrSpc);
+ return getPointerType(T, "opencl.clk_event_t");
case BuiltinType::OCLQueue:
- return llvm::PointerType::get(
- llvm::StructType::create(Ctx, "opencl.queue_t"), AddrSpc);
+ return getPointerType(T, "opencl.queue_t");
case BuiltinType::OCLReserveID:
- return llvm::PointerType::get(
- llvm::StructType::create(Ctx, "opencl.reserve_id_t"), AddrSpc);
-#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
- case BuiltinType::Id: \
- return llvm::PointerType::get( \
- llvm::StructType::create(Ctx, "opencl." #ExtType), AddrSpc);
+ return getPointerType(T, "opencl.reserve_id_t");
+#define EXT_OPAQUE_TYPE(ExtType, Id, Ext) \
+ case BuiltinType::Id: \
+ return getPointerType(T, "opencl." #ExtType);
#include "clang/Basic/OpenCLExtensionTypes.def"
}
}
+llvm::PointerType *CGOpenCLRuntime::getPointerType(const Type *T,
+ StringRef Name) {
+ auto I = CachedTys.find(Name);
+ if (I != CachedTys.end())
+ return I->second;
+
+ llvm::LLVMContext &Ctx = CGM.getLLVMContext();
+ uint32_t AddrSpc = CGM.getContext().getTargetAddressSpace(
+ CGM.getContext().getOpenCLTypeAddrSpace(T));
+ auto *PTy =
+ llvm::PointerType::get(llvm::StructType::create(Ctx, Name), AddrSpc);
+ CachedTys[Name] = PTy;
+ return PTy;
+}
+
llvm::Type *CGOpenCLRuntime::getPipeType(const PipeType *T) {
if (T->isReadOnly())
return getPipeType(T, "opencl.pipe_ro_t", PipeROTy);
@@ -143,13 +148,14 @@ static const BlockExpr *getBlockExpr(const Expr *E) {
/// corresponding block expression.
void CGOpenCLRuntime::recordBlockInfo(const BlockExpr *E,
llvm::Function *InvokeF,
- llvm::Value *Block) {
+ llvm::Value *Block, llvm::Type *BlockTy) {
assert(EnqueuedBlockMap.find(E) == EnqueuedBlockMap.end() &&
"Block expression emitted twice");
assert(isa<llvm::Function>(InvokeF) && "Invalid invoke function");
assert(Block->getType()->isPointerTy() && "Invalid block literal type");
EnqueuedBlockMap[E].InvokeFunc = InvokeF;
EnqueuedBlockMap[E].BlockArg = Block;
+ EnqueuedBlockMap[E].BlockTy = BlockTy;
EnqueuedBlockMap[E].Kernel = nullptr;
}
@@ -174,8 +180,7 @@ CGOpenCLRuntime::emitOpenCLEnqueuedBlock(CodeGenFunction &CGF, const Expr *E) {
}
auto *F = CGF.getTargetHooks().createEnqueuedBlockKernel(
- CGF, EnqueuedBlockMap[Block].InvokeFunc,
- EnqueuedBlockMap[Block].BlockArg->stripPointerCasts());
+ CGF, EnqueuedBlockMap[Block].InvokeFunc, EnqueuedBlockMap[Block].BlockTy);
// The common part of the post-processing of the kernel goes here.
F->addFnAttr(llvm::Attribute::NoUnwind);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.h b/contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.h
index 3f7aa9b0d8dc..900644b3b93b 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenCLRuntime.h
@@ -18,6 +18,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/Type.h"
#include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/StringMap.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
@@ -38,18 +39,21 @@ protected:
llvm::Type *PipeROTy;
llvm::Type *PipeWOTy;
llvm::PointerType *SamplerTy;
+ llvm::StringMap<llvm::PointerType *> CachedTys;
/// Structure for enqueued block information.
struct EnqueuedBlockInfo {
llvm::Function *InvokeFunc; /// Block invoke function.
llvm::Function *Kernel; /// Enqueued block kernel.
llvm::Value *BlockArg; /// The first argument to enqueued block kernel.
+ llvm::Type *BlockTy; /// Type of the block argument.
};
/// Maps block expression to block information.
llvm::DenseMap<const Expr *, EnqueuedBlockInfo> EnqueuedBlockMap;
virtual llvm::Type *getPipeType(const PipeType *T, StringRef Name,
llvm::Type *&PipeTy);
+ llvm::PointerType *getPointerType(const Type *T, StringRef Name);
public:
CGOpenCLRuntime(CodeGenModule &CGM) : CGM(CGM),
@@ -90,7 +94,7 @@ public:
/// \param InvokeF invoke function emitted for the block expression.
/// \param Block block literal emitted for the block expression.
void recordBlockInfo(const BlockExpr *E, llvm::Function *InvokeF,
- llvm::Value *Block);
+ llvm::Value *Block, llvm::Type *BlockTy);
/// \return LLVM block invoke function emitted for an expression derived from
/// the block expression.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index db1c3ca191ca..305040b01c08 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -29,11 +29,13 @@
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SetOperations.h"
+#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalValue.h"
+#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Format.h"
@@ -369,8 +371,7 @@ public:
/*RefersToEnclosingVariableOrCapture=*/false,
VD->getType().getNonReferenceType(), VK_LValue,
C.getLocation());
- PrivScope.addPrivate(
- VD, [&CGF, &DRE]() { return CGF.EmitLValue(&DRE).getAddress(CGF); });
+ PrivScope.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));
}
(void)PrivScope.Privatize();
}
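The OMPPrivateScope::addPrivate change that recurs throughout this file replaces a deferred std::function<Address()> callback with an eagerly computed Address; every call site could already produce the address up front, so the lambda only added allocation and noise. A before/after sketch under simplified stand-in types:

    #include <functional>
    #include <map>

    struct VarDecl {};
    struct Address { void *Ptr = nullptr; };

    struct OMPPrivateScope {
      std::map<const VarDecl *, Address> Privates;
      // Old shape (kept for contrast): a callback evaluated later.
      void addPrivateDeferred(const VarDecl *VD, std::function<Address()> Fn) {
        Privates[VD] = Fn();
      }
      // New shape: the caller passes the Address directly.
      void addPrivate(const VarDecl *VD, Address Addr) { Privates[VD] = Addr; }
    };

    int main() {
      VarDecl VD;
      int Storage = 0;
      OMPPrivateScope Scope;
      Scope.addPrivate(&VD, Address{&Storage});   // no lambda needed
      return Scope.Privates[&VD].Ptr == &Storage ? 0 : 1;
    }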
@@ -633,10 +634,8 @@ static void emitInitWithReductionInitializer(CodeGenFunction &CGF,
const auto *RHSDRE =
cast<DeclRefExpr>(cast<UnaryOperator>(RHS)->getSubExpr());
CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
- PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()),
- [=]() { return Private; });
- PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()),
- [=]() { return Original; });
+ PrivateScope.addPrivate(cast<VarDecl>(LHSDRE->getDecl()), Private);
+ PrivateScope.addPrivate(cast<VarDecl>(RHSDRE->getDecl()), Original);
(void)PrivateScope.Privatize();
RValue Func = RValue::get(Reduction.second);
CodeGenFunction::OpaqueValueMapping Map(CGF, OVE, Func);
@@ -719,14 +718,14 @@ static void EmitOMPAggregateInit(CodeGenFunction &CGF, Address DestAddr,
"omp.arraycpy.srcElementPast");
SrcElementPHI->addIncoming(SrcBegin, EntryBB);
SrcElementCurrent =
- Address(SrcElementPHI,
+ Address(SrcElementPHI, SrcAddr.getElementType(),
SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
}
llvm::PHINode *DestElementPHI = CGF.Builder.CreatePHI(
DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
DestElementPHI->addIncoming(DestBegin, EntryBB);
Address DestElementCurrent =
- Address(DestElementPHI,
+ Address(DestElementPHI, DestAddr.getElementType(),
DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
// Emit copy.
@@ -825,9 +824,7 @@ void ReductionCodeGen::emitSharedOrigLValue(CodeGenFunction &CGF, unsigned N) {
}
void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
- const auto *PrivateVD =
- cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
- QualType PrivateType = PrivateVD->getType();
+ QualType PrivateType = getPrivateType(N);
bool AsArraySection = isa<OMPArraySectionExpr>(ClausesData[N].Ref);
if (!PrivateType->isVariablyModifiedType()) {
Sizes.emplace_back(
@@ -862,9 +859,7 @@ void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N,
llvm::Value *Size) {
- const auto *PrivateVD =
- cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
- QualType PrivateType = PrivateVD->getType();
+ QualType PrivateType = getPrivateType(N);
if (!PrivateType->isVariablyModifiedType()) {
assert(!Size && !Sizes[N].second &&
"Size should be nullptr for non-variably modified reduction "
@@ -887,9 +882,6 @@ void ReductionCodeGen::emitInitialization(
cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
const OMPDeclareReductionDecl *DRD =
getReductionInit(ClausesData[N].ReductionOp);
- QualType PrivateType = PrivateVD->getType();
- PrivateAddr = CGF.Builder.CreateElementBitCast(
- PrivateAddr, CGF.ConvertTypeForMem(PrivateType));
if (CGF.getContext().getAsArrayType(PrivateVD->getType())) {
if (DRD && DRD->getInitializer())
(void)DefaultInit(CGF);
@@ -908,18 +900,14 @@ void ReductionCodeGen::emitInitialization(
}
bool ReductionCodeGen::needCleanups(unsigned N) {
- const auto *PrivateVD =
- cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
- QualType PrivateType = PrivateVD->getType();
+ QualType PrivateType = getPrivateType(N);
QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
return DTorKind != QualType::DK_none;
}
void ReductionCodeGen::emitCleanups(CodeGenFunction &CGF, unsigned N,
Address PrivateAddr) {
- const auto *PrivateVD =
- cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl());
- QualType PrivateType = PrivateVD->getType();
+ QualType PrivateType = getPrivateType(N);
QualType::DestructionKind DTorKind = PrivateType.isDestructedType();
if (needCleanups(N)) {
PrivateAddr = CGF.Builder.CreateElementBitCast(
@@ -949,8 +937,7 @@ static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
}
static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
- llvm::Type *BaseLVType, CharUnits BaseLVAlignment,
- llvm::Value *Addr) {
+ Address OriginalBaseAddress, llvm::Value *Addr) {
Address Tmp = Address::invalid();
Address TopTmp = Address::invalid();
Address MostTopTmp = Address::invalid();
@@ -965,15 +952,17 @@ static Address castToBase(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
TopTmp = Tmp;
BaseTy = BaseTy->getPointeeType();
}
- llvm::Type *Ty = BaseLVType;
- if (Tmp.isValid())
- Ty = Tmp.getElementType();
- Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Addr, Ty);
+
if (Tmp.isValid()) {
+ Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Addr, Tmp.getElementType());
CGF.Builder.CreateStore(Addr, Tmp);
return MostTopTmp;
}
- return Address(Addr, BaseLVAlignment);
+
+ Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ Addr, OriginalBaseAddress.getType());
+ return OriginalBaseAddress.withPointer(Addr);
}
static const VarDecl *getBaseDecl(const Expr *Ref, const DeclRefExpr *&DE) {
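castToBase now takes the original base Address whole rather than its type and alignment separately, so the non-temporary path can rebuild the result with withPointer and keep the element type and alignment intact. A reduced model of what withPointer is assumed to do here:

    #include <cstdint>
    #include <string>

    struct Type { std::string Name; };

    struct Address {
      void *Ptr;
      Type *ElemTy;
      uint64_t Align;
      // Same element type and alignment, new pointer value.
      Address withPointer(void *NewPtr) const {
        return Address{NewPtr, ElemTy, Align};
      }
    };

    int main() {
      Type T{"i32"};
      int A = 0, B = 0;
      Address Base{&A, &T, 4};
      Address Adjusted = Base.withPointer(&B);
      return Adjusted.ElemTy == &T && Adjusted.Align == 4 ? 0 : 1;
    }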
@@ -1016,8 +1005,7 @@ Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
SharedAddr.getElementType(), PrivatePointer, Adjustment);
return castToBase(CGF, OrigVD->getType(),
SharedAddresses[N].first.getType(),
- OriginalBaseLValue.getAddress(CGF).getType(),
- OriginalBaseLValue.getAlignment(), Ptr);
+ OriginalBaseLValue.getAddress(CGF), Ptr);
}
BaseDecls.emplace_back(
cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Ref)->getDecl()));
@@ -1140,15 +1128,13 @@ emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
Out->getLocation());
CodeGenFunction::OMPPrivateScope Scope(CGF);
Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
- Scope.addPrivate(In, [&CGF, AddrIn, PtrTy]() {
- return CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
- .getAddress(CGF);
- });
+ Scope.addPrivate(
+ In, CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
+ .getAddress(CGF));
Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
- Scope.addPrivate(Out, [&CGF, AddrOut, PtrTy]() {
- return CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
- .getAddress(CGF);
- });
+ Scope.addPrivate(
+ Out, CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
+ .getAddress(CGF));
(void)Scope.Privatize();
if (!IsCombiner && Out->hasInit() &&
!CGF.isTrivialInitializer(Out->getInit())) {
@@ -1343,51 +1329,6 @@ llvm::Function *CGOpenMPRuntime::emitTaskOutlinedFunction(
return Res;
}
-static void buildStructValue(ConstantStructBuilder &Fields, CodeGenModule &CGM,
- const RecordDecl *RD, const CGRecordLayout &RL,
- ArrayRef<llvm::Constant *> Data) {
- llvm::StructType *StructTy = RL.getLLVMType();
- unsigned PrevIdx = 0;
- ConstantInitBuilder CIBuilder(CGM);
- auto DI = Data.begin();
- for (const FieldDecl *FD : RD->fields()) {
- unsigned Idx = RL.getLLVMFieldNo(FD);
- // Fill the alignment.
- for (unsigned I = PrevIdx; I < Idx; ++I)
- Fields.add(llvm::Constant::getNullValue(StructTy->getElementType(I)));
- PrevIdx = Idx + 1;
- Fields.add(*DI);
- ++DI;
- }
-}
-
-template <class... As>
-static llvm::GlobalVariable *
-createGlobalStruct(CodeGenModule &CGM, QualType Ty, bool IsConstant,
- ArrayRef<llvm::Constant *> Data, const Twine &Name,
- As &&... Args) {
- const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
- const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
- ConstantInitBuilder CIBuilder(CGM);
- ConstantStructBuilder Fields = CIBuilder.beginStruct(RL.getLLVMType());
- buildStructValue(Fields, CGM, RD, RL, Data);
- return Fields.finishAndCreateGlobal(
- Name, CGM.getContext().getAlignOfGlobalVarInChars(Ty), IsConstant,
- std::forward<As>(Args)...);
-}
-
-template <typename T>
-static void
-createConstantGlobalStructAndAddToParent(CodeGenModule &CGM, QualType Ty,
- ArrayRef<llvm::Constant *> Data,
- T &Parent) {
- const auto *RD = cast<RecordDecl>(Ty->getAsTagDecl());
- const CGRecordLayout &RL = CGM.getTypes().getCGRecordLayout(RD);
- ConstantStructBuilder Fields = Parent.beginStruct(RL.getLLVMType());
- buildStructValue(Fields, CGM, RD, RL, Data);
- Fields.finishAndAddTo(Parent);
-}
-
void CGOpenMPRuntime::setLocThreadIdInsertPt(CodeGenFunction &CGF,
bool AtCurrentPoint) {
auto &Elem = OpenMPLocThreadIDMap.FindAndConstruct(CGF.CurFn);
@@ -1703,10 +1644,10 @@ Address CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) {
OS << "_decl_tgt_ref_ptr";
}
llvm::Value *Ptr = CGM.getModule().getNamedValue(PtrName);
+ QualType PtrTy = CGM.getContext().getPointerType(VD->getType());
+ llvm::Type *LlvmPtrTy = CGM.getTypes().ConvertTypeForMem(PtrTy);
if (!Ptr) {
- QualType PtrTy = CGM.getContext().getPointerType(VD->getType());
- Ptr = getOrCreateInternalVariable(CGM.getTypes().ConvertTypeForMem(PtrTy),
- PtrName);
+ Ptr = getOrCreateInternalVariable(LlvmPtrTy, PtrName);
auto *GV = cast<llvm::GlobalVariable>(Ptr);
GV->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
@@ -1715,7 +1656,7 @@ Address CGOpenMPRuntime::getAddrOfDeclareTargetVar(const VarDecl *VD) {
GV->setInitializer(CGM.GetAddrOfGlobal(VD));
registerTargetGlobalVariable(VD, cast<llvm::Constant>(Ptr));
}
- return Address(Ptr, CGM.getContext().getDeclAlign(VD));
+ return Address(Ptr, LlvmPtrTy, CGM.getContext().getDeclAlign(VD));
}
return Address::invalid();
}
@@ -1739,16 +1680,17 @@ Address CGOpenMPRuntime::getAddrOfThreadPrivate(CodeGenFunction &CGF,
return VDAddr;
llvm::Type *VarTy = VDAddr.getElementType();
- llvm::Value *Args[] = {emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
- CGF.Builder.CreatePointerCast(VDAddr.getPointer(),
- CGM.Int8PtrTy),
- CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
- getOrCreateThreadPrivateCache(VD)};
- return Address(CGF.EmitRuntimeCall(
- OMPBuilder.getOrCreateRuntimeFunction(
- CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
- Args),
- VDAddr.getAlignment());
+ llvm::Value *Args[] = {
+ emitUpdateLocation(CGF, Loc), getThreadID(CGF, Loc),
+ CGF.Builder.CreatePointerCast(VDAddr.getPointer(), CGM.Int8PtrTy),
+ CGM.getSize(CGM.GetTargetTypeStoreSize(VarTy)),
+ getOrCreateThreadPrivateCache(VD)};
+ return Address(
+ CGF.EmitRuntimeCall(
+ OMPBuilder.getOrCreateRuntimeFunction(
+ CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
+ Args),
+ CGF.Int8Ty, VDAddr.getAlignment());
}
void CGOpenMPRuntime::emitThreadPrivateVarInit(
@@ -1805,7 +1747,7 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
llvm::Value *ArgVal = CtorCGF.EmitLoadOfScalar(
CtorCGF.GetAddrOfLocalVar(&Dst), /*Volatile=*/false,
CGM.getContext().VoidPtrTy, Dst.getLocation());
- Address Arg = Address(ArgVal, VDAddr.getAlignment());
+ Address Arg(ArgVal, CtorCGF.Int8Ty, VDAddr.getAlignment());
Arg = CtorCGF.Builder.CreateElementBitCast(
Arg, CtorCGF.ConvertTypeForMem(ASTTy));
CtorCGF.EmitAnyExprToMem(Init, Arg, Init->getType().getQualifiers(),
@@ -1841,9 +1783,10 @@ llvm::Function *CGOpenMPRuntime::emitThreadPrivateVarDefinition(
llvm::Value *ArgVal = DtorCGF.EmitLoadOfScalar(
DtorCGF.GetAddrOfLocalVar(&Dst),
/*Volatile=*/false, CGM.getContext().VoidPtrTy, Dst.getLocation());
- DtorCGF.emitDestroy(Address(ArgVal, VDAddr.getAlignment()), ASTTy,
- DtorCGF.getDestroyer(ASTTy.isDestructedType()),
- DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
+ DtorCGF.emitDestroy(
+ Address(ArgVal, DtorCGF.Int8Ty, VDAddr.getAlignment()), ASTTy,
+ DtorCGF.getDestroyer(ASTTy.isDestructedType()),
+ DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
DtorCGF.FinishFunction();
Dtor = Fn;
}
@@ -1938,19 +1881,27 @@ bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
llvm::Function *Fn = CGM.CreateGlobalInitOrCleanUpFunction(
- FTy, Twine(Buffer, "_ctor"), FI, Loc);
+ FTy, Twine(Buffer, "_ctor"), FI, Loc, false,
+ llvm::GlobalValue::WeakODRLinkage);
+ if (CGM.getTriple().isAMDGCN())
+ Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
auto NL = ApplyDebugLocation::CreateEmpty(CtorCGF);
CtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
FunctionArgList(), Loc, Loc);
auto AL = ApplyDebugLocation::CreateArtificial(CtorCGF);
+ llvm::Constant *AddrInAS0 = Addr;
+ if (Addr->getAddressSpace() != 0)
+ AddrInAS0 = llvm::ConstantExpr::getAddrSpaceCast(
+ Addr, llvm::PointerType::getWithSamePointeeType(
+ cast<llvm::PointerType>(Addr->getType()), 0));
CtorCGF.EmitAnyExprToMem(Init,
- Address(Addr, CGM.getContext().getDeclAlign(VD)),
+ Address(AddrInAS0, Addr->getValueType(),
+ CGM.getContext().getDeclAlign(VD)),
Init->getType().getQualifiers(),
/*IsInitializer=*/true);
CtorCGF.FinishFunction();
Ctor = Fn;
ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
- CGM.addUsedGlobal(cast<llvm::GlobalValue>(Ctor));
} else {
Ctor = new llvm::GlobalVariable(
CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
@@ -1976,20 +1927,28 @@ bool CGOpenMPRuntime::emitDeclareTargetVarDefinition(const VarDecl *VD,
const CGFunctionInfo &FI = CGM.getTypes().arrangeNullaryFunction();
llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FI);
llvm::Function *Fn = CGM.CreateGlobalInitOrCleanUpFunction(
- FTy, Twine(Buffer, "_dtor"), FI, Loc);
+ FTy, Twine(Buffer, "_dtor"), FI, Loc, false,
+ llvm::GlobalValue::WeakODRLinkage);
+ if (CGM.getTriple().isAMDGCN())
+ Fn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
auto NL = ApplyDebugLocation::CreateEmpty(DtorCGF);
DtorCGF.StartFunction(GlobalDecl(), CGM.getContext().VoidTy, Fn, FI,
FunctionArgList(), Loc, Loc);
// Create a scope with an artificial location for the body of this
// function.
auto AL = ApplyDebugLocation::CreateArtificial(DtorCGF);
- DtorCGF.emitDestroy(Address(Addr, CGM.getContext().getDeclAlign(VD)),
+ llvm::Constant *AddrInAS0 = Addr;
+ if (Addr->getAddressSpace() != 0)
+ AddrInAS0 = llvm::ConstantExpr::getAddrSpaceCast(
+ Addr, llvm::PointerType::getWithSamePointeeType(
+ cast<llvm::PointerType>(Addr->getType()), 0));
+ DtorCGF.emitDestroy(Address(AddrInAS0, Addr->getValueType(),
+ CGM.getContext().getDeclAlign(VD)),
ASTTy, DtorCGF.getDestroyer(ASTTy.isDestructedType()),
DtorCGF.needsEHCleanup(ASTTy.isDestructedType()));
DtorCGF.FinishFunction();
Dtor = Fn;
ID = llvm::ConstantExpr::getBitCast(Fn, CGM.Int8PtrTy);
- CGM.addUsedGlobal(cast<llvm::GlobalValue>(Dtor));
} else {
Dtor = new llvm::GlobalVariable(
CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
@@ -2035,7 +1994,7 @@ Address CGOpenMPRuntime::getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
CGM.getModule(), OMPRTL___kmpc_threadprivate_cached),
Args),
VarLVType->getPointerTo(/*AddrSpace=*/0)),
- CGM.getContext().getTypeAlignInChars(VarType));
+ VarLVType, CGM.getContext().getTypeAlignInChars(VarType));
}
void CGOpenMPRuntime::emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
@@ -2367,14 +2326,15 @@ static Address emitAddrOfVarFromArray(CodeGenFunction &CGF, Address Array,
Address PtrAddr = CGF.Builder.CreateConstArrayGEP(Array, Index);
llvm::Value *Ptr = CGF.Builder.CreateLoad(PtrAddr);
- Address Addr = Address(Ptr, CGF.getContext().getDeclAlign(Var));
- Addr = CGF.Builder.CreateElementBitCast(
- Addr, CGF.ConvertTypeForMem(Var->getType()));
- return Addr;
+ llvm::Type *ElemTy = CGF.ConvertTypeForMem(Var->getType());
+ return Address(
+ CGF.Builder.CreateBitCast(
+ Ptr, ElemTy->getPointerTo(Ptr->getType()->getPointerAddressSpace())),
+ ElemTy, CGF.getContext().getDeclAlign(Var));
}
static llvm::Value *emitCopyprivateCopyFunction(
- CodeGenModule &CGM, llvm::Type *ArgsType,
+ CodeGenModule &CGM, llvm::Type *ArgsElemType,
ArrayRef<const Expr *> CopyprivateVars, ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs, ArrayRef<const Expr *> AssignmentOps,
SourceLocation Loc) {
@@ -2401,11 +2361,13 @@ static llvm::Value *emitCopyprivateCopyFunction(
// Dest = (void*[n])(LHSArg);
// Src = (void*[n])(RHSArg);
Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
- ArgsType), CGF.getPointerAlign());
+ CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
+ ArgsElemType->getPointerTo()),
+ ArgsElemType, CGF.getPointerAlign());
Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
- ArgsType), CGF.getPointerAlign());
+ CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
+ ArgsElemType->getPointerTo()),
+ ArgsElemType, CGF.getPointerAlign());
// *(Type0*)Dst[0] = *(Type0*)Src[0];
// *(Type1*)Dst[1] = *(Type1*)Src[1];
// ...
@@ -2494,12 +2456,11 @@ void CGOpenMPRuntime::emitSingleRegion(CodeGenFunction &CGF,
// Build function that copies private values from single region to all other
// threads in the corresponding parallel region.
llvm::Value *CpyFn = emitCopyprivateCopyFunction(
- CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy)->getPointerTo(),
- CopyprivateVars, SrcExprs, DstExprs, AssignmentOps, Loc);
+ CGM, CGF.ConvertTypeForMem(CopyprivateArrayTy), CopyprivateVars,
+ SrcExprs, DstExprs, AssignmentOps, Loc);
llvm::Value *BufSize = CGF.getTypeSize(CopyprivateArrayTy);
- Address CL =
- CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(CopyprivateList,
- CGF.VoidPtrTy);
+ Address CL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ CopyprivateList, CGF.VoidPtrTy, CGF.Int8Ty);
llvm::Value *DidItVal = CGF.Builder.CreateLoad(DidIt);
llvm::Value *Args[] = {
emitUpdateLocation(CGF, Loc), // ident_t *<loc>
@@ -3130,32 +3091,7 @@ void CGOpenMPRuntime::OffloadEntriesInfoManagerTy::
void CGOpenMPRuntime::createOffloadEntry(
llvm::Constant *ID, llvm::Constant *Addr, uint64_t Size, int32_t Flags,
llvm::GlobalValue::LinkageTypes Linkage) {
- StringRef Name = Addr->getName();
- llvm::Module &M = CGM.getModule();
- llvm::LLVMContext &C = M.getContext();
-
- // Create constant string with the name.
- llvm::Constant *StrPtrInit = llvm::ConstantDataArray::getString(C, Name);
-
- std::string StringName = getName({"omp_offloading", "entry_name"});
- auto *Str = new llvm::GlobalVariable(
- M, StrPtrInit->getType(), /*isConstant=*/true,
- llvm::GlobalValue::InternalLinkage, StrPtrInit, StringName);
- Str->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
-
- llvm::Constant *Data[] = {
- llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(ID, CGM.VoidPtrTy),
- llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(Str, CGM.Int8PtrTy),
- llvm::ConstantInt::get(CGM.SizeTy, Size),
- llvm::ConstantInt::get(CGM.Int32Ty, Flags),
- llvm::ConstantInt::get(CGM.Int32Ty, 0)};
- std::string EntryName = getName({"omp_offloading", "entry", ""});
- llvm::GlobalVariable *Entry = createGlobalStruct(
- CGM, getTgtOffloadEntryQTy(), /*IsConstant=*/true, Data,
- Twine(EntryName).concat(Name), llvm::GlobalValue::WeakAnyLinkage);
-
- // The entry has to be created in the section the linker expects it to be.
- Entry->setSection("omp_offloading_entries");
+ OMPBuilder.emitOffloadingEntry(ID, Addr->getName(), Size, Flags);
}
void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
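The open-coded __tgt_offload_entry construction above (and the getTgtOffloadEntryQTy helper deleted later in this diff) is consolidated into OpenMPIRBuilder::emitOffloadingEntry. For reference, a plain-C++ restatement of the entry layout as the deleted comment documents it:

    #include <cstddef>
    #include <cstdint>

    // Restatement of the offload entry layout from the deleted
    // getTgtOffloadEntryQTy comment; field order and widths as documented.
    struct TgtOffloadEntry {
      void *addr;        // pointer to the offload entry (function or global)
      char *name;        // name of the function or global
      std::size_t size;  // size of the entry info (0 if it is a function)
      int32_t flags;     // flags associated with the entry, e.g. 'link'
      int32_t reserved;  // reserved for use by the runtime library
    };

    int main() { return 0; }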
@@ -3323,6 +3259,13 @@ void CGOpenMPRuntime::createOffloadEntriesAndInfoMetadata() {
}
break;
}
+
+ // Hidden or internal symbols on the device are not externally visible. We
+ // should not attempt to register them by creating an offloading entry.
+ if (auto *GV = dyn_cast<llvm::GlobalValue>(CE->getAddress()))
+ if (GV->hasLocalLinkage() || GV->hasHiddenVisibility())
+ continue;
+
createOffloadEntry(CE->getAddress(), CE->getAddress(),
CE->getVarSize().getQuantity(), Flags,
CE->getLinkage());
@@ -3413,35 +3356,6 @@ void CGOpenMPRuntime::emitKmpRoutineEntryT(QualType KmpInt32Ty) {
}
}
-QualType CGOpenMPRuntime::getTgtOffloadEntryQTy() {
- // Make sure the type of the entry is already created. This is the type we
- // have to create:
- // struct __tgt_offload_entry{
- // void *addr; // Pointer to the offload entry info.
- // // (function or global)
- // char *name; // Name of the function or global.
- // size_t size; // Size of the entry info (0 if it a function).
- // int32_t flags; // Flags associated with the entry, e.g. 'link'.
- // int32_t reserved; // Reserved, to use by the runtime library.
- // };
- if (TgtOffloadEntryQTy.isNull()) {
- ASTContext &C = CGM.getContext();
- RecordDecl *RD = C.buildImplicitRecord("__tgt_offload_entry");
- RD->startDefinition();
- addFieldToRecordDecl(C, RD, C.VoidPtrTy);
- addFieldToRecordDecl(C, RD, C.getPointerType(C.CharTy));
- addFieldToRecordDecl(C, RD, C.getSizeType());
- addFieldToRecordDecl(
- C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
- addFieldToRecordDecl(
- C, RD, C.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/true));
- RD->completeDefinition();
- RD->addAttr(PackedAttr::CreateImplicit(C));
- TgtOffloadEntryQTy = C.getRecordType(RD);
- }
- return TgtOffloadEntryQTy;
-}
-
namespace {
struct PrivateHelpersTy {
PrivateHelpersTy(const Expr *OriginalRef, const VarDecl *Original,
@@ -3642,12 +3556,12 @@ emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
PrivatesParam = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
}
- llvm::Value *CommonArgs[] = {GtidParam, PartidParam, PrivatesParam,
- TaskPrivatesMap,
- CGF.Builder
- .CreatePointerBitCastOrAddrSpaceCast(
- TDBase.getAddress(CGF), CGF.VoidPtrTy)
- .getPointer()};
+ llvm::Value *CommonArgs[] = {
+ GtidParam, PartidParam, PrivatesParam, TaskPrivatesMap,
+ CGF.Builder
+ .CreatePointerBitCastOrAddrSpaceCast(TDBase.getAddress(CGF),
+ CGF.VoidPtrTy, CGF.Int8Ty)
+ .getPointer()};
SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
std::end(CommonArgs));
if (isOpenMPTaskLoopDirective(Kind)) {
@@ -3867,7 +3781,8 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
(IsTargetTask && KmpTaskSharedsPtr.isValid())) {
SrcBase = CGF.MakeAddrLValue(
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy)),
+ KmpTaskSharedsPtr, CGF.ConvertTypeForMem(SharedsPtrTy),
+ CGF.ConvertTypeForMem(SharedsTy)),
SharedsTy);
}
FI = cast<RecordDecl>(FI->getType()->getAsTagDecl())->field_begin();
@@ -3903,8 +3818,8 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
} else if (ForDup) {
SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
SharedRefLValue = CGF.MakeAddrLValue(
- Address(SharedRefLValue.getPointer(CGF),
- C.getDeclAlign(OriginalVD)),
+ SharedRefLValue.getAddress(CGF).withAlignment(
+ C.getDeclAlign(OriginalVD)),
SharedRefLValue.getType(), LValueBaseInfo(AlignmentSource::Decl),
SharedRefLValue.getTBAAInfo());
} else if (CGF.LambdaCaptureFields.count(
@@ -3933,8 +3848,7 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
Address SrcElement) {
// Clean up any temporaries needed by the initialization.
CodeGenFunction::OMPPrivateScope InitScope(CGF);
- InitScope.addPrivate(
- Elem, [SrcElement]() -> Address { return SrcElement; });
+ InitScope.addPrivate(Elem, SrcElement);
(void)InitScope.Privatize();
// Emit initialization for single element.
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(
@@ -3946,9 +3860,7 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
}
} else {
CodeGenFunction::OMPPrivateScope InitScope(CGF);
- InitScope.addPrivate(Elem, [SharedRefLValue, &CGF]() -> Address {
- return SharedRefLValue.getAddress(CGF);
- });
+ InitScope.addPrivate(Elem, SharedRefLValue.getAddress(CGF));
(void)InitScope.Privatize();
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CapturesInfo);
CGF.EmitExprAsInit(Init, VD, PrivateLValue,
@@ -3971,7 +3883,7 @@ static bool checkInitIsRequired(CodeGenFunction &CGF,
continue;
const VarDecl *VD = Pair.second.PrivateCopy;
const Expr *Init = VD->getAnyInitializer();
- InitRequired = InitRequired || (Init && isa<CXXConstructExpr>(Init) &&
+ InitRequired = InitRequired || (isa_and_nonnull<CXXConstructExpr>(Init) &&
!CGF.isTrivialInitializer(Init));
if (InitRequired)
break;
@@ -4051,7 +3963,7 @@ emitTaskDupFunction(CodeGenModule &CGM, SourceLocation Loc,
Base, *std::next(KmpTaskTQTyRD->field_begin(),
KmpTaskTShareds)),
Loc),
- CGM.getNaturalTypeAlignment(SharedsTy));
+ CGF.Int8Ty, CGM.getNaturalTypeAlignment(SharedsTy));
}
emitPrivatesInit(CGF, D, KmpTaskSharedsPtr, TDBase, KmpTaskTWithPrivatesQTyRD,
SharedsTy, SharedsPtrTy, Data, Privates, /*ForDup=*/true);
@@ -4094,14 +4006,11 @@ public:
for (unsigned I = 0, End = E->numOfIterators(); I < End; ++I) {
Uppers.push_back(CGF.EmitScalarExpr(E->getHelper(I).Upper));
const auto *VD = cast<VarDecl>(E->getIteratorDecl(I));
- addPrivate(VD, [&CGF, VD]() {
- return CGF.CreateMemTemp(VD->getType(), VD->getName());
- });
+ addPrivate(VD, CGF.CreateMemTemp(VD->getType(), VD->getName()));
const OMPIteratorHelperData &HelperData = E->getHelper(I);
- addPrivate(HelperData.CounterVD, [&CGF, &HelperData]() {
- return CGF.CreateMemTemp(HelperData.CounterVD->getType(),
- "counter.addr");
- });
+ addPrivate(
+ HelperData.CounterVD,
+ CGF.CreateMemTemp(HelperData.CounterVD->getType(), "counter.addr"));
}
Privatize();
@@ -4534,13 +4443,13 @@ CGOpenMPRuntime::emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
// Copy shareds if there are any.
Address KmpTaskSharedsPtr = Address::invalid();
if (!SharedsTy->getAsStructureType()->getDecl()->field_empty()) {
- KmpTaskSharedsPtr =
- Address(CGF.EmitLoadOfScalar(
- CGF.EmitLValueForField(
- TDBase, *std::next(KmpTaskTQTyRD->field_begin(),
- KmpTaskTShareds)),
- Loc),
- CGM.getNaturalTypeAlignment(SharedsTy));
+ KmpTaskSharedsPtr = Address(
+ CGF.EmitLoadOfScalar(
+ CGF.EmitLValueForField(
+ TDBase,
+ *std::next(KmpTaskTQTyRD->field_begin(), KmpTaskTShareds)),
+ Loc),
+ CGF.Int8Ty, CGM.getNaturalTypeAlignment(SharedsTy));
LValue Dest = CGF.MakeAddrLValue(KmpTaskSharedsPtr, SharedsTy);
LValue Src = CGF.MakeAddrLValue(Shareds, SharedsTy);
CGF.EmitAggregateCopy(Dest, Src, SharedsTy, AggValueSlot::DoesNotOverlap);
@@ -4597,7 +4506,9 @@ namespace {
enum RTLDependenceKindTy {
DepIn = 0x01,
DepInOut = 0x3,
- DepMutexInOutSet = 0x4
+ DepMutexInOutSet = 0x4,
+ DepInOutSet = 0x8,
+ DepOmpAllMem = 0x80,
};
/// Fields ids in kmp_depend_info record.
enum RTLDependInfoFieldsTy { BaseAddr, Len, Flags };
@@ -4618,9 +4529,16 @@ static RTLDependenceKindTy translateDependencyKind(OpenMPDependClauseKind K) {
case OMPC_DEPEND_mutexinoutset:
DepKind = DepMutexInOutSet;
break;
+ case OMPC_DEPEND_inoutset:
+ DepKind = DepInOutSet;
+ break;
+ case OMPC_DEPEND_outallmemory:
+ DepKind = DepOmpAllMem;
+ break;
case OMPC_DEPEND_source:
case OMPC_DEPEND_sink:
case OMPC_DEPEND_depobj:
+ case OMPC_DEPEND_inoutallmemory:
case OMPC_DEPEND_unknown:
llvm_unreachable("Unknown task dependence type");
}
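These enumerators are the flag bits written into kmp_depend_info.flags for the OpenMP runtime, so the two new kinds simply claim previously unused bit values. A self-contained restatement of the mapping:

    // Restatement of the dependence-kind flag values used above; the two
    // new entries are inoutset (0x8) and the omp_all_memory kind (0x80).
    enum RTLDependenceKind : unsigned {
      DepIn = 0x01,
      DepInOut = 0x3,
      DepMutexInOutSet = 0x4,
      DepInOutSet = 0x8,
      DepOmpAllMem = 0x80,
    };

    int main() { return (DepInOutSet | DepOmpAllMem) == 0x88 ? 0 : 1; }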
@@ -4650,16 +4568,15 @@ CGOpenMPRuntime::getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal,
getDependTypes(C, KmpDependInfoTy, FlagsTy);
RecordDecl *KmpDependInfoRD =
cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
- LValue Base = CGF.EmitLoadOfPointerLValue(
- DepobjLVal.getAddress(CGF),
- C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
- Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy));
- Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
- Base.getTBAAInfo());
+ LValue Base = CGF.EmitLoadOfPointerLValue(
+ CGF.Builder.CreateElementBitCast(
+ DepobjLVal.getAddress(CGF),
+ CGF.ConvertTypeForMem(KmpDependInfoPtrTy)),
+ KmpDependInfoPtrTy->castAs<PointerType>());
Address DepObjAddr = CGF.Builder.CreateGEP(
- Addr, llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
+ Base.getAddress(CGF),
+ llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
LValue NumDepsBase = CGF.MakeAddrLValue(
DepObjAddr, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo());
// NumDeps = deps[i].base_addr;
@@ -4688,12 +4605,21 @@ static void emitDependData(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
for (const Expr *E : Data.DepExprs) {
llvm::Value *Addr;
llvm::Value *Size;
- std::tie(Addr, Size) = getPointerAndSize(CGF, E);
+
+ // The expression will be a nullptr in the 'omp_all_memory' case.
+ if (E) {
+ std::tie(Addr, Size) = getPointerAndSize(CGF, E);
+ Addr = CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy);
+ } else {
+ Addr = llvm::ConstantInt::get(CGF.IntPtrTy, 0);
+ Size = llvm::ConstantInt::get(CGF.SizeTy, 0);
+ }
LValue Base;
if (unsigned *P = Pos.dyn_cast<unsigned *>()) {
Base = CGF.MakeAddrLValue(
CGF.Builder.CreateConstGEP(DependenciesArray, *P), KmpDependInfoTy);
} else {
+ assert(E && "Expected a non-null expression");
LValue &PosLVal = *Pos.get<LValue *>();
llvm::Value *Idx = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
Base = CGF.MakeAddrLValue(
@@ -4702,8 +4628,7 @@ static void emitDependData(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
// deps[i].base_addr = &<Dependencies[i].second>;
LValue BaseAddrLVal = CGF.EmitLValueForField(
Base, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
- CGF.EmitStoreOfScalar(CGF.Builder.CreatePtrToInt(Addr, CGF.IntPtrTy),
- BaseAddrLVal);
+ CGF.EmitStoreOfScalar(Addr, BaseAddrLVal);
// deps[i].len = sizeof(<Dependencies[i].second>);
LValue LenLVal = CGF.EmitLValueForField(
Base, *std::next(KmpDependInfoRD->field_begin(), Len));
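In the 'omp_all_memory' case the dependence clause carries a null expression, so the emitted kmp_depend_info entry degenerates to base_addr = 0 and len = 0, and only the DepOmpAllMem flag carries meaning. A sketch of that branch in isolation (simplified types; getPointerAndSize below is a stand-in for the real helper):

    #include <cstdint>
    #include <utility>

    struct Expr {};

    // Stand-in for getPointerAndSize: address and size of a real dependence.
    std::pair<uintptr_t, uint64_t> getPointerAndSize(const Expr *E) {
      return {reinterpret_cast<uintptr_t>(E), sizeof(*E)};
    }

    struct DependInfo { uintptr_t base_addr; uint64_t len; };

    DependInfo emitDependEntry(const Expr *E) {
      if (E) {
        auto [Addr, Size] = getPointerAndSize(E);
        return {Addr, Size};
      }
      // 'omp_all_memory': no expression, so a (0, 0) entry is emitted and
      // the DepOmpAllMem flag alone describes the dependence.
      return {0, 0};
    }

    int main() { return emitDependEntry(nullptr).base_addr == 0 ? 0 : 1; }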
@@ -4726,43 +4651,25 @@ static void emitDependData(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
}
}
-static SmallVector<llvm::Value *, 4>
-emitDepobjElementsSizes(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
- const OMPTaskDataTy::DependData &Data) {
+SmallVector<llvm::Value *, 4> CGOpenMPRuntime::emitDepobjElementsSizes(
+ CodeGenFunction &CGF, QualType &KmpDependInfoTy,
+ const OMPTaskDataTy::DependData &Data) {
assert(Data.DepKind == OMPC_DEPEND_depobj &&
"Expected depobj dependecy kind.");
SmallVector<llvm::Value *, 4> Sizes;
SmallVector<LValue, 4> SizeLVals;
ASTContext &C = CGF.getContext();
- QualType FlagsTy;
- getDependTypes(C, KmpDependInfoTy, FlagsTy);
- RecordDecl *KmpDependInfoRD =
- cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
- QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
- llvm::Type *KmpDependInfoPtrT = CGF.ConvertTypeForMem(KmpDependInfoPtrTy);
{
OMPIteratorGeneratorScope IteratorScope(
CGF, cast_or_null<OMPIteratorExpr>(
Data.IteratorExpr ? Data.IteratorExpr->IgnoreParenImpCasts()
: nullptr));
for (const Expr *E : Data.DepExprs) {
+ llvm::Value *NumDeps;
+ LValue Base;
LValue DepobjLVal = CGF.EmitLValue(E->IgnoreParenImpCasts());
- LValue Base = CGF.EmitLoadOfPointerLValue(
- DepobjLVal.getAddress(CGF),
- C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
- Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- Base.getAddress(CGF), KmpDependInfoPtrT);
- Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
- Base.getTBAAInfo());
- Address DepObjAddr = CGF.Builder.CreateGEP(
- Addr, llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
- LValue NumDepsBase = CGF.MakeAddrLValue(
- DepObjAddr, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo());
- // NumDeps = deps[i].base_addr;
- LValue BaseAddrLVal = CGF.EmitLValueForField(
- NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
- llvm::Value *NumDeps =
- CGF.EmitLoadOfScalar(BaseAddrLVal, E->getExprLoc());
+ std::tie(NumDeps, Base) =
+ getDepobjElements(CGF, DepobjLVal, E->getExprLoc());
LValue NumLVal = CGF.MakeAddrLValue(
CGF.CreateMemTemp(C.getUIntPtrType(), "depobj.size.addr"),
C.getUIntPtrType());
@@ -4782,19 +4689,13 @@ emitDepobjElementsSizes(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
return Sizes;
}
-static void emitDepobjElements(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
- LValue PosLVal,
- const OMPTaskDataTy::DependData &Data,
- Address DependenciesArray) {
+void CGOpenMPRuntime::emitDepobjElements(CodeGenFunction &CGF,
+ QualType &KmpDependInfoTy,
+ LValue PosLVal,
+ const OMPTaskDataTy::DependData &Data,
+ Address DependenciesArray) {
assert(Data.DepKind == OMPC_DEPEND_depobj &&
"Expected depobj dependecy kind.");
- ASTContext &C = CGF.getContext();
- QualType FlagsTy;
- getDependTypes(C, KmpDependInfoTy, FlagsTy);
- RecordDecl *KmpDependInfoRD =
- cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
- QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
- llvm::Type *KmpDependInfoPtrT = CGF.ConvertTypeForMem(KmpDependInfoPtrTy);
llvm::Value *ElSize = CGF.getTypeSize(KmpDependInfoTy);
{
OMPIteratorGeneratorScope IteratorScope(
@@ -4803,25 +4704,11 @@ static void emitDepobjElements(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
: nullptr));
for (unsigned I = 0, End = Data.DepExprs.size(); I < End; ++I) {
const Expr *E = Data.DepExprs[I];
+ llvm::Value *NumDeps;
+ LValue Base;
LValue DepobjLVal = CGF.EmitLValue(E->IgnoreParenImpCasts());
- LValue Base = CGF.EmitLoadOfPointerLValue(
- DepobjLVal.getAddress(CGF),
- C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
- Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- Base.getAddress(CGF), KmpDependInfoPtrT);
- Base = CGF.MakeAddrLValue(Addr, KmpDependInfoTy, Base.getBaseInfo(),
- Base.getTBAAInfo());
-
- // Get number of elements in a single depobj.
- Address DepObjAddr = CGF.Builder.CreateGEP(
- Addr, llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
- LValue NumDepsBase = CGF.MakeAddrLValue(
- DepObjAddr, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo());
- // NumDeps = deps[i].base_addr;
- LValue BaseAddrLVal = CGF.EmitLValueForField(
- NumDepsBase, *std::next(KmpDependInfoRD->field_begin(), BaseAddr));
- llvm::Value *NumDeps =
- CGF.EmitLoadOfScalar(BaseAddrLVal, E->getExprLoc());
+ std::tie(NumDeps, Base) =
+ getDepobjElements(CGF, DepobjLVal, E->getExprLoc());
// memcopy dependency data.
llvm::Value *Size = CGF.Builder.CreateNUWMul(
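Both depobj walkers (the sizes pass above and the element-copying pass here) previously open-coded the same load/bitcast/deps[-1] dance to find the element count; they now share CGOpenMPRuntime::getDepobjElements, which returns the count together with the base lvalue. The shape of that refactor, reduced to a self-contained toy:

    #include <cstdint>
    #include <utility>

    struct LValue { const uint64_t *Base = nullptr; };

    // Stand-in for getDepobjElements: the count lives one slot before the
    // dependence array, so deps[-1] holds the number of elements.
    std::pair<uint64_t, LValue> getDepobjElements(const uint64_t *DepobjPtr) {
      return {DepobjPtr[-1], LValue{DepobjPtr}};
    }

    int main() {
      uint64_t Storage[4] = {3, 10, 20, 30}; // count, then three deps
      auto [NumDeps, Base] = getDepobjElements(&Storage[1]);
      return NumDeps == 3 && Base.Base == &Storage[1] ? 0 : 1;
    }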
@@ -4959,7 +4846,7 @@ std::pair<llvm::Value *, Address> CGOpenMPRuntime::emitDependClause(
}
}
DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- DependenciesArray, CGF.VoidPtrTy);
+ DependenciesArray, CGF.VoidPtrTy, CGF.Int8Ty);
return std::make_pair(NumOfElements, DependenciesArray);
}
@@ -5018,9 +4905,10 @@ Address CGOpenMPRuntime::emitDepobjDependClause(
CGF.EmitRuntimeCall(OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_alloc),
Args, ".dep.arr.addr");
+ llvm::Type *KmpDependInfoLlvmTy = CGF.ConvertTypeForMem(KmpDependInfoTy);
Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- Addr, CGF.ConvertTypeForMem(KmpDependInfoTy)->getPointerTo());
- DependenciesArray = Address(Addr, Align);
+ Addr, KmpDependInfoLlvmTy->getPointerTo());
+ DependenciesArray = Address(Addr, KmpDependInfoLlvmTy, Align);
// Write number of elements in the first element of array for depobj.
LValue Base = CGF.MakeAddrLValue(DependenciesArray, KmpDependInfoTy);
// deps[i].base_addr = NumDependencies;
@@ -5042,7 +4930,8 @@ Address CGOpenMPRuntime::emitDepobjDependClause(
}
emitDependData(CGF, KmpDependInfoTy, Pos, Dependencies, DependenciesArray);
DependenciesArray = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateConstGEP(DependenciesArray, 1), CGF.VoidPtrTy);
+ CGF.Builder.CreateConstGEP(DependenciesArray, 1), CGF.VoidPtrTy,
+ CGF.Int8Ty);
return DependenciesArray;
}
@@ -5052,11 +4941,11 @@ void CGOpenMPRuntime::emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
QualType FlagsTy;
getDependTypes(C, KmpDependInfoTy, FlagsTy);
LValue Base = CGF.EmitLoadOfPointerLValue(
- DepobjLVal.getAddress(CGF),
- C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
+ DepobjLVal.getAddress(CGF), C.VoidPtrTy.castAs<PointerType>());
QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy));
+ Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy),
+ CGF.ConvertTypeForMem(KmpDependInfoTy));
llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
Addr.getElementType(), Addr.getPointer(),
llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
@@ -5098,7 +4987,7 @@ void CGOpenMPRuntime::emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
llvm::PHINode *ElementPHI =
CGF.Builder.CreatePHI(Begin.getType(), 2, "omp.elementPast");
ElementPHI->addIncoming(Begin.getPointer(), EntryBB);
- Begin = Address(ElementPHI, Begin.getAlignment());
+ Begin = Begin.withPointer(ElementPHI);
Base = CGF.MakeAddrLValue(Begin, KmpDependInfoTy, Base.getBaseInfo(),
Base.getTBAAInfo());
// deps[i].flags = NewDepKind;
@@ -5370,21 +5259,21 @@ static void EmitOMPAggregateReduction(
llvm::PHINode *RHSElementPHI = CGF.Builder.CreatePHI(
RHSBegin->getType(), 2, "omp.arraycpy.srcElementPast");
RHSElementPHI->addIncoming(RHSBegin, EntryBB);
- Address RHSElementCurrent =
- Address(RHSElementPHI,
- RHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
+ Address RHSElementCurrent(
+ RHSElementPHI, RHSAddr.getElementType(),
+ RHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
llvm::PHINode *LHSElementPHI = CGF.Builder.CreatePHI(
LHSBegin->getType(), 2, "omp.arraycpy.destElementPast");
LHSElementPHI->addIncoming(LHSBegin, EntryBB);
- Address LHSElementCurrent =
- Address(LHSElementPHI,
- LHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
+ Address LHSElementCurrent(
+ LHSElementPHI, LHSAddr.getElementType(),
+ LHSAddr.getAlignment().alignmentOfArrayElement(ElementSize));
// Emit copy.
CodeGenFunction::OMPPrivateScope Scope(CGF);
- Scope.addPrivate(LHSVar, [=]() { return LHSElementCurrent; });
- Scope.addPrivate(RHSVar, [=]() { return RHSElementCurrent; });
+ Scope.addPrivate(LHSVar, LHSElementCurrent);
+ Scope.addPrivate(RHSVar, RHSElementCurrent);
Scope.Privatize();
RedOpGen(CGF, XExpr, EExpr, UpExpr);
Scope.ForceCleanup();
@@ -5429,9 +5318,9 @@ static void emitReductionCombiner(CodeGenFunction &CGF,
}
llvm::Function *CGOpenMPRuntime::emitReductionFunction(
- SourceLocation Loc, llvm::Type *ArgsType, ArrayRef<const Expr *> Privates,
- ArrayRef<const Expr *> LHSExprs, ArrayRef<const Expr *> RHSExprs,
- ArrayRef<const Expr *> ReductionOps) {
+ SourceLocation Loc, llvm::Type *ArgsElemType,
+ ArrayRef<const Expr *> Privates, ArrayRef<const Expr *> LHSExprs,
+ ArrayRef<const Expr *> RHSExprs, ArrayRef<const Expr *> ReductionOps) {
ASTContext &C = CGM.getContext();
// void reduction_func(void *LHSArg, void *RHSArg);
@@ -5456,29 +5345,27 @@ llvm::Function *CGOpenMPRuntime::emitReductionFunction(
// Dst = (void*[n])(LHSArg);
// Src = (void*[n])(RHSArg);
Address LHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
- ArgsType), CGF.getPointerAlign());
+ CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&LHSArg)),
+ ArgsElemType->getPointerTo()),
+ ArgsElemType, CGF.getPointerAlign());
Address RHS(CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
- ArgsType), CGF.getPointerAlign());
+ CGF.Builder.CreateLoad(CGF.GetAddrOfLocalVar(&RHSArg)),
+ ArgsElemType->getPointerTo()),
+ ArgsElemType, CGF.getPointerAlign());
// ...
// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
// ...
CodeGenFunction::OMPPrivateScope Scope(CGF);
- auto IPriv = Privates.begin();
+ const auto *IPriv = Privates.begin();
unsigned Idx = 0;
for (unsigned I = 0, E = ReductionOps.size(); I < E; ++I, ++IPriv, ++Idx) {
const auto *RHSVar =
cast<VarDecl>(cast<DeclRefExpr>(RHSExprs[I])->getDecl());
- Scope.addPrivate(RHSVar, [&CGF, RHS, Idx, RHSVar]() {
- return emitAddrOfVarFromArray(CGF, RHS, Idx, RHSVar);
- });
+ Scope.addPrivate(RHSVar, emitAddrOfVarFromArray(CGF, RHS, Idx, RHSVar));
const auto *LHSVar =
cast<VarDecl>(cast<DeclRefExpr>(LHSExprs[I])->getDecl());
- Scope.addPrivate(LHSVar, [&CGF, LHS, Idx, LHSVar]() {
- return emitAddrOfVarFromArray(CGF, LHS, Idx, LHSVar);
- });
+ Scope.addPrivate(LHSVar, emitAddrOfVarFromArray(CGF, LHS, Idx, LHSVar));
QualType PrivTy = (*IPriv)->getType();
if (PrivTy->isVariablyModifiedType()) {
// Get array size and emit VLA type.
@@ -5495,8 +5382,8 @@ llvm::Function *CGOpenMPRuntime::emitReductionFunction(
}
Scope.Privatize();
IPriv = Privates.begin();
- auto ILHS = LHSExprs.begin();
- auto IRHS = RHSExprs.begin();
+ const auto *ILHS = LHSExprs.begin();
+ const auto *IRHS = RHSExprs.begin();
for (const Expr *E : ReductionOps) {
if ((*IPriv)->getType()->isArrayType()) {
// Emit reduction for array section.
@@ -5591,9 +5478,9 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
if (SimpleReduction) {
CodeGenFunction::RunCleanupsScope Scope(CGF);
- auto IPriv = Privates.begin();
- auto ILHS = LHSExprs.begin();
- auto IRHS = RHSExprs.begin();
+ const auto *IPriv = Privates.begin();
+ const auto *ILHS = LHSExprs.begin();
+ const auto *IRHS = RHSExprs.begin();
for (const Expr *E : ReductionOps) {
emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
cast<DeclRefExpr>(*IRHS));
@@ -5618,7 +5505,7 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
/*IndexTypeQuals=*/0);
Address ReductionList =
CGF.CreateMemTemp(ReductionArrayTy, ".omp.reduction.red_list");
- auto IPriv = Privates.begin();
+ const auto *IPriv = Privates.begin();
unsigned Idx = 0;
for (unsigned I = 0, E = RHSExprs.size(); I < E; ++I, ++IPriv, ++Idx) {
Address Elem = CGF.Builder.CreateConstArrayGEP(ReductionList, Idx);
@@ -5641,9 +5528,9 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
}
// 2. Emit reduce_func().
- llvm::Function *ReductionFn = emitReductionFunction(
- Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
- LHSExprs, RHSExprs, ReductionOps);
+ llvm::Function *ReductionFn =
+ emitReductionFunction(Loc, CGF.ConvertTypeForMem(ReductionArrayTy),
+ Privates, LHSExprs, RHSExprs, ReductionOps);
// 3. Create static kmp_critical_name lock = { 0 };
std::string Name = getName({"reduction"});
@@ -5695,9 +5582,9 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
auto &&CodeGen = [Privates, LHSExprs, RHSExprs, ReductionOps](
CodeGenFunction &CGF, PrePostActionTy &Action) {
CGOpenMPRuntime &RT = CGF.CGM.getOpenMPRuntime();
- auto IPriv = Privates.begin();
- auto ILHS = LHSExprs.begin();
- auto IRHS = RHSExprs.begin();
+ const auto *IPriv = Privates.begin();
+ const auto *ILHS = LHSExprs.begin();
+ const auto *IRHS = RHSExprs.begin();
for (const Expr *E : ReductionOps) {
RT.emitSingleReductionCombiner(CGF, E, *IPriv, cast<DeclRefExpr>(*ILHS),
cast<DeclRefExpr>(*IRHS));
@@ -5729,9 +5616,9 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
auto &&AtomicCodeGen = [Loc, Privates, LHSExprs, RHSExprs, ReductionOps](
CodeGenFunction &CGF, PrePostActionTy &Action) {
- auto ILHS = LHSExprs.begin();
- auto IRHS = RHSExprs.begin();
- auto IPriv = Privates.begin();
+ const auto *ILHS = LHSExprs.begin();
+ const auto *IRHS = RHSExprs.begin();
+ const auto *IPriv = Privates.begin();
for (const Expr *E : ReductionOps) {
const Expr *XExpr = nullptr;
const Expr *EExpr = nullptr;
@@ -5773,14 +5660,11 @@ void CGOpenMPRuntime::emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
llvm::AtomicOrdering::Monotonic, Loc,
[&CGF, UpExpr, VD, Loc](RValue XRValue) {
CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
- PrivateScope.addPrivate(
- VD, [&CGF, VD, XRValue, Loc]() {
- Address LHSTemp = CGF.CreateMemTemp(VD->getType());
- CGF.emitOMPSimpleStore(
- CGF.MakeAddrLValue(LHSTemp, VD->getType()), XRValue,
- VD->getType().getNonReferenceType(), Loc);
- return LHSTemp;
- });
+ Address LHSTemp = CGF.CreateMemTemp(VD->getType());
+ CGF.emitOMPSimpleStore(
+ CGF.MakeAddrLValue(LHSTemp, VD->getType()), XRValue,
+ VD->getType().getNonReferenceType(), Loc);
+ PrivateScope.addPrivate(VD, LHSTemp);
(void)PrivateScope.Privatize();
return CGF.EmitAnyExpr(UpExpr);
});
@@ -5896,9 +5780,12 @@ static llvm::Value *emitReduceInitFunction(CodeGenModule &CGM,
Fn->setDoesNotRecurse();
CodeGenFunction CGF(CGM);
CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
+ QualType PrivateType = RCG.getPrivateType(N);
Address PrivateAddr = CGF.EmitLoadOfPointer(
- CGF.GetAddrOfLocalVar(&Param),
- C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
+ CGF.Builder.CreateElementBitCast(
+ CGF.GetAddrOfLocalVar(&Param),
+ CGF.ConvertTypeForMem(PrivateType)->getPointerTo()),
+ C.getPointerType(PrivateType)->castAs<PointerType>());
llvm::Value *Size = nullptr;
// If the size of the reduction item is non-constant, load it from global
// threadprivate variable.
@@ -5980,22 +5867,22 @@ static llvm::Value *emitReduceCombFunction(CodeGenModule &CGM,
// %lhs = bitcast void* %arg0 to <type>*
// %rhs = bitcast void* %arg1 to <type>*
CodeGenFunction::OMPPrivateScope PrivateScope(CGF);
- PrivateScope.addPrivate(LHSVD, [&C, &CGF, &ParamInOut, LHSVD]() {
- // Pull out the pointer to the variable.
- Address PtrAddr = CGF.EmitLoadOfPointer(
- CGF.GetAddrOfLocalVar(&ParamInOut),
- C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
- return CGF.Builder.CreateElementBitCast(
- PtrAddr, CGF.ConvertTypeForMem(LHSVD->getType()));
- });
- PrivateScope.addPrivate(RHSVD, [&C, &CGF, &ParamIn, RHSVD]() {
- // Pull out the pointer to the variable.
- Address PtrAddr = CGF.EmitLoadOfPointer(
- CGF.GetAddrOfLocalVar(&ParamIn),
- C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
- return CGF.Builder.CreateElementBitCast(
- PtrAddr, CGF.ConvertTypeForMem(RHSVD->getType()));
- });
+ PrivateScope.addPrivate(
+ LHSVD,
+ // Pull out the pointer to the variable.
+ CGF.EmitLoadOfPointer(
+ CGF.Builder.CreateElementBitCast(
+ CGF.GetAddrOfLocalVar(&ParamInOut),
+ CGF.ConvertTypeForMem(LHSVD->getType())->getPointerTo()),
+ C.getPointerType(LHSVD->getType())->castAs<PointerType>()));
+ PrivateScope.addPrivate(
+ RHSVD,
+ // Pull out the pointer to the variable.
+ CGF.EmitLoadOfPointer(
+ CGF.Builder.CreateElementBitCast(
+ CGF.GetAddrOfLocalVar(&ParamIn),
+ CGF.ConvertTypeForMem(RHSVD->getType())->getPointerTo()),
+ C.getPointerType(RHSVD->getType())->castAs<PointerType>()));
PrivateScope.Privatize();
// Emit the combiner body:
// %2 = <ReductionOp>(<type> *%lhs, <type> *%rhs)
@@ -6036,8 +5923,7 @@ static llvm::Value *emitReduceFiniFunction(CodeGenModule &CGM,
CodeGenFunction CGF(CGM);
CGF.StartFunction(GlobalDecl(), C.VoidTy, Fn, FnInfo, Args, Loc, Loc);
Address PrivateAddr = CGF.EmitLoadOfPointer(
- CGF.GetAddrOfLocalVar(&Param),
- C.getPointerType(C.VoidPtrTy).castAs<PointerType>());
+ CGF.GetAddrOfLocalVar(&Param), C.VoidPtrTy.castAs<PointerType>());
llvm::Value *Size = nullptr;
// If the size of the reduction item is non-constant, load it from global
// threadprivate variable.
@@ -6237,7 +6123,7 @@ Address CGOpenMPRuntime::getTaskReductionItem(CodeGenFunction &CGF,
OMPBuilder.getOrCreateRuntimeFunction(
CGM.getModule(), OMPRTL___kmpc_task_reduction_get_th_data),
Args),
- SharedLVal.getAlignment());
+ CGF.Int8Ty, SharedLVal.getAlignment());
}
void CGOpenMPRuntime::emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc,
@@ -6478,7 +6364,7 @@ void CGOpenMPRuntime::emitUsesAllocatorsInit(CodeGenFunction &CGF,
.getLimitedValue());
LValue AllocatorTraitsLVal = CGF.EmitLValue(AllocatorTraits);
Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- AllocatorTraitsLVal.getAddress(CGF), CGF.VoidPtrPtrTy);
+ AllocatorTraitsLVal.getAddress(CGF), CGF.VoidPtrPtrTy, CGF.VoidPtrTy);
AllocatorTraitsLVal = CGF.MakeAddrLValue(Addr, CGF.getContext().VoidPtrTy,
AllocatorTraitsLVal.getBaseInfo(),
AllocatorTraitsLVal.getTBAAInfo());
@@ -6528,6 +6414,8 @@ void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
// mangled name of the function that encloses the target region and BB is the
// line number of the target region.
+ const bool BuildOutlinedFn = CGM.getLangOpts().OpenMPIsDevice ||
+ !CGM.getLangOpts().OpenMPOffloadMandatory;
unsigned DeviceID;
unsigned FileID;
unsigned Line;
@@ -6546,7 +6434,8 @@ void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
CGOpenMPTargetRegionInfo CGInfo(CS, CodeGen, EntryFnName);
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGInfo);
- OutlinedFn = CGF.GenerateOpenMPCapturedStmtFunction(CS, D.getBeginLoc());
+ if (BuildOutlinedFn)
+ OutlinedFn = CGF.GenerateOpenMPCapturedStmtFunction(CS, D.getBeginLoc());
// If this target outline function is not an offload entry, we don't need to
// register it.
@@ -6566,7 +6455,7 @@ void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
if (CGM.getLangOpts().OpenMPIsDevice) {
OutlinedFnID = llvm::ConstantExpr::getBitCast(OutlinedFn, CGM.Int8PtrTy);
- OutlinedFn->setLinkage(llvm::GlobalValue::WeakAnyLinkage);
+ OutlinedFn->setLinkage(llvm::GlobalValue::WeakODRLinkage);
OutlinedFn->setDSOLocal(false);
if (CGM.getTriple().isAMDGCN())
OutlinedFn->setCallingConv(llvm::CallingConv::AMDGPU_KERNEL);
@@ -6578,26 +6467,38 @@ void CGOpenMPRuntime::emitTargetOutlinedFunctionHelper(
llvm::Constant::getNullValue(CGM.Int8Ty), Name);
}
+ // If we do not allow host fallback we still need a named address to use.
+ llvm::Constant *TargetRegionEntryAddr = OutlinedFn;
+ if (!BuildOutlinedFn) {
+ assert(!CGM.getModule().getGlobalVariable(EntryFnName, true) &&
+ "Named kernel already exists?");
+ TargetRegionEntryAddr = new llvm::GlobalVariable(
+ CGM.getModule(), CGM.Int8Ty, /*isConstant=*/true,
+ llvm::GlobalValue::InternalLinkage,
+ llvm::Constant::getNullValue(CGM.Int8Ty), EntryFnName);
+ }
+
// Register the information for the entry associated with this target region.
OffloadEntriesInfoManager.registerTargetRegionEntryInfo(
- DeviceID, FileID, ParentName, Line, OutlinedFn, OutlinedFnID,
+ DeviceID, FileID, ParentName, Line, TargetRegionEntryAddr, OutlinedFnID,
OffloadEntriesInfoManagerTy::OMPTargetRegionEntryTargetRegion);
// Add NumTeams and ThreadLimit attributes to the outlined GPU function
int32_t DefaultValTeams = -1;
getNumTeamsExprForTargetDirective(CGF, D, DefaultValTeams);
- if (DefaultValTeams > 0) {
+ if (DefaultValTeams > 0 && OutlinedFn) {
OutlinedFn->addFnAttr("omp_target_num_teams",
std::to_string(DefaultValTeams));
}
int32_t DefaultValThreads = -1;
getNumThreadsExprForTargetDirective(CGF, D, DefaultValThreads);
- if (DefaultValThreads > 0) {
+ if (DefaultValThreads > 0 && OutlinedFn) {
OutlinedFn->addFnAttr("omp_target_thread_limit",
std::to_string(DefaultValThreads));
}
- CGM.getTargetCodeGenInfo().setTargetAttributes(nullptr, OutlinedFn, CGM);
+ if (BuildOutlinedFn)
+ CGM.getTargetCodeGenInfo().setTargetAttributes(nullptr, OutlinedFn, CGM);
}
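When no host fallback is built, the entry recorded for the region is a named dummy rather than a function. Roughly (a sketch; the entry name is illustrative, real names follow the mangling scheme described above):

// @__omp_offloading_<devid>_<fileid>_<parent>_l<line> = internal constant i8 0
// TargetRegionEntryAddr then points at this zero-initialized i8, which is
// enough for the offload entry table even though no host code sits behind it.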
/// Checks if the expression is constant or does not have non-trivial function
@@ -7873,6 +7774,7 @@ private:
isa<CXXThisExpr>(OAShE->getBase()->IgnoreParenCasts())) {
BP = Address(
CGF.EmitScalarExpr(OAShE->getBase()),
+ CGF.ConvertTypeForMem(OAShE->getBase()->getType()->getPointeeType()),
CGF.getContext().getTypeAlignInChars(OAShE->getBase()->getType()));
} else {
// The base is the reference to the variable.
@@ -8042,9 +7944,12 @@ private:
return BaseLV;
};
if (OAShE) {
- LowestElem = LB = Address(CGF.EmitScalarExpr(OAShE->getBase()),
- CGF.getContext().getTypeAlignInChars(
- OAShE->getBase()->getType()));
+ LowestElem = LB =
+ Address(CGF.EmitScalarExpr(OAShE->getBase()),
+ CGF.ConvertTypeForMem(
+ OAShE->getBase()->getType()->getPointeeType()),
+ CGF.getContext().getTypeAlignInChars(
+ OAShE->getBase()->getType()));
} else if (IsMemberReference) {
const auto *ME = cast<MemberExpr>(I->getAssociatedExpression());
LValue BaseLVal = EmitMemberExprBase(CGF, ME);
@@ -8080,8 +7985,8 @@ private:
CharUnits TypeSize = CGF.getContext().getTypeSizeInChars(
I->getAssociatedExpression()->getType());
Address HB = CGF.Builder.CreateConstGEP(
- CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(LowestElem,
- CGF.VoidPtrTy),
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ LowestElem, CGF.VoidPtrTy, CGF.Int8Ty),
TypeSize.getQuantity() - 1);
PartialStruct.HighestElem = {
std::numeric_limits<decltype(
@@ -8326,7 +8231,7 @@ private:
}
// Skip the dummy dimension since we have already have its information.
- auto DI = DimSizes.begin() + 1;
+ auto *DI = DimSizes.begin() + 1;
// Product of dimension.
llvm::Value *DimProd =
llvm::ConstantInt::get(CGF.CGM.Int64Ty, ElementTypeSize);
@@ -9037,15 +8942,13 @@ public:
void generateInfoForLambdaCaptures(
const ValueDecl *VD, llvm::Value *Arg, MapCombinedInfoTy &CombinedInfo,
llvm::DenseMap<llvm::Value *, llvm::Value *> &LambdaPointers) const {
- const auto *RD = VD->getType()
- .getCanonicalType()
- .getNonReferenceType()
- ->getAsCXXRecordDecl();
+ QualType VDType = VD->getType().getCanonicalType().getNonReferenceType();
+ const auto *RD = VDType->getAsCXXRecordDecl();
if (!RD || !RD->isLambda())
return;
- Address VDAddr = Address(Arg, CGF.getContext().getDeclAlign(VD));
- LValue VDLVal = CGF.MakeAddrLValue(
- VDAddr, VD->getType().getCanonicalType().getNonReferenceType());
+ Address VDAddr(Arg, CGF.ConvertTypeForMem(VDType),
+ CGF.getContext().getDeclAlign(VD));
+ LValue VDLVal = CGF.MakeAddrLValue(VDAddr, VDType);
llvm::DenseMap<const VarDecl *, FieldDecl *> Captures;
FieldDecl *ThisCapture = nullptr;
RD->getCaptureFields(Captures, ThisCapture);
@@ -9500,11 +9403,11 @@ static void emitNonContiguousDescriptor(
}
// args[I] = &dims
Address DAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- DimsAddr, CGM.Int8PtrTy);
+ DimsAddr, CGM.Int8PtrTy, CGM.Int8Ty);
llvm::Value *P = CGF.Builder.CreateConstInBoundsGEP2_32(
llvm::ArrayType::get(CGM.VoidPtrTy, Info.NumberOfPtrs),
Info.PointersArray, 0, I);
- Address PAddr(P, CGF.getPointerAlign());
+ Address PAddr(P, CGM.VoidPtrTy, CGF.getPointerAlign());
CGF.Builder.CreateStore(DAddr.getPointer(), PAddr);
++L;
}
@@ -9575,12 +9478,6 @@ static void emitOffloadingArrays(
if (Info.NumberOfPtrs) {
// Detect if we have any capture size requiring runtime evaluation of the
// size so that a constant array could be eventually used.
- bool hasRuntimeEvaluationCaptureSize = false;
- for (llvm::Value *S : CombinedInfo.Sizes)
- if (!isa<llvm::Constant>(S)) {
- hasRuntimeEvaluationCaptureSize = true;
- break;
- }
llvm::APInt PointerNumAP(32, Info.NumberOfPtrs, /*isSigned=*/true);
QualType PointerArrayType = Ctx.getConstantArrayType(
@@ -9600,35 +9497,56 @@ static void emitOffloadingArrays(
// need to fill up the arrays as we do for the pointers.
QualType Int64Ty =
Ctx.getIntTypeForBitwidth(/*DestWidth=*/64, /*Signed=*/1);
- if (hasRuntimeEvaluationCaptureSize) {
+ SmallVector<llvm::Constant *> ConstSizes(
+ CombinedInfo.Sizes.size(), llvm::ConstantInt::get(CGF.Int64Ty, 0));
+ llvm::SmallBitVector RuntimeSizes(CombinedInfo.Sizes.size());
+ for (unsigned I = 0, E = CombinedInfo.Sizes.size(); I < E; ++I) {
+ if (auto *CI = dyn_cast<llvm::Constant>(CombinedInfo.Sizes[I])) {
+ if (!isa<llvm::ConstantExpr>(CI) && !isa<llvm::GlobalValue>(CI)) {
+ if (IsNonContiguous && (CombinedInfo.Types[I] &
+ MappableExprsHandler::OMP_MAP_NON_CONTIG))
+ ConstSizes[I] = llvm::ConstantInt::get(
+ CGF.Int64Ty, CombinedInfo.NonContigInfo.Dims[I]);
+ else
+ ConstSizes[I] = CI;
+ continue;
+ }
+ }
+ RuntimeSizes.set(I);
+ }
+
+ if (RuntimeSizes.all()) {
QualType SizeArrayType = Ctx.getConstantArrayType(
Int64Ty, PointerNumAP, nullptr, ArrayType::Normal,
/*IndexTypeQuals=*/0);
Info.SizesArray =
CGF.CreateMemTemp(SizeArrayType, ".offload_sizes").getPointer();
} else {
- // We expect all the sizes to be constant, so we collect them to create
- // a constant array.
- SmallVector<llvm::Constant *, 16> ConstSizes;
- for (unsigned I = 0, E = CombinedInfo.Sizes.size(); I < E; ++I) {
- if (IsNonContiguous &&
- (CombinedInfo.Types[I] & MappableExprsHandler::OMP_MAP_NON_CONTIG)) {
- ConstSizes.push_back(llvm::ConstantInt::get(
- CGF.Int64Ty, CombinedInfo.NonContigInfo.Dims[I]));
- } else {
- ConstSizes.push_back(cast<llvm::Constant>(CombinedInfo.Sizes[I]));
- }
- }
-
auto *SizesArrayInit = llvm::ConstantArray::get(
llvm::ArrayType::get(CGM.Int64Ty, ConstSizes.size()), ConstSizes);
std::string Name = CGM.getOpenMPRuntime().getName({"offload_sizes"});
auto *SizesArrayGbl = new llvm::GlobalVariable(
- CGM.getModule(), SizesArrayInit->getType(),
- /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage,
- SizesArrayInit, Name);
+ CGM.getModule(), SizesArrayInit->getType(), /*isConstant=*/true,
+ llvm::GlobalValue::PrivateLinkage, SizesArrayInit, Name);
SizesArrayGbl->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
- Info.SizesArray = SizesArrayGbl;
+ if (RuntimeSizes.any()) {
+ QualType SizeArrayType = Ctx.getConstantArrayType(
+ Int64Ty, PointerNumAP, nullptr, ArrayType::Normal,
+ /*IndexTypeQuals=*/0);
+ Address Buffer = CGF.CreateMemTemp(SizeArrayType, ".offload_sizes");
+ llvm::Value *GblConstPtr =
+ CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
+ SizesArrayGbl, CGM.Int64Ty->getPointerTo());
+ CGF.Builder.CreateMemCpy(
+ Buffer,
+ Address(GblConstPtr, CGM.Int64Ty,
+ CGM.getNaturalTypeAlignment(Ctx.getIntTypeForBitwidth(
+ /*DestWidth=*/64, /*Signed=*/false))),
+ CGF.getTypeSize(SizeArrayType));
+ Info.SizesArray = Buffer.getPointer();
+ } else {
+ Info.SizesArray = SizesArrayGbl;
+ }
}
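For illustration, a minimal sketch of when the mixed path triggers (directive and variable names are assumptions, not from this change):

//   int b; int *a;
//   #pragma omp target map(tofrom: a[0:n], b)
// The section a[0:n] has a runtime size (n * sizeof(int)) while b does not,
// so RuntimeSizes has exactly one bit set: neither .all() nor .none() holds.
// The constant entries land in the private @.offload_sizes global, that
// global is memcpy'd into the .offload_sizes stack buffer, and the runtime
// size is stored over its slot in the loop below via RuntimeSizes.test(I).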
// The map types are always constant so we don't need to generate code to
@@ -9683,7 +9601,8 @@ static void emitOffloadingArrays(
Info.BasePointersArray, 0, I);
BP = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
BP, BPVal->getType()->getPointerTo(/*AddrSpace=*/0));
- Address BPAddr(BP, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
+ Address BPAddr(BP, BPVal->getType(),
+ Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
CGF.Builder.CreateStore(BPVal, BPAddr);
if (Info.requiresDevicePointerInfo())
@@ -9697,16 +9616,16 @@ static void emitOffloadingArrays(
Info.PointersArray, 0, I);
P = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
P, PVal->getType()->getPointerTo(/*AddrSpace=*/0));
- Address PAddr(P, Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
+ Address PAddr(P, PVal->getType(), Ctx.getTypeAlignInChars(Ctx.VoidPtrTy));
CGF.Builder.CreateStore(PVal, PAddr);
- if (hasRuntimeEvaluationCaptureSize) {
+ if (RuntimeSizes.test(I)) {
llvm::Value *S = CGF.Builder.CreateConstInBoundsGEP2_32(
llvm::ArrayType::get(CGM.Int64Ty, Info.NumberOfPtrs),
Info.SizesArray,
/*Idx0=*/0,
/*Idx1=*/I);
- Address SAddr(S, Ctx.getTypeAlignInChars(Int64Ty));
+ Address SAddr(S, CGM.Int64Ty, Ctx.getTypeAlignInChars(Int64Ty));
CGF.Builder.CreateStore(CGF.Builder.CreateIntCast(CombinedInfo.Sizes[I],
CGM.Int64Ty,
/*isSigned=*/true),
@@ -9957,6 +9876,7 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
cast<VarDecl>(cast<DeclRefExpr>(D->getMapperVarRef())->getDecl());
SourceLocation Loc = D->getLocation();
CharUnits ElementSize = C.getTypeSizeInChars(Ty);
+ llvm::Type *ElemTy = CGM.getTypes().ConvertTypeForMem(Ty);
// Prepare mapper function arguments and attributes.
ImplicitParamDecl HandleArg(C, /*DC=*/nullptr, Loc, /*Id=*/nullptr,
@@ -10011,8 +9931,7 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
Size, MapperCGF.Builder.getInt64(ElementSize.getQuantity()));
llvm::Value *PtrBegin = MapperCGF.Builder.CreateBitCast(
BeginIn, CGM.getTypes().ConvertTypeForMem(PtrTy));
- llvm::Value *PtrEnd = MapperCGF.Builder.CreateGEP(
- PtrBegin->getType()->getPointerElementType(), PtrBegin, Size);
+ llvm::Value *PtrEnd = MapperCGF.Builder.CreateGEP(ElemTy, PtrBegin, Size);
llvm::Value *MapType = MapperCGF.EmitLoadOfScalar(
MapperCGF.GetAddrOfLocalVar(&TypeArg), /*Volatile=*/false,
C.getPointerType(Int64Ty), Loc);
@@ -10044,13 +9963,13 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
llvm::PHINode *PtrPHI = MapperCGF.Builder.CreatePHI(
PtrBegin->getType(), 2, "omp.arraymap.ptrcurrent");
PtrPHI->addIncoming(PtrBegin, EntryBB);
- Address PtrCurrent =
- Address(PtrPHI, MapperCGF.GetAddrOfLocalVar(&BeginArg)
- .getAlignment()
- .alignmentOfArrayElement(ElementSize));
+ Address PtrCurrent(PtrPHI, ElemTy,
+ MapperCGF.GetAddrOfLocalVar(&BeginArg)
+ .getAlignment()
+ .alignmentOfArrayElement(ElementSize));
// Privatize the declared variable of mapper to be the current array element.
CodeGenFunction::OMPPrivateScope Scope(MapperCGF);
- Scope.addPrivate(MapperVarDecl, [PtrCurrent]() { return PtrCurrent; });
+ Scope.addPrivate(MapperVarDecl, PtrCurrent);
(void)Scope.Privatize();
// Get map clause information. Fill up the arrays with all mapped variables.
@@ -10169,7 +10088,6 @@ void CGOpenMPRuntime::emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
// Update the pointer to point to the next element that needs to be mapped,
// and check whether we have mapped all elements.
- llvm::Type *ElemTy = PtrPHI->getType()->getPointerElementType();
llvm::Value *PtrNext = MapperCGF.Builder.CreateConstGEP1_32(
ElemTy, PtrPHI, /*Idx0=*/1, "omp.arraymap.next");
PtrPHI->addIncoming(PtrNext, LastBB);
@@ -10309,10 +10227,14 @@ void CGOpenMPRuntime::emitTargetCall(
if (!CGF.HaveInsertPoint())
return;
- assert(OutlinedFn && "Invalid outlined function!");
+ const bool OffloadingMandatory = !CGM.getLangOpts().OpenMPIsDevice &&
+ CGM.getLangOpts().OpenMPOffloadMandatory;
+
+ assert((OffloadingMandatory || OutlinedFn) && "Invalid outlined function!");
const bool RequiresOuterTask = D.hasClausesOfKind<OMPDependClause>() ||
- D.hasClausesOfKind<OMPNowaitClause>();
+ D.hasClausesOfKind<OMPNowaitClause>() ||
+ D.hasClausesOfKind<OMPInReductionClause>();
llvm::SmallVector<llvm::Value *, 16> CapturedVars;
const CapturedStmt &CS = *D.getCapturedStmt(OMPD_target);
auto &&ArgsCodegen = [&CS, &CapturedVars](CodeGenFunction &CGF,
@@ -10324,18 +10246,26 @@ void CGOpenMPRuntime::emitTargetCall(
CodeGenFunction::OMPTargetDataInfo InputInfo;
llvm::Value *MapTypesArray = nullptr;
llvm::Value *MapNamesArray = nullptr;
- // Fill up the pointer arrays and transfer execution to the device.
- auto &&ThenGen = [this, Device, OutlinedFn, OutlinedFnID, &D, &InputInfo,
- &MapTypesArray, &MapNamesArray, &CS, RequiresOuterTask,
- &CapturedVars,
- SizeEmitter](CodeGenFunction &CGF, PrePostActionTy &) {
- if (Device.getInt() == OMPC_DEVICE_ancestor) {
- // Reverse offloading is not supported, so just execute on the host.
+ // Generate code for the host fallback function.
+ auto &&FallbackGen = [this, OutlinedFn, &D, &CapturedVars, RequiresOuterTask,
+ &CS, OffloadingMandatory](CodeGenFunction &CGF) {
+ if (OffloadingMandatory) {
+ CGF.Builder.CreateUnreachable();
+ } else {
if (RequiresOuterTask) {
CapturedVars.clear();
CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
}
emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
+ }
+ };
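+ // Under -fopenmp-offload-mandatory the offload-failure edge now ends in a
+ // trap instead of a host call; schematically (assumed IR shape, labels
+ // illustrative):
+ //
+ //   omp_offload.failed:
+ //     unreachable            ; was: call void @.omp_outlined.(...)
+ //   omp_offload.cont:
+ //     ...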
+ // Fill up the pointer arrays and transfer execution to the device.
+ auto &&ThenGen = [this, Device, OutlinedFnID, &D, &InputInfo, &MapTypesArray,
+ &MapNamesArray, SizeEmitter,
+ FallbackGen](CodeGenFunction &CGF, PrePostActionTy &) {
+ if (Device.getInt() == OMPC_DEVICE_ancestor) {
+ // Reverse offloading is not supported, so just execute on the host.
+ FallbackGen(CGF);
return;
}
@@ -10350,6 +10280,7 @@ void CGOpenMPRuntime::emitTargetCall(
// From this point on, we need to have an ID of the target region defined.
assert(OutlinedFnID && "Invalid outlined function ID!");
+ (void)OutlinedFnID;
// Emit device ID if any.
llvm::Value *DeviceID;
@@ -10479,25 +10410,16 @@ void CGOpenMPRuntime::emitTargetCall(
CGF.Builder.CreateCondBr(Failed, OffloadFailedBlock, OffloadContBlock);
CGF.EmitBlock(OffloadFailedBlock);
- if (RequiresOuterTask) {
- CapturedVars.clear();
- CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
- }
- emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
+ FallbackGen(CGF);
+
CGF.EmitBranch(OffloadContBlock);
CGF.EmitBlock(OffloadContBlock, /*IsFinished=*/true);
};
// Notify that the host version must be executed.
- auto &&ElseGen = [this, &D, OutlinedFn, &CS, &CapturedVars,
- RequiresOuterTask](CodeGenFunction &CGF,
- PrePostActionTy &) {
- if (RequiresOuterTask) {
- CapturedVars.clear();
- CGF.GenerateOpenMPCapturedVars(CS, CapturedVars);
- }
- emitOutlinedFunctionCall(CGF, D.getBeginLoc(), OutlinedFn, CapturedVars);
+ auto &&ElseGen = [FallbackGen](CodeGenFunction &CGF, PrePostActionTy &) {
+ FallbackGen(CGF);
};
auto &&TargetThenGen = [this, &ThenGen, &D, &InputInfo, &MapTypesArray,
@@ -10587,11 +10509,13 @@ void CGOpenMPRuntime::emitTargetCall(
InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
InputInfo.BasePointersArray =
- Address(Info.BasePointersArray, CGM.getPointerAlign());
+ Address(Info.BasePointersArray, CGF.VoidPtrTy, CGM.getPointerAlign());
InputInfo.PointersArray =
- Address(Info.PointersArray, CGM.getPointerAlign());
- InputInfo.SizesArray = Address(Info.SizesArray, CGM.getPointerAlign());
- InputInfo.MappersArray = Address(Info.MappersArray, CGM.getPointerAlign());
+ Address(Info.PointersArray, CGF.VoidPtrTy, CGM.getPointerAlign());
+ InputInfo.SizesArray =
+ Address(Info.SizesArray, CGF.Int64Ty, CGM.getPointerAlign());
+ InputInfo.MappersArray =
+ Address(Info.MappersArray, CGF.VoidPtrTy, CGM.getPointerAlign());
MapTypesArray = Info.MapTypesArray;
MapNamesArray = Info.MapNamesArray;
if (RequiresOuterTask)
@@ -10865,7 +10789,7 @@ void CGOpenMPRuntime::registerTargetGlobalVariable(const VarDecl *VD,
// If we have host/nohost variables, they do not need to be registered.
Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
OMPDeclareTargetDeclAttr::getDeviceType(VD);
- if (DevTy && DevTy.getValue() != OMPDeclareTargetDeclAttr::DT_Any)
+ if (DevTy && *DevTy != OMPDeclareTargetDeclAttr::DT_Any)
return;
llvm::Optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
@@ -11468,12 +11392,13 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
{/*ForEndCall=*/false});
InputInfo.NumberOfTargetItems = Info.NumberOfPtrs;
InputInfo.BasePointersArray =
- Address(Info.BasePointersArray, CGM.getPointerAlign());
+ Address(Info.BasePointersArray, CGF.VoidPtrTy, CGM.getPointerAlign());
InputInfo.PointersArray =
- Address(Info.PointersArray, CGM.getPointerAlign());
+ Address(Info.PointersArray, CGF.VoidPtrTy, CGM.getPointerAlign());
InputInfo.SizesArray =
- Address(Info.SizesArray, CGM.getPointerAlign());
- InputInfo.MappersArray = Address(Info.MappersArray, CGM.getPointerAlign());
+ Address(Info.SizesArray, CGF.Int64Ty, CGM.getPointerAlign());
+ InputInfo.MappersArray =
+ Address(Info.MappersArray, CGF.VoidPtrTy, CGM.getPointerAlign());
MapTypesArray = Info.MapTypesArray;
MapNamesArray = Info.MapNamesArray;
if (RequiresOuterTask)
@@ -11493,13 +11418,21 @@ void CGOpenMPRuntime::emitTargetDataStandAloneCall(
namespace {
/// Kind of parameter in a function with 'declare simd' directive.
- enum ParamKindTy { LinearWithVarStride, Linear, Uniform, Vector };
- /// Attribute set of the parameter.
- struct ParamAttrTy {
- ParamKindTy Kind = Vector;
- llvm::APSInt StrideOrArg;
- llvm::APSInt Alignment;
- };
+enum ParamKindTy {
+ Linear,
+ LinearRef,
+ LinearUVal,
+ LinearVal,
+ Uniform,
+ Vector,
+};
+/// Attribute set of the parameter.
+struct ParamAttrTy {
+ ParamKindTy Kind = Vector;
+ llvm::APSInt StrideOrArg;
+ llvm::APSInt Alignment;
+ bool HasVarStride = false;
+};
} // namespace
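For orientation, the clause-to-kind mapping implemented further down is roughly:

// linear(p)                         -> Linear
// linear(ref(r))                    -> LinearRef
// linear(uval(r))                   -> LinearUVal
// linear(val(r)) or plain linear(r)
//   on a C++ reference              -> LinearVal
// uniform(u)                        -> Uniform
// everything else                   -> Vector (the default)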
static unsigned evaluateCDTSize(const FunctionDecl *FD,
@@ -11554,6 +11487,52 @@ static unsigned evaluateCDTSize(const FunctionDecl *FD,
return C.getTypeSize(CDT);
}
+/// Mangle the parameter part of the vector function name according to
+/// the parameters' OpenMP classification. The mangling scheme is defined
+/// in section 4.5 of the AAVFABI (2021Q1).
+static std::string mangleVectorParameters(ArrayRef<ParamAttrTy> ParamAttrs) {
+ SmallString<256> Buffer;
+ llvm::raw_svector_ostream Out(Buffer);
+ for (const auto &ParamAttr : ParamAttrs) {
+ switch (ParamAttr.Kind) {
+ case Linear:
+ Out << 'l';
+ break;
+ case LinearRef:
+ Out << 'R';
+ break;
+ case LinearUVal:
+ Out << 'U';
+ break;
+ case LinearVal:
+ Out << 'L';
+ break;
+ case Uniform:
+ Out << 'u';
+ break;
+ case Vector:
+ Out << 'v';
+ break;
+ }
+ if (ParamAttr.HasVarStride)
+ Out << "s" << ParamAttr.StrideOrArg;
+ else if (ParamAttr.Kind == Linear || ParamAttr.Kind == LinearRef ||
+ ParamAttr.Kind == LinearUVal || ParamAttr.Kind == LinearVal) {
+ // Don't print the step value if it is not present or if it is
+ // equal to 1.
+ if (ParamAttr.StrideOrArg < 0)
+ Out << 'n' << -ParamAttr.StrideOrArg;
+ else if (ParamAttr.StrideOrArg != 1)
+ Out << ParamAttr.StrideOrArg;
+ }
+
+ if (!!ParamAttr.Alignment)
+ Out << 'a' << ParamAttr.Alignment;
+ }
+
+ return std::string(Out.str());
+}
+
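A few hand-derived examples of what the new mangling produces (a sanity sketch against the switch above, not taken from the tests):

// {Linear, StrideOrArg=1}                   -> "l"     (unit step elided)
// {Linear, StrideOrArg=4}                   -> "l4"
// {Linear, StrideOrArg=-2}                  -> "ln2"
// {LinearRef, StrideOrArg=8, Alignment=16}  -> "R8a16"
// {Linear, HasVarStride, StrideOrArg=2}     -> "ls2"   (step is parameter 2)
// {Uniform} followed by {Vector}            -> "uv"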
static void
emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn,
const llvm::APSInt &VLENVal,
@@ -11602,26 +11581,7 @@ emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn,
} else {
Out << VLENVal;
}
- for (const ParamAttrTy &ParamAttr : ParamAttrs) {
- switch (ParamAttr.Kind){
- case LinearWithVarStride:
- Out << 's' << ParamAttr.StrideOrArg;
- break;
- case Linear:
- Out << 'l';
- if (ParamAttr.StrideOrArg != 1)
- Out << ParamAttr.StrideOrArg;
- break;
- case Uniform:
- Out << 'u';
- break;
- case Vector:
- Out << 'v';
- break;
- }
- if (!!ParamAttr.Alignment)
- Out << 'a' << ParamAttr.Alignment;
- }
+ Out << mangleVectorParameters(ParamAttrs);
Out << '_' << Fn->getName();
Fn->addFnAttr(Out.str());
}
@@ -11634,11 +11594,7 @@ emitX86DeclareSimdFunction(const FunctionDecl *FD, llvm::Function *Fn,
// available at
// https://developer.arm.com/products/software-development-tools/hpc/arm-compiler-for-hpc/vector-function-abi.
-/// Maps To Vector (MTV), as defined in 3.1.1 of the AAVFABI.
-///
-/// TODO: Need to implement the behavior for reference marked with a
-/// var or no linear modifiers (1.b in the section). For this, we
-/// need to extend ParamKindTy to support the linear modifiers.
+/// Maps To Vector (MTV), as defined in 4.1.1 of the AAVFABI (2021Q1).
static bool getAArch64MTV(QualType QT, ParamKindTy Kind) {
QT = QT.getCanonicalType();
@@ -11648,12 +11604,11 @@ static bool getAArch64MTV(QualType QT, ParamKindTy Kind) {
if (Kind == ParamKindTy::Uniform)
return false;
- if (Kind == ParamKindTy::Linear)
+ if (Kind == ParamKindTy::LinearUVal || Kind == ParamKindTy::LinearRef)
return false;
- // TODO: Handle linear references with modifiers
-
- if (Kind == ParamKindTy::LinearWithVarStride)
+ if ((Kind == ParamKindTy::Linear || Kind == ParamKindTy::LinearVal) &&
+ !QT->isReferenceType())
return false;
return true;
@@ -11734,39 +11689,6 @@ getNDSWDS(const FunctionDecl *FD, ArrayRef<ParamAttrTy> ParamAttrs) {
OutputBecomesInput);
}
-/// Mangle the parameter part of the vector function name according to
-/// their OpenMP classification. The mangling function is defined in
-/// section 3.5 of the AAVFABI.
-static std::string mangleVectorParameters(ArrayRef<ParamAttrTy> ParamAttrs) {
- SmallString<256> Buffer;
- llvm::raw_svector_ostream Out(Buffer);
- for (const auto &ParamAttr : ParamAttrs) {
- switch (ParamAttr.Kind) {
- case LinearWithVarStride:
- Out << "ls" << ParamAttr.StrideOrArg;
- break;
- case Linear:
- Out << 'l';
- // Don't print the step value if it is not present or if it is
- // equal to 1.
- if (ParamAttr.StrideOrArg != 1)
- Out << ParamAttr.StrideOrArg;
- break;
- case Uniform:
- Out << 'u';
- break;
- case Vector:
- Out << 'v';
- break;
- }
-
- if (!!ParamAttr.Alignment)
- Out << 'a' << ParamAttr.Alignment;
- }
-
- return std::string(Out.str());
-}
-
// Function used to add the attribute. The parameter `VLEN` is
// templated to allow the use of "x" when targeting scalable functions
// for SVE.
@@ -11933,16 +11855,16 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
llvm::Function *Fn) {
ASTContext &C = CGM.getContext();
FD = FD->getMostRecentDecl();
- // Map params to their positions in function decl.
- llvm::DenseMap<const Decl *, unsigned> ParamPositions;
- if (isa<CXXMethodDecl>(FD))
- ParamPositions.try_emplace(FD, 0);
- unsigned ParamPos = ParamPositions.size();
- for (const ParmVarDecl *P : FD->parameters()) {
- ParamPositions.try_emplace(P->getCanonicalDecl(), ParamPos);
- ++ParamPos;
- }
while (FD) {
+ // Map params to their positions in function decl.
+ llvm::DenseMap<const Decl *, unsigned> ParamPositions;
+ if (isa<CXXMethodDecl>(FD))
+ ParamPositions.try_emplace(FD, 0);
+ unsigned ParamPos = ParamPositions.size();
+ for (const ParmVarDecl *P : FD->parameters()) {
+ ParamPositions.try_emplace(P->getCanonicalDecl(), ParamPos);
+ ++ParamPos;
+ }
for (const auto *Attr : FD->specific_attrs<OMPDeclareSimdDeclAttr>()) {
llvm::SmallVector<ParamAttrTy, 8> ParamAttrs(ParamPositions.size());
// Mark uniform parameters.
@@ -11954,12 +11876,14 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
} else {
const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
->getCanonicalDecl();
- Pos = ParamPositions[PVD];
+ auto It = ParamPositions.find(PVD);
+ assert(It != ParamPositions.end() && "Function parameter not found");
+ Pos = It->second;
}
ParamAttrs[Pos].Kind = Uniform;
}
// Get alignment info.
- auto NI = Attr->alignments_begin();
+ auto *NI = Attr->alignments_begin();
for (const Expr *E : Attr->aligneds()) {
E = E->IgnoreParenImpCasts();
unsigned Pos;
@@ -11970,7 +11894,9 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
} else {
const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
->getCanonicalDecl();
- Pos = ParamPositions[PVD];
+ auto It = ParamPositions.find(PVD);
+ assert(It != ParamPositions.end() && "Function parameter not found");
+ Pos = It->second;
ParmTy = PVD->getType();
}
ParamAttrs[Pos].Alignment =
@@ -11982,27 +11908,48 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
++NI;
}
// Mark linear parameters.
- auto SI = Attr->steps_begin();
- auto MI = Attr->modifiers_begin();
+ auto *SI = Attr->steps_begin();
+ auto *MI = Attr->modifiers_begin();
for (const Expr *E : Attr->linears()) {
E = E->IgnoreParenImpCasts();
unsigned Pos;
+ bool IsReferenceType = false;
// Rescaling factor needed to compute the linear parameter
// value in the mangled name.
unsigned PtrRescalingFactor = 1;
if (isa<CXXThisExpr>(E)) {
Pos = ParamPositions[FD];
+ auto *P = cast<PointerType>(E->getType());
+ PtrRescalingFactor = CGM.getContext()
+ .getTypeSizeInChars(P->getPointeeType())
+ .getQuantity();
} else {
const auto *PVD = cast<ParmVarDecl>(cast<DeclRefExpr>(E)->getDecl())
->getCanonicalDecl();
- Pos = ParamPositions[PVD];
+ auto It = ParamPositions.find(PVD);
+ assert(It != ParamPositions.end() && "Function parameter not found");
+ Pos = It->second;
if (auto *P = dyn_cast<PointerType>(PVD->getType()))
PtrRescalingFactor = CGM.getContext()
.getTypeSizeInChars(P->getPointeeType())
.getQuantity();
+ else if (PVD->getType()->isReferenceType()) {
+ IsReferenceType = true;
+ PtrRescalingFactor =
+ CGM.getContext()
+ .getTypeSizeInChars(PVD->getType().getNonReferenceType())
+ .getQuantity();
+ }
}
ParamAttrTy &ParamAttr = ParamAttrs[Pos];
- ParamAttr.Kind = Linear;
+ if (*MI == OMPC_LINEAR_ref)
+ ParamAttr.Kind = LinearRef;
+ else if (*MI == OMPC_LINEAR_uval)
+ ParamAttr.Kind = LinearUVal;
+ else if (IsReferenceType)
+ ParamAttr.Kind = LinearVal;
+ else
+ ParamAttr.Kind = Linear;
// Assuming a stride of 1, for `linear` without modifiers.
ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(1);
if (*SI) {
@@ -12010,10 +11957,13 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
if (!(*SI)->EvaluateAsInt(Result, C, Expr::SE_AllowSideEffects)) {
if (const auto *DRE =
cast<DeclRefExpr>((*SI)->IgnoreParenImpCasts())) {
- if (const auto *StridePVD = cast<ParmVarDecl>(DRE->getDecl())) {
- ParamAttr.Kind = LinearWithVarStride;
- ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(
- ParamPositions[StridePVD->getCanonicalDecl()]);
+ if (const auto *StridePVD =
+ dyn_cast<ParmVarDecl>(DRE->getDecl())) {
+ ParamAttr.HasVarStride = true;
+ auto It = ParamPositions.find(StridePVD->getCanonicalDecl());
+ assert(It != ParamPositions.end() &&
+ "Function parameter not found");
+ ParamAttr.StrideOrArg = llvm::APSInt::getUnsigned(It->second);
}
}
} else {
@@ -12023,7 +11973,8 @@ void CGOpenMPRuntime::emitDeclareSimdFunction(const FunctionDecl *FD,
// If we are using a linear clause on a pointer, we need to
// rescale the value of linear_step with the byte size of the
// pointee type.
- if (Linear == ParamAttr.Kind)
+ if (!ParamAttr.HasVarStride &&
+ (ParamAttr.Kind == Linear || ParamAttr.Kind == LinearRef))
ParamAttr.StrideOrArg = ParamAttr.StrideOrArg * PtrRescalingFactor;
++SI;
++MI;
@@ -12235,6 +12186,16 @@ static llvm::Value *getAllocatorVal(CodeGenFunction &CGF,
return AllocVal;
}
+/// Return the alignment from an allocate directive if present.
+static llvm::Value *getAlignmentValue(CodeGenModule &CGM, const VarDecl *VD) {
+ llvm::Optional<CharUnits> AllocateAlignment = CGM.getOMPAllocateAlignment(VD);
+
+ if (!AllocateAlignment)
+ return nullptr;
+
+ return llvm::ConstantInt::get(CGM.SizeTy, AllocateAlignment->getQuantity());
+}
+
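For example (a sketch; the allocator and alignment are assumed):

//   int x;
//   #pragma omp allocate(x) allocator(omp_default_mem_alloc) align(64)
// getOMPAllocateAlignment(VD) yields CharUnits(64) here, so the helper
// returns a size_t constant 64; without an align clause it returns nullptr
// and the caller simply omits the alignment argument from the runtime call.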
Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD) {
if (!VD)
@@ -12273,11 +12234,7 @@ Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
const auto *AA = CVD->getAttr<OMPAllocateDeclAttr>();
const Expr *Allocator = AA->getAllocator();
llvm::Value *AllocVal = getAllocatorVal(CGF, Allocator);
- llvm::Value *Alignment =
- AA->getAlignment()
- ? CGF.Builder.CreateIntCast(CGF.EmitScalarExpr(AA->getAlignment()),
- CGM.SizeTy, /*isSigned=*/false)
- : nullptr;
+ llvm::Value *Alignment = getAlignmentValue(CGM, CVD);
SmallVector<llvm::Value *, 4> Args;
Args.push_back(ThreadID);
if (Alignment)
@@ -12324,7 +12281,9 @@ Address CGOpenMPRuntime::getAddressOfLocalVariable(CodeGenFunction &CGF,
}
};
Address VDAddr =
- UntiedRealAddr.isValid() ? UntiedRealAddr : Address(Addr, Align);
+ UntiedRealAddr.isValid()
+ ? UntiedRealAddr
+ : Address(Addr, CGF.ConvertTypeForMem(CVD->getType()), Align);
CGF.EHStack.pushCleanup<OMPAllocateCleanupTy>(
NormalAndEHCleanup, FiniRTLFn, CVD->getLocation().getRawEncoding(),
VDAddr, Allocator);
@@ -12772,7 +12731,8 @@ void CGOpenMPRuntime::checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
LValue PrivLVal = CGF.EmitLValue(FoundE);
Address StructAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
PrivLVal.getAddress(CGF),
- CGF.ConvertTypeForMem(CGF.getContext().getPointerType(StructTy)));
+ CGF.ConvertTypeForMem(CGF.getContext().getPointerType(StructTy)),
+ CGF.ConvertTypeForMem(StructTy));
LValue BaseLVal =
CGF.MakeAddrLValue(StructAddr, StructTy, AlignmentSource::Decl);
LValue FiredLVal = CGF.EmitLValueForField(BaseLVal, FiredDecl);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h
index 19754b0cfacc..7fc6a7e278e5 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntime.h
@@ -218,6 +218,11 @@ public:
/// Returns true if the initialization of the reduction item uses the
/// initializer from the declare reduction construct.
bool usesReductionInitializer(unsigned N) const;
+ /// Return the type of the private item.
+ QualType getPrivateType(unsigned N) const {
+ return cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Private)->getDecl())
+ ->getType();
+ }
};
class CGOpenMPRuntime {
@@ -513,15 +518,6 @@ private:
/// kmp_int64 st; // stride
/// };
QualType KmpDimTy;
- /// Type struct __tgt_offload_entry{
- /// void *addr; // Pointer to the offload entry info.
- /// // (function or global)
- /// char *name; // Name of the function or global.
- /// size_t size; // Size of the entry info (0 if it a function).
- /// int32_t flags;
- /// int32_t reserved;
- /// };
- QualType TgtOffloadEntryQTy;
/// Entity that registers the offloading constants that were emitted so
/// far.
class OffloadEntriesInfoManagerTy {
@@ -777,9 +773,6 @@ private:
/// metadata.
void loadOffloadInfoMetadata();
- /// Returns __tgt_offload_entry type.
- QualType getTgtOffloadEntryQTy();
-
/// Start scanning from statement \a S and emit all target regions
/// found along the way.
/// \param S Starting statement.
@@ -915,6 +908,14 @@ private:
LValue DepobjLVal,
SourceLocation Loc);
+ SmallVector<llvm::Value *, 4>
+ emitDepobjElementsSizes(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
+ const OMPTaskDataTy::DependData &Data);
+
+ void emitDepobjElements(CodeGenFunction &CGF, QualType &KmpDependInfoTy,
+ LValue PosLVal, const OMPTaskDataTy::DependData &Data,
+ Address DependenciesArray);
+
public:
explicit CGOpenMPRuntime(CodeGenModule &CGM)
: CGOpenMPRuntime(CGM, ".", ".") {}
@@ -1406,14 +1407,14 @@ public:
bool HasCancel = false);
/// Emits reduction function.
- /// \param ArgsType Array type containing pointers to reduction variables.
+ /// \param ArgsElemType Array type containing pointers to reduction variables.
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
llvm::Function *emitReductionFunction(SourceLocation Loc,
- llvm::Type *ArgsType,
+ llvm::Type *ArgsElemType,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
index 2d5511336851..6dea846f486f 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
@@ -1203,15 +1203,17 @@ CGOpenMPRuntimeGPU::CGOpenMPRuntimeGPU(CodeGenModule &CGM)
llvm_unreachable("OpenMP can only handle device code.");
llvm::OpenMPIRBuilder &OMPBuilder = getOMPBuilder();
- if (CGM.getLangOpts().OpenMPTargetNewRuntime &&
- !CGM.getLangOpts().OMPHostIRFile.empty()) {
- OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPTargetDebug,
- "__omp_rtl_debug_kind");
- OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPTeamSubscription,
- "__omp_rtl_assume_teams_oversubscription");
- OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPThreadSubscription,
- "__omp_rtl_assume_threads_oversubscription");
- }
+ if (CGM.getLangOpts().NoGPULib || CGM.getLangOpts().OMPHostIRFile.empty())
+ return;
+
+ OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPTargetDebug,
+ "__omp_rtl_debug_kind");
+ OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPTeamSubscription,
+ "__omp_rtl_assume_teams_oversubscription");
+ OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPThreadSubscription,
+ "__omp_rtl_assume_threads_oversubscription");
+ OMPBuilder.createGlobalFlag(CGM.getLangOpts().OpenMPNoThreadState,
+ "__omp_rtl_assume_no_thread_state");
}
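Each createGlobalFlag call materializes a device-visible constant the runtime can specialize on; schematically (assumed shape, linkage details may differ):

// @__omp_rtl_assume_no_thread_state = weak_odr constant i32 0
// i.e. a 0/1 i32 global driven by the corresponding -fopenmp-assume-* flag.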
void CGOpenMPRuntimeGPU::emitProcBindClause(CodeGenFunction &CGF,
@@ -1715,7 +1717,8 @@ static llvm::Value *castValueToType(CodeGenFunction &CGF, llvm::Value *Val,
CastTy->hasSignedIntegerRepresentation());
Address CastItem = CGF.CreateMemTemp(CastTy);
Address ValCastItem = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- CastItem, Val->getType()->getPointerTo(CastItem.getAddressSpace()));
+ CastItem, Val->getType()->getPointerTo(CastItem.getAddressSpace()),
+ Val->getType());
CGF.EmitStoreOfScalar(Val, ValCastItem, /*Volatile=*/false, ValTy,
LValueBaseInfo(AlignmentSource::Type),
TBAAAccessInfo());
@@ -1778,7 +1781,7 @@ static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
Address ElemPtr = DestAddr;
Address Ptr = SrcAddr;
Address PtrEnd = Bld.CreatePointerBitCastOrAddrSpaceCast(
- Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy);
+ Bld.CreateConstGEP(SrcAddr, 1), CGF.VoidPtrTy, CGF.Int8Ty);
for (int IntSize = 8; IntSize >= 1; IntSize /= 2) {
if (Size < CharUnits::fromQuantity(IntSize))
continue;
@@ -1786,9 +1789,10 @@ static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
CGF.getContext().toBits(CharUnits::fromQuantity(IntSize)),
/*Signed=*/1);
llvm::Type *IntTy = CGF.ConvertTypeForMem(IntType);
- Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo());
- ElemPtr =
- Bld.CreatePointerBitCastOrAddrSpaceCast(ElemPtr, IntTy->getPointerTo());
+ Ptr = Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr, IntTy->getPointerTo(),
+ IntTy);
+ ElemPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
+ ElemPtr, IntTy->getPointerTo(), IntTy);
if (Size.getQuantity() / IntSize > 1) {
llvm::BasicBlock *PreCondBB = CGF.createBasicBlock(".shuffle.pre_cond");
llvm::BasicBlock *ThenBB = CGF.createBasicBlock(".shuffle.then");
@@ -1801,8 +1805,9 @@ static void shuffleAndStore(CodeGenFunction &CGF, Address SrcAddr,
llvm::PHINode *PhiDest =
Bld.CreatePHI(ElemPtr.getType(), /*NumReservedValues=*/2);
PhiDest->addIncoming(ElemPtr.getPointer(), CurrentBB);
- Ptr = Address(PhiSrc, Ptr.getAlignment());
- ElemPtr = Address(PhiDest, ElemPtr.getAlignment());
+ Ptr = Address(PhiSrc, Ptr.getElementType(), Ptr.getAlignment());
+ ElemPtr =
+ Address(PhiDest, ElemPtr.getElementType(), ElemPtr.getAlignment());
llvm::Value *PtrDiff = Bld.CreatePtrDiff(
CGF.Int8Ty, PtrEnd.getPointer(),
Bld.CreatePointerBitCastOrAddrSpaceCast(Ptr.getPointer(),
@@ -1895,14 +1900,17 @@ static void emitReductionListCopy(
// new element.
bool IncrScratchpadSrc = false;
bool IncrScratchpadDest = false;
+ QualType PrivatePtrType = C.getPointerType(Private->getType());
+ llvm::Type *PrivateLlvmPtrType = CGF.ConvertType(PrivatePtrType);
switch (Action) {
case RemoteLaneToThread: {
// Step 1.1: Get the address for the src element in the Reduce list.
Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
- SrcElementAddr = CGF.EmitLoadOfPointer(
- SrcElementPtrAddr,
- C.getPointerType(Private->getType())->castAs<PointerType>());
+ SrcElementAddr =
+ CGF.EmitLoadOfPointer(CGF.Builder.CreateElementBitCast(
+ SrcElementPtrAddr, PrivateLlvmPtrType),
+ PrivatePtrType->castAs<PointerType>());
// Step 1.2: Create a temporary to store the element in the destination
// Reduce list.
@@ -1916,24 +1924,27 @@ static void emitReductionListCopy(
case ThreadCopy: {
// Step 1.1: Get the address for the src element in the Reduce list.
Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
- SrcElementAddr = CGF.EmitLoadOfPointer(
- SrcElementPtrAddr,
- C.getPointerType(Private->getType())->castAs<PointerType>());
+ SrcElementAddr =
+ CGF.EmitLoadOfPointer(CGF.Builder.CreateElementBitCast(
+ SrcElementPtrAddr, PrivateLlvmPtrType),
+ PrivatePtrType->castAs<PointerType>());
// Step 1.2: Get the address for dest element. The destination
// element has already been created on the thread's stack.
DestElementPtrAddr = Bld.CreateConstArrayGEP(DestBase, Idx);
- DestElementAddr = CGF.EmitLoadOfPointer(
- DestElementPtrAddr,
- C.getPointerType(Private->getType())->castAs<PointerType>());
+ DestElementAddr =
+ CGF.EmitLoadOfPointer(CGF.Builder.CreateElementBitCast(
+ DestElementPtrAddr, PrivateLlvmPtrType),
+ PrivatePtrType->castAs<PointerType>());
break;
}
case ThreadToScratchpad: {
// Step 1.1: Get the address for the src element in the Reduce list.
Address SrcElementPtrAddr = Bld.CreateConstArrayGEP(SrcBase, Idx);
- SrcElementAddr = CGF.EmitLoadOfPointer(
- SrcElementPtrAddr,
- C.getPointerType(Private->getType())->castAs<PointerType>());
+ SrcElementAddr =
+ CGF.EmitLoadOfPointer(CGF.Builder.CreateElementBitCast(
+ SrcElementPtrAddr, PrivateLlvmPtrType),
+ PrivatePtrType->castAs<PointerType>());
// Step 1.2: Get the address for dest element:
// address = base + index * ElementSizeInChars.
@@ -1944,7 +1955,7 @@ static void emitReductionListCopy(
Bld.CreateNUWAdd(DestBase.getPointer(), CurrentOffset);
ScratchPadElemAbsolutePtrVal =
Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
- DestElementAddr = Address(ScratchPadElemAbsolutePtrVal,
+ DestElementAddr = Address(ScratchPadElemAbsolutePtrVal, CGF.Int8Ty,
C.getTypeAlignInChars(Private->getType()));
IncrScratchpadDest = true;
break;
@@ -1959,7 +1970,7 @@ static void emitReductionListCopy(
Bld.CreateNUWAdd(SrcBase.getPointer(), CurrentOffset);
ScratchPadElemAbsolutePtrVal =
Bld.CreateIntToPtr(ScratchPadElemAbsolutePtrVal, CGF.VoidPtrTy);
- SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal,
+ SrcElementAddr = Address(ScratchPadElemAbsolutePtrVal, CGF.Int8Ty,
C.getTypeAlignInChars(Private->getType()));
IncrScratchpadSrc = true;
@@ -2032,6 +2043,8 @@ static void emitReductionListCopy(
// address of the next element in scratchpad memory, unless we're currently
// processing the last one. Memory alignment is also taken care of here.
if ((IncrScratchpadDest || IncrScratchpadSrc) && (Idx + 1 < Size)) {
+ // FIXME: This code doesn't make any sense, it's trying to perform
+ // integer arithmetic on pointers.
llvm::Value *ScratchpadBasePtr =
IncrScratchpadDest ? DestBase.getPointer() : SrcBase.getPointer();
llvm::Value *ElementSizeInChars = CGF.getTypeSize(Private->getType());
@@ -2052,9 +2065,11 @@ static void emitReductionListCopy(
llvm::ConstantInt::get(CGM.SizeTy, GlobalMemoryAlignment));
if (IncrScratchpadDest)
- DestBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
+ DestBase =
+ Address(ScratchpadBasePtr, CGF.VoidPtrTy, CGF.getPointerAlign());
else /* IncrScratchpadSrc = true */
- SrcBase = Address(ScratchpadBasePtr, CGF.getPointerAlign());
+ SrcBase =
+ Address(ScratchpadBasePtr, CGF.VoidPtrTy, CGF.getPointerAlign());
}
++Idx;
@@ -2138,13 +2153,14 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
llvm::Value *WarpID = getNVPTXWarpID(CGF);
Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
+ llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy);
Address LocalReduceList(
Bld.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitLoadOfScalar(
AddrReduceListArg, /*Volatile=*/false, C.VoidPtrTy, Loc,
LValueBaseInfo(AlignmentSource::Type), TBAAAccessInfo()),
- CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
- CGF.getPointerAlign());
+ ElemTy->getPointerTo()),
+ ElemTy, CGF.getPointerAlign());
unsigned Idx = 0;
for (const Expr *Private : Privates) {
@@ -2202,7 +2218,7 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
// elemptr = ((CopyType*)(elemptrptr)) + I
- Address ElemPtr = Address(ElemPtrPtr, Align);
+ Address ElemPtr(ElemPtrPtr, CGF.Int8Ty, Align);
ElemPtr = Bld.CreateElementBitCast(ElemPtr, CopyType);
if (NumIters > 1)
ElemPtr = Bld.CreateGEP(ElemPtr, Cnt);
@@ -2212,10 +2228,14 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
llvm::Value *MediumPtrVal = Bld.CreateInBoundsGEP(
TransferMedium->getValueType(), TransferMedium,
{llvm::Constant::getNullValue(CGM.Int64Ty), WarpID});
- Address MediumPtr(MediumPtrVal, Align);
// Casting to actual data type.
// MediumPtr = (CopyType*)MediumPtrAddr;
- MediumPtr = Bld.CreateElementBitCast(MediumPtr, CopyType);
+ Address MediumPtr(
+ Bld.CreateBitCast(
+ MediumPtrVal,
+ CopyType->getPointerTo(
+ MediumPtrVal->getType()->getPointerAddressSpace())),
+ CopyType, Align);
// elem = *elemptr
//*MediumPtr = elem
@@ -2261,15 +2281,19 @@ static llvm::Value *emitInterWarpCopyFunction(CodeGenModule &CGM,
llvm::Value *SrcMediumPtrVal = Bld.CreateInBoundsGEP(
TransferMedium->getValueType(), TransferMedium,
{llvm::Constant::getNullValue(CGM.Int64Ty), ThreadID});
- Address SrcMediumPtr(SrcMediumPtrVal, Align);
// SrcMediumVal = *SrcMediumPtr;
- SrcMediumPtr = Bld.CreateElementBitCast(SrcMediumPtr, CopyType);
+ Address SrcMediumPtr(
+ Bld.CreateBitCast(
+ SrcMediumPtrVal,
+ CopyType->getPointerTo(
+ SrcMediumPtrVal->getType()->getPointerAddressSpace())),
+ CopyType, Align);
// TargetElemPtr = (CopyType*)(SrcDataAddr[i]) + I
Address TargetElemPtrPtr = Bld.CreateConstArrayGEP(LocalReduceList, Idx);
llvm::Value *TargetElemPtrVal = CGF.EmitLoadOfScalar(
TargetElemPtrPtr, /*Volatile=*/false, C.VoidPtrTy, Loc);
- Address TargetElemPtr = Address(TargetElemPtrVal, Align);
+ Address TargetElemPtr(TargetElemPtrVal, CGF.Int8Ty, Align);
TargetElemPtr = Bld.CreateElementBitCast(TargetElemPtr, CopyType);
if (NumIters > 1)
TargetElemPtr = Bld.CreateGEP(TargetElemPtr, Cnt);
@@ -2405,12 +2429,13 @@ static llvm::Function *emitShuffleAndReduceFunction(
CGBuilderTy &Bld = CGF.Builder;
Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
+ llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy);
Address LocalReduceList(
Bld.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
C.VoidPtrTy, SourceLocation()),
- CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
- CGF.getPointerAlign());
+ ElemTy->getPointerTo()),
+ ElemTy, CGF.getPointerAlign());
Address AddrLaneIDArg = CGF.GetAddrOfLocalVar(&LaneIDArg);
llvm::Value *LaneIDArgVal = CGF.EmitLoadOfScalar(
@@ -2561,12 +2586,13 @@ static llvm::Value *emitListToGlobalCopyFunction(
Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
+ llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy);
Address LocalReduceList(
Bld.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
C.VoidPtrTy, Loc),
- CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
- CGF.getPointerAlign());
+ ElemTy->getPointerTo()),
+ ElemTy, CGF.getPointerAlign());
QualType StaticTy = C.getRecordType(TeamReductionRec);
llvm::Type *LLVMReductionsBufferTy =
CGM.getTypes().ConvertTypeForMem(StaticTy);
@@ -2584,19 +2610,22 @@ static llvm::Value *emitListToGlobalCopyFunction(
llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
// elemptr = ((CopyType*)(elemptrptr)) + I
+ ElemTy = CGF.ConvertTypeForMem(Private->getType());
ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
+ ElemPtrPtr, ElemTy->getPointerTo());
Address ElemPtr =
- Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
+ Address(ElemPtrPtr, ElemTy, C.getTypeAlignInChars(Private->getType()));
const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
// Global = Buffer.VD[Idx];
const FieldDecl *FD = VarFieldMap.lookup(VD);
LValue GlobLVal = CGF.EmitLValueForField(
CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
Address GlobAddr = GlobLVal.getAddress(CGF);
- llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
- GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
- GlobLVal.setAddress(Address(BufferPtr, GlobAddr.getAlignment()));
+ llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(GlobAddr.getElementType(),
+ GlobAddr.getPointer(), Idxs);
+ GlobLVal.setAddress(Address(BufferPtr,
+ CGF.ConvertTypeForMem(Private->getType()),
+ GlobAddr.getAlignment()));
switch (CGF.getEvaluationKind(Private->getType())) {
case TEK_Scalar: {
llvm::Value *V = CGF.EmitLoadOfScalar(
@@ -2766,12 +2795,13 @@ static llvm::Value *emitGlobalToListCopyFunction(
Address AddrReduceListArg = CGF.GetAddrOfLocalVar(&ReduceListArg);
Address AddrBufferArg = CGF.GetAddrOfLocalVar(&BufferArg);
+ llvm::Type *ElemTy = CGF.ConvertTypeForMem(ReductionArrayTy);
Address LocalReduceList(
Bld.CreatePointerBitCastOrAddrSpaceCast(
CGF.EmitLoadOfScalar(AddrReduceListArg, /*Volatile=*/false,
C.VoidPtrTy, Loc),
- CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo()),
- CGF.getPointerAlign());
+ ElemTy->getPointerTo()),
+ ElemTy, CGF.getPointerAlign());
QualType StaticTy = C.getRecordType(TeamReductionRec);
llvm::Type *LLVMReductionsBufferTy =
CGM.getTypes().ConvertTypeForMem(StaticTy);
@@ -2790,19 +2820,22 @@ static llvm::Value *emitGlobalToListCopyFunction(
llvm::Value *ElemPtrPtr = CGF.EmitLoadOfScalar(
ElemPtrPtrAddr, /*Volatile=*/false, C.VoidPtrTy, SourceLocation());
// elemptr = ((CopyType*)(elemptrptr)) + I
+ ElemTy = CGF.ConvertTypeForMem(Private->getType());
ElemPtrPtr = Bld.CreatePointerBitCastOrAddrSpaceCast(
- ElemPtrPtr, CGF.ConvertTypeForMem(Private->getType())->getPointerTo());
+ ElemPtrPtr, ElemTy->getPointerTo());
Address ElemPtr =
- Address(ElemPtrPtr, C.getTypeAlignInChars(Private->getType()));
+ Address(ElemPtrPtr, ElemTy, C.getTypeAlignInChars(Private->getType()));
const ValueDecl *VD = cast<DeclRefExpr>(Private)->getDecl();
// Global = Buffer.VD[Idx];
const FieldDecl *FD = VarFieldMap.lookup(VD);
LValue GlobLVal = CGF.EmitLValueForField(
CGF.MakeNaturalAlignAddrLValue(BufferArrPtr, StaticTy), FD);
Address GlobAddr = GlobLVal.getAddress(CGF);
- llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(
- GlobAddr.getElementType(), GlobAddr.getPointer(), Idxs);
- GlobLVal.setAddress(Address(BufferPtr, GlobAddr.getAlignment()));
+ llvm::Value *BufferPtr = Bld.CreateInBoundsGEP(GlobAddr.getElementType(),
+ GlobAddr.getPointer(), Idxs);
+ GlobLVal.setAddress(Address(BufferPtr,
+ CGF.ConvertTypeForMem(Private->getType()),
+ GlobAddr.getAlignment()));
switch (CGF.getEvaluationKind(Private->getType())) {
case TEK_Scalar: {
llvm::Value *V = CGF.EmitLoadOfScalar(GlobLVal, Loc);
@@ -3242,9 +3275,9 @@ void CGOpenMPRuntimeGPU::emitReduction(
llvm::Value *RL = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
ReductionList.getPointer(), CGF.VoidPtrTy);
- llvm::Function *ReductionFn = emitReductionFunction(
- Loc, CGF.ConvertTypeForMem(ReductionArrayTy)->getPointerTo(), Privates,
- LHSExprs, RHSExprs, ReductionOps);
+ llvm::Function *ReductionFn =
+ emitReductionFunction(Loc, CGF.ConvertTypeForMem(ReductionArrayTy),
+ Privates, LHSExprs, RHSExprs, ReductionOps);
llvm::Value *ReductionArrayTySize = CGF.getTypeSize(ReductionArrayTy);
llvm::Function *ShuffleAndReduceFn = emitShuffleAndReduceFunction(
CGM, Privates, ReductionArrayTy, ReductionFn, Loc);
@@ -3284,7 +3317,7 @@ void CGOpenMPRuntimeGPU::emitReduction(
"_openmp_teams_reductions_buffer_$_$ptr");
}
llvm::Value *GlobalBufferPtr = CGF.EmitLoadOfScalar(
- Address(KernelTeamsReductionPtr, CGM.getPointerAlign()),
+ Address(KernelTeamsReductionPtr, CGF.VoidPtrTy, CGM.getPointerAlign()),
/*Volatile=*/false, C.getPointerType(C.VoidPtrTy), Loc);
llvm::Value *GlobalToBufferCpyFn = ::emitListToGlobalCopyFunction(
CGM, Privates, ReductionArrayTy, Loc, TeamReductionRec, VarFieldMap);
@@ -3526,15 +3559,14 @@ llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
SharedArgListAddress = CGF.EmitLoadOfPointer(
GlobalArgs, CGF.getContext()
- .getPointerType(CGF.getContext().getPointerType(
- CGF.getContext().VoidPtrTy))
+ .getPointerType(CGF.getContext().VoidPtrTy)
.castAs<PointerType>());
}
unsigned Idx = 0;
if (isOpenMPLoopBoundSharingDirective(D.getDirectiveKind())) {
Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
- Src, CGF.SizeTy->getPointerTo());
+ Src, CGF.SizeTy->getPointerTo(), CGF.SizeTy);
llvm::Value *LB = CGF.EmitLoadOfScalar(
TypedAddress,
/*Volatile=*/false,
@@ -3544,7 +3576,7 @@ llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
++Idx;
Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, Idx);
TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
- Src, CGF.SizeTy->getPointerTo());
+ Src, CGF.SizeTy->getPointerTo(), CGF.SizeTy);
llvm::Value *UB = CGF.EmitLoadOfScalar(
TypedAddress,
/*Volatile=*/false,
@@ -3559,7 +3591,8 @@ llvm::Function *CGOpenMPRuntimeGPU::createParallelDataSharingWrapper(
QualType ElemTy = CurField->getType();
Address Src = Bld.CreateConstInBoundsGEP(SharedArgListAddress, I + Idx);
Address TypedAddress = Bld.CreatePointerBitCastOrAddrSpaceCast(
- Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)));
+ Src, CGF.ConvertTypeForMem(CGFContext.getPointerType(ElemTy)),
+ CGF.ConvertTypeForMem(ElemTy));
llvm::Value *Arg = CGF.EmitLoadOfScalar(TypedAddress,
/*Volatile=*/false,
CGFContext.getPointerType(ElemTy),
@@ -3630,7 +3663,7 @@ void CGOpenMPRuntimeGPU::emitFunctionProlog(CodeGenFunction &CGF,
CheckVarsEscapingDeclContext VarChecker(CGF, llvm::None);
VarChecker.Visit(Body);
I->getSecond().SecondaryLocalVarData.emplace();
- DeclToAddrMapTy &Data = I->getSecond().SecondaryLocalVarData.getValue();
+ DeclToAddrMapTy &Data = *I->getSecond().SecondaryLocalVarData;
for (const ValueDecl *VD : VarChecker.getEscapedDecls()) {
assert(VD->isCanonicalDecl() && "Expected canonical declaration");
Data.insert(std::make_pair(VD, MappedVarData()));
@@ -3691,7 +3724,7 @@ Address CGOpenMPRuntimeGPU::getAddressOfLocalVariable(CodeGenFunction &CGF,
CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
GV, VarTy->getPointerTo(CGM.getContext().getTargetAddressSpace(
VD->getType().getAddressSpace()))),
- Align);
+ VarTy, Align);
}
if (getDataSharingMode(CGM) != CGOpenMPRuntimeGPU::Generic)
@@ -3902,6 +3935,7 @@ void CGOpenMPRuntimeGPU::processRequiresDirective(
case CudaArch::GFX909:
case CudaArch::GFX90a:
case CudaArch::GFX90c:
+ case CudaArch::GFX940:
case CudaArch::GFX1010:
case CudaArch::GFX1011:
case CudaArch::GFX1012:
@@ -3912,6 +3946,11 @@ void CGOpenMPRuntimeGPU::processRequiresDirective(
case CudaArch::GFX1033:
case CudaArch::GFX1034:
case CudaArch::GFX1035:
+ case CudaArch::GFX1036:
+ case CudaArch::GFX1100:
+ case CudaArch::GFX1101:
+ case CudaArch::GFX1102:
+ case CudaArch::GFX1103:
case CudaArch::Generic:
case CudaArch::UNUSED:
case CudaArch::UNKNOWN:
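The added GFX940 and GFX1036–GFX1103 cases keep the architecture switch in processRequiresDirective exhaustive for newer AMDGPU targets. A hedged source-level sketch of what this switch gates (assuming a clang build with AMDGPU OpenMP offloading, e.g. --offload-arch=gfx1100; exact diagnostics may differ):

    // Architectures enumerated above that lack unified shared memory
    // support cause this directive to be rejected during codegen.
    #pragma omp requires unified_shared_memory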
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayout.h b/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayout.h
index 5a3bcdf72f7b..d5ea74922603 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayout.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGRecordLayout.h
@@ -200,6 +200,12 @@ public:
return FieldInfo.lookup(FD);
}
+ // Return whether the given non-virtual base has a corresponding
+ // entry in the LLVM struct.
+ bool hasNonVirtualBaseLLVMField(const CXXRecordDecl *RD) const {
+ return NonVirtualBases.count(RD);
+ }
+
unsigned getNonVirtualBaseLLVMFieldNo(const CXXRecordDecl *RD) const {
assert(NonVirtualBases.count(RD) && "Invalid non-virtual base!");
return NonVirtualBases.lookup(RD);
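A sketch of the intended caller pattern for the new predicate; `Derived` and `Base` are assumed declarations, not part of this change. It lets codegen probe whether a base was given an LLVM field without tripping the assertion in getNonVirtualBaseLLVMFieldNo:

    const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(Derived);
    if (Layout.hasNonVirtualBaseLLVMField(Base)) {
      // The asserting accessor is only safe to call after the check.
      unsigned FieldNo = Layout.getNonVirtualBaseLLVMFieldNo(Base);
      (void)FieldNo;
    }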
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp
index 9e939bb545ad..df7e5608f8f0 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGStmt.cpp
@@ -314,18 +314,31 @@ void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
case Stmt::OMPMasterTaskLoopDirectiveClass:
EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
break;
+ case Stmt::OMPMaskedTaskLoopDirectiveClass:
+ llvm_unreachable("masked taskloop directive not supported yet.");
+ break;
case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
EmitOMPMasterTaskLoopSimdDirective(
cast<OMPMasterTaskLoopSimdDirective>(*S));
break;
+ case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
+ llvm_unreachable("masked taskloop simd directive not supported yet.");
+ break;
case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
EmitOMPParallelMasterTaskLoopDirective(
cast<OMPParallelMasterTaskLoopDirective>(*S));
break;
+ case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
+ llvm_unreachable("parallel masked taskloop directive not supported yet.");
+ break;
case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
EmitOMPParallelMasterTaskLoopSimdDirective(
cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
break;
+ case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
+ llvm_unreachable(
+ "parallel masked taskloop simd directive not supported yet.");
+ break;
case Stmt::OMPDistributeDirectiveClass:
EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
break;
@@ -396,6 +409,21 @@ void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
case Stmt::OMPGenericLoopDirectiveClass:
EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
break;
+ case Stmt::OMPTeamsGenericLoopDirectiveClass:
+ llvm_unreachable("teams loop directive not supported yet.");
+ break;
+ case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
+ llvm_unreachable("target teams loop directive not supported yet.");
+ break;
+ case Stmt::OMPParallelGenericLoopDirectiveClass:
+ llvm_unreachable("parallel loop directive not supported yet.");
+ break;
+ case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
+ llvm_unreachable("target parallel loop directive not supported yet.");
+ break;
+ case Stmt::OMPParallelMaskedDirectiveClass:
+ llvm_unreachable("parallel masked directive not supported yet.");
+ break;
}
}
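A minimal OpenMP 5.1 snippet (a sketch, not from this commit) that reaches one of the new cases: Sema now accepts the composite directive, while this revision of CodeGen still aborts via llvm_unreachable.

    void f(int n, float *a) {
    #pragma omp parallel masked taskloop
      for (int i = 0; i < n; ++i)
        a[i] += 1.0f;
    }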
@@ -666,19 +694,34 @@ void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
bool nomerge = false;
+ bool noinline = false;
+ bool alwaysinline = false;
const CallExpr *musttail = nullptr;
for (const auto *A : S.getAttrs()) {
- if (A->getKind() == attr::NoMerge) {
+ switch (A->getKind()) {
+ default:
+ break;
+ case attr::NoMerge:
nomerge = true;
- }
- if (A->getKind() == attr::MustTail) {
+ break;
+ case attr::NoInline:
+ noinline = true;
+ break;
+ case attr::AlwaysInline:
+ alwaysinline = true;
+ break;
+ case attr::MustTail:
const Stmt *Sub = S.getSubStmt();
const ReturnStmt *R = cast<ReturnStmt>(Sub);
musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
+ break;
}
}
SaveAndRestore<bool> save_nomerge(InNoMergeAttributedStmt, nomerge);
+ SaveAndRestore<bool> save_noinline(InNoInlineAttributedStmt, noinline);
+ SaveAndRestore<bool> save_alwaysinline(InAlwaysInlineAttributedStmt,
+ alwaysinline);
SaveAndRestore<const CallExpr *> save_musttail(MustTailCall, musttail);
EmitStmt(S.getSubStmt(), S.getAttrs());
}
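Source-level effect of the new flags, as a sketch using clang's statement attributes (behavior on other compilers is not implied): calls emitted within the attributed statement pick up the corresponding inlining attribute.

    int g(int);
    int caller(int v) {
      int r;
      [[clang::noinline]] r = g(v);        // calls here are kept out of line
      [[clang::always_inline]] r += g(r);  // calls here are force-inlined
      return r;
    }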
@@ -2121,10 +2164,9 @@ std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
Ty = llvm::IntegerType::get(getLLVMContext(), Size);
- Ty = llvm::PointerType::getUnqual(Ty);
- return {Builder.CreateLoad(
- Builder.CreateBitCast(InputValue.getAddress(*this), Ty)),
+ return {Builder.CreateLoad(Builder.CreateElementBitCast(
+ InputValue.getAddress(*this), Ty)),
nullptr};
}
}
@@ -2260,6 +2302,9 @@ static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
}
void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
+ // Pop all cleanup blocks at the end of the asm statement.
+ CodeGenFunction::RunCleanupsScope Cleanups(*this);
+
// Assemble the final asm string.
std::string AsmString = S.generateAsmString(getContext());
@@ -2514,10 +2559,8 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
Arg = Builder.CreateZExt(Arg, OutputTy);
else if (isa<llvm::PointerType>(OutputTy))
Arg = Builder.CreateZExt(Arg, IntPtrTy);
- else {
- assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
+ else if (OutputTy->isFloatingPointTy())
Arg = Builder.CreateFPExt(Arg, OutputTy);
- }
}
// Deal with the tied operands' constraint code in adjustInlineAsmType.
ReplaceConstraint = OutputConstraints[Output];
@@ -2713,8 +2756,8 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
// ResultTypeRequiresCast.size() elements of RegResults.
if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]);
- Address A = Builder.CreateBitCast(Dest.getAddress(*this),
- ResultRegTypes[i]->getPointerTo());
+ Address A = Builder.CreateElementBitCast(Dest.getAddress(*this),
+ ResultRegTypes[i]);
if (getTargetHooks().isScalarizableAsmOperand(*this, TruncTy)) {
Builder.CreateStore(Tmp, A);
continue;
@@ -2723,9 +2766,8 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false);
if (Ty.isNull()) {
const Expr *OutExpr = S.getOutputExpr(i);
- CGM.Error(
- OutExpr->getExprLoc(),
- "impossible constraint in asm: can't store value into a register");
+ CGM.getDiags().Report(OutExpr->getExprLoc(),
+ diag::err_store_value_to_reg);
return;
}
Dest = MakeAddrLValue(A, Ty);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp
index 39dd4c00765d..301f5278df69 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -24,6 +24,7 @@
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PrettyStackTrace.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
@@ -79,7 +80,7 @@ public:
InlinedShareds(CGF) {
if (EmitPreInitStmt)
emitPreInitStmt(CGF, S);
- if (!CapturedRegion.hasValue())
+ if (!CapturedRegion)
return;
assert(S.hasAssociatedStmt() &&
"Expected associated statement for inlined directive.");
@@ -94,9 +95,7 @@ public:
isCapturedVar(CGF, VD) || (CGF.CapturedStmtInfo &&
InlinedShareds.isGlobalVarCaptured(VD)),
VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
- InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
- return CGF.EmitLValue(&DRE).getAddress(CGF);
- });
+ InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));
}
}
(void)InlinedShareds.Privatize();
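Mechanical refactor repeated throughout this file: OMPPrivateScope::addPrivate now takes the private Address eagerly instead of an address-generating callback. The shape of the rewrite, in a sketch:

    // Old: Scope.addPrivate(VD, [&]() { return computeAddr(); });
    // New: compute the address at the call site and pass it directly.
    Scope.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));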
@@ -154,11 +153,12 @@ class OMPLoopScope : public CodeGenFunction::RunCleanupsScope {
const auto *OrigVD =
cast<VarDecl>(cast<DeclRefExpr>(IRef)->getDecl());
if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
+ QualType OrigVDTy = OrigVD->getType().getNonReferenceType();
(void)PreCondVars.setVarAddr(
CGF, OrigVD,
Address(llvm::UndefValue::get(CGF.ConvertTypeForMem(
- CGF.getContext().getPointerType(
- OrigVD->getType().getNonReferenceType()))),
+ CGF.getContext().getPointerType(OrigVDTy))),
+ CGF.ConvertTypeForMem(OrigVDTy),
CGF.getContext().getDeclAlign(OrigVD)));
}
}
@@ -271,9 +271,7 @@ public:
InlinedShareds.isGlobalVarCaptured(VD)),
VD->getType().getNonReferenceType(), VK_LValue,
C.getLocation());
- InlinedShareds.addPrivate(VD, [&CGF, &DRE]() -> Address {
- return CGF.EmitLValue(&DRE).getAddress(CGF);
- });
+ InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));
}
}
CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt());
@@ -483,7 +481,11 @@ static llvm::Function *emitOutlinedFunctionPrologue(
if (ArgType->isVariablyModifiedType())
ArgType = getCanonicalParamType(Ctx, ArgType);
VarDecl *Arg;
- if (DebugFunctionDecl && (CapVar || I->capturesThis())) {
+ if (CapVar && (CapVar->getTLSKind() != clang::VarDecl::TLS_None)) {
+ Arg = ImplicitParamDecl::Create(Ctx, /*DC=*/nullptr, FD->getLocation(),
+ II, ArgType,
+ ImplicitParamDecl::ThreadPrivateVar);
+ } else if (DebugFunctionDecl && (CapVar || I->capturesThis())) {
Arg = ParmVarDecl::Create(
Ctx, DebugFunctionDecl,
CapVar ? CapVar->getBeginLoc() : FD->getBeginLoc(),
@@ -578,8 +580,7 @@ static llvm::Function *emitOutlinedFunctionPrologue(
}
if (!FO.RegisterCastedArgsOnly) {
LocalAddrs.insert(
- {Args[Cnt],
- {Var, Address(ArgAddr.getPointer(), Ctx.getDeclAlign(Var))}});
+ {Args[Cnt], {Var, ArgAddr.withAlignment(Ctx.getDeclAlign(Var))}});
}
} else if (I->capturesVariableByCopy()) {
assert(!FD->getType()->isAnyPointerType() &&
@@ -629,9 +630,8 @@ CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
CodeGenFunction::OMPPrivateScope LocalScope(*this);
for (const auto &LocalAddrPair : LocalAddrs) {
if (LocalAddrPair.second.first) {
- LocalScope.addPrivate(LocalAddrPair.second.first, [&LocalAddrPair]() {
- return LocalAddrPair.second.second;
- });
+ LocalScope.addPrivate(LocalAddrPair.second.first,
+ LocalAddrPair.second.second);
}
}
(void)LocalScope.Privatize();
@@ -669,7 +669,8 @@ CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
LV.setAddress(WrapperCGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
LV.getAddress(WrapperCGF),
PI->getType()->getPointerTo(
- LV.getAddress(WrapperCGF).getAddressSpace())));
+ LV.getAddress(WrapperCGF).getAddressSpace()),
+ PI->getType()));
CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
} else {
auto EI = VLASizes.find(Arg);
@@ -726,14 +727,14 @@ void CodeGenFunction::EmitOMPAggregateAssign(
Builder.CreatePHI(SrcBegin->getType(), 2, "omp.arraycpy.srcElementPast");
SrcElementPHI->addIncoming(SrcBegin, EntryBB);
Address SrcElementCurrent =
- Address(SrcElementPHI,
+ Address(SrcElementPHI, SrcAddr.getElementType(),
SrcAddr.getAlignment().alignmentOfArrayElement(ElementSize));
llvm::PHINode *DestElementPHI = Builder.CreatePHI(
DestBegin->getType(), 2, "omp.arraycpy.destElementPast");
DestElementPHI->addIncoming(DestBegin, EntryBB);
Address DestElementCurrent =
- Address(DestElementPHI,
+ Address(DestElementPHI, DestAddr.getElementType(),
DestAddr.getAlignment().alignmentOfArrayElement(ElementSize));
// Emit copy.
@@ -777,8 +778,8 @@ void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
// destination and source variables to corresponding array
// elements.
CodeGenFunction::OMPPrivateScope Remap(*this);
- Remap.addPrivate(DestVD, [DestElement]() { return DestElement; });
- Remap.addPrivate(SrcVD, [SrcElement]() { return SrcElement; });
+ Remap.addPrivate(DestVD, DestElement);
+ Remap.addPrivate(SrcVD, SrcElement);
(void)Remap.Privatize();
EmitIgnoredExpr(Copy);
});
@@ -786,8 +787,8 @@ void CodeGenFunction::EmitOMPCopy(QualType OriginalType, Address DestAddr,
} else {
// Remap pseudo source variable to private copy.
CodeGenFunction::OMPPrivateScope Remap(*this);
- Remap.addPrivate(SrcVD, [SrcAddr]() { return SrcAddr; });
- Remap.addPrivate(DestVD, [DestAddr]() { return DestAddr; });
+ Remap.addPrivate(SrcVD, SrcAddr);
+ Remap.addPrivate(DestVD, DestAddr);
(void)Remap.Privatize();
// Emit copying of the whole variable.
EmitIgnoredExpr(Copy);
@@ -876,68 +877,56 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
// Emit VarDecl with copy init for arrays.
// Get the address of the original variable captured in the current
// captured region.
- IsRegistered = PrivateScope.addPrivate(
- OrigVD, [this, VD, Type, OriginalLVal, VDInit]() {
- AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
- const Expr *Init = VD->getInit();
- if (!isa<CXXConstructExpr>(Init) ||
- isTrivialInitializer(Init)) {
- // Perform simple memcpy.
- LValue Dest =
- MakeAddrLValue(Emission.getAllocatedAddress(), Type);
- EmitAggregateAssign(Dest, OriginalLVal, Type);
- } else {
- EmitOMPAggregateAssign(
- Emission.getAllocatedAddress(),
- OriginalLVal.getAddress(*this), Type,
- [this, VDInit, Init](Address DestElement,
- Address SrcElement) {
- // Clean up any temporaries needed by the
- // initialization.
- RunCleanupsScope InitScope(*this);
- // Emit initialization for single element.
- setAddrOfLocalVar(VDInit, SrcElement);
- EmitAnyExprToMem(Init, DestElement,
- Init->getType().getQualifiers(),
- /*IsInitializer*/ false);
- LocalDeclMap.erase(VDInit);
- });
- }
- EmitAutoVarCleanups(Emission);
- return Emission.getAllocatedAddress();
- });
+ AutoVarEmission Emission = EmitAutoVarAlloca(*VD);
+ const Expr *Init = VD->getInit();
+ if (!isa<CXXConstructExpr>(Init) || isTrivialInitializer(Init)) {
+ // Perform simple memcpy.
+ LValue Dest = MakeAddrLValue(Emission.getAllocatedAddress(), Type);
+ EmitAggregateAssign(Dest, OriginalLVal, Type);
+ } else {
+ EmitOMPAggregateAssign(
+ Emission.getAllocatedAddress(), OriginalLVal.getAddress(*this),
+ Type,
+ [this, VDInit, Init](Address DestElement, Address SrcElement) {
+ // Clean up any temporaries needed by the
+ // initialization.
+ RunCleanupsScope InitScope(*this);
+ // Emit initialization for single element.
+ setAddrOfLocalVar(VDInit, SrcElement);
+ EmitAnyExprToMem(Init, DestElement,
+ Init->getType().getQualifiers(),
+ /*IsInitializer*/ false);
+ LocalDeclMap.erase(VDInit);
+ });
+ }
+ EmitAutoVarCleanups(Emission);
+ IsRegistered =
+ PrivateScope.addPrivate(OrigVD, Emission.getAllocatedAddress());
} else {
Address OriginalAddr = OriginalLVal.getAddress(*this);
- IsRegistered =
- PrivateScope.addPrivate(OrigVD, [this, VDInit, OriginalAddr, VD,
- ThisFirstprivateIsLastprivate,
- OrigVD, &Lastprivates, IRef]() {
- // Emit private VarDecl with copy init.
- // Remap temp VDInit variable to the address of the original
- // variable (for proper handling of captured global variables).
- setAddrOfLocalVar(VDInit, OriginalAddr);
- EmitDecl(*VD);
- LocalDeclMap.erase(VDInit);
- if (ThisFirstprivateIsLastprivate &&
- Lastprivates[OrigVD->getCanonicalDecl()] ==
- OMPC_LASTPRIVATE_conditional) {
- // Create/init special variable for lastprivate conditionals.
- Address VDAddr =
- CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
- *this, OrigVD);
- llvm::Value *V = EmitLoadOfScalar(
- MakeAddrLValue(GetAddrOfLocalVar(VD), (*IRef)->getType(),
- AlignmentSource::Decl),
- (*IRef)->getExprLoc());
- EmitStoreOfScalar(V,
- MakeAddrLValue(VDAddr, (*IRef)->getType(),
- AlignmentSource::Decl));
- LocalDeclMap.erase(VD);
- setAddrOfLocalVar(VD, VDAddr);
- return VDAddr;
- }
- return GetAddrOfLocalVar(VD);
- });
+ // Emit private VarDecl with copy init.
+ // Remap temp VDInit variable to the address of the original
+ // variable (for proper handling of captured global variables).
+ setAddrOfLocalVar(VDInit, OriginalAddr);
+ EmitDecl(*VD);
+ LocalDeclMap.erase(VDInit);
+ Address VDAddr = GetAddrOfLocalVar(VD);
+ if (ThisFirstprivateIsLastprivate &&
+ Lastprivates[OrigVD->getCanonicalDecl()] ==
+ OMPC_LASTPRIVATE_conditional) {
+ // Create/init special variable for lastprivate conditionals.
+ llvm::Value *V =
+ EmitLoadOfScalar(MakeAddrLValue(VDAddr, (*IRef)->getType(),
+ AlignmentSource::Decl),
+ (*IRef)->getExprLoc());
+ VDAddr = CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
+ *this, OrigVD);
+ EmitStoreOfScalar(V, MakeAddrLValue(VDAddr, (*IRef)->getType(),
+ AlignmentSource::Decl));
+ LocalDeclMap.erase(VD);
+ setAddrOfLocalVar(VD, VDAddr);
+ }
+ IsRegistered = PrivateScope.addPrivate(OrigVD, VDAddr);
}
assert(IsRegistered &&
"firstprivate var already registered as private");
@@ -963,11 +952,10 @@ void CodeGenFunction::EmitOMPPrivateClause(
const auto *OrigVD = cast<VarDecl>(cast<DeclRefExpr>(*IRef)->getDecl());
if (EmittedAsPrivate.insert(OrigVD->getCanonicalDecl()).second) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
- bool IsRegistered = PrivateScope.addPrivate(OrigVD, [this, VD]() {
- // Emit private VarDecl with copy init.
- EmitDecl(*VD);
- return GetAddrOfLocalVar(VD);
- });
+ EmitDecl(*VD);
+ // Emit private VarDecl with copy init.
+ bool IsRegistered =
+ PrivateScope.addPrivate(OrigVD, GetAddrOfLocalVar(VD));
assert(IsRegistered && "private var already registered as private");
// Silence the warning about unused variable.
(void)IsRegistered;
@@ -1010,6 +998,7 @@ bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
MasterAddr =
Address(VD->isStaticLocal() ? CGM.getStaticLocalDeclAddress(VD)
: CGM.GetAddrOfGlobal(VD),
+ CGM.getTypes().ConvertTypeForMem(VD->getType()),
getContext().getDeclAlign(VD));
}
// Get the address of the threadprivate variable.
@@ -1078,31 +1067,27 @@ bool CodeGenFunction::EmitOMPLastprivateClauseInit(
if (AlreadyEmittedVars.insert(OrigVD->getCanonicalDecl()).second) {
const auto *DestVD =
cast<VarDecl>(cast<DeclRefExpr>(*IDestRef)->getDecl());
- PrivateScope.addPrivate(DestVD, [this, OrigVD, IRef]() {
- DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
- /*RefersToEnclosingVariableOrCapture=*/
- CapturedStmtInfo->lookup(OrigVD) != nullptr,
- (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
- return EmitLValue(&DRE).getAddress(*this);
- });
+ DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
+ /*RefersToEnclosingVariableOrCapture=*/
+ CapturedStmtInfo->lookup(OrigVD) != nullptr,
+ (*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
+ PrivateScope.addPrivate(DestVD, EmitLValue(&DRE).getAddress(*this));
// Check if the variable is also a firstprivate: in this case IInit is
// not generated. Initialization of this variable will happen in codegen
// for 'firstprivate' clause.
if (IInit && !SIMDLCVs.count(OrigVD->getCanonicalDecl())) {
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(IInit)->getDecl());
- bool IsRegistered =
- PrivateScope.addPrivate(OrigVD, [this, VD, C, OrigVD]() {
- if (C->getKind() == OMPC_LASTPRIVATE_conditional) {
- Address VDAddr =
- CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
- *this, OrigVD);
- setAddrOfLocalVar(VD, VDAddr);
- return VDAddr;
- }
- // Emit private VarDecl with copy init.
- EmitDecl(*VD);
- return GetAddrOfLocalVar(VD);
- });
+ Address VDAddr = Address::invalid();
+ if (C->getKind() == OMPC_LASTPRIVATE_conditional) {
+ VDAddr = CGM.getOpenMPRuntime().emitLastprivateConditionalInit(
+ *this, OrigVD);
+ setAddrOfLocalVar(VD, VDAddr);
+ } else {
+ // Emit private VarDecl with copy init.
+ EmitDecl(*VD);
+ VDAddr = GetAddrOfLocalVar(VD);
+ }
+ bool IsRegistered = PrivateScope.addPrivate(OrigVD, VDAddr);
assert(IsRegistered &&
"lastprivate var already registered as private");
(void)IsRegistered;
@@ -1182,9 +1167,10 @@ void CodeGenFunction::EmitOMPLastprivateClauseFinal(
// Get the address of the private variable.
Address PrivateAddr = GetAddrOfLocalVar(PrivateVD);
if (const auto *RefTy = PrivateVD->getType()->getAs<ReferenceType>())
- PrivateAddr =
- Address(Builder.CreateLoad(PrivateAddr),
- CGM.getNaturalTypeAlignment(RefTy->getPointeeType()));
+ PrivateAddr = Address(
+ Builder.CreateLoad(PrivateAddr),
+ CGM.getTypes().ConvertTypeForMem(RefTy->getPointeeType()),
+ CGM.getNaturalTypeAlignment(RefTy->getPointeeType()));
// Store the last value to the private copy in the last iteration.
if (C->getKind() == OMPC_LASTPRIVATE_conditional)
CGM.getOpenMPRuntime().emitLastprivateConditionalFinalUpdate(
@@ -1256,8 +1242,8 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
EmitAutoVarCleanups(Emission);
Address BaseAddr = RedCG.adjustPrivateAddress(
*this, Count, Emission.getAllocatedAddress());
- bool IsRegistered = PrivateScope.addPrivate(
- RedCG.getBaseDecl(Count), [BaseAddr]() { return BaseAddr; });
+ bool IsRegistered =
+ PrivateScope.addPrivate(RedCG.getBaseDecl(Count), BaseAddr);
assert(IsRegistered && "private var already registered as private");
// Silence the warning about unused variable.
(void)IsRegistered;
@@ -1269,23 +1255,19 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) {
// Store the address of the original variable associated with the LHS
// implicit variable.
- PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
- return RedCG.getSharedLValue(Count).getAddress(*this);
- });
- PrivateScope.addPrivate(
- RHSVD, [this, PrivateVD]() { return GetAddrOfLocalVar(PrivateVD); });
+ PrivateScope.addPrivate(LHSVD,
+ RedCG.getSharedLValue(Count).getAddress(*this));
+ PrivateScope.addPrivate(RHSVD, GetAddrOfLocalVar(PrivateVD));
} else if ((isaOMPArraySectionExpr && Type->isScalarType()) ||
isa<ArraySubscriptExpr>(IRef)) {
// Store the address of the original variable associated with the LHS
// implicit variable.
- PrivateScope.addPrivate(LHSVD, [&RedCG, Count, this]() {
- return RedCG.getSharedLValue(Count).getAddress(*this);
- });
- PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD]() {
- return Builder.CreateElementBitCast(GetAddrOfLocalVar(PrivateVD),
- ConvertTypeForMem(RHSVD->getType()),
- "rhs.begin");
- });
+ PrivateScope.addPrivate(LHSVD,
+ RedCG.getSharedLValue(Count).getAddress(*this));
+ PrivateScope.addPrivate(RHSVD, Builder.CreateElementBitCast(
+ GetAddrOfLocalVar(PrivateVD),
+ ConvertTypeForMem(RHSVD->getType()),
+ "rhs.begin"));
} else {
QualType Type = PrivateVD->getType();
bool IsArray = getContext().getAsArrayType(Type) != nullptr;
@@ -1296,13 +1278,12 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
OriginalAddr = Builder.CreateElementBitCast(
OriginalAddr, ConvertTypeForMem(LHSVD->getType()), "lhs.begin");
}
- PrivateScope.addPrivate(LHSVD, [OriginalAddr]() { return OriginalAddr; });
- PrivateScope.addPrivate(RHSVD, [this, PrivateVD, RHSVD, IsArray]() {
- return IsArray ? Builder.CreateElementBitCast(
- GetAddrOfLocalVar(PrivateVD),
- ConvertTypeForMem(RHSVD->getType()), "rhs.begin")
- : GetAddrOfLocalVar(PrivateVD);
- });
+ PrivateScope.addPrivate(LHSVD, OriginalAddr);
+ PrivateScope.addPrivate(
+ RHSVD, IsArray ? Builder.CreateElementBitCast(
+ GetAddrOfLocalVar(PrivateVD),
+ ConvertTypeForMem(RHSVD->getType()), "rhs.begin")
+ : GetAddrOfLocalVar(PrivateVD));
}
++ILHS;
++IRHS;
@@ -1659,7 +1640,7 @@ Address CodeGenFunction::OMPBuilderCBHelpers::getAddressOfLocalVariable(
Addr,
CGF.ConvertTypeForMem(CGM.getContext().getPointerType(CVD->getType())),
getNameWithSeparators({CVD->getName(), ".addr"}, ".", "."));
- return Address(Addr, Align);
+ return Address(Addr, CGF.ConvertTypeForMem(CVD->getType()), Align);
}
Address CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
@@ -1682,7 +1663,7 @@ Address CodeGenFunction::OMPBuilderCBHelpers::getAddrOfThreadPrivate(
llvm::CallInst *ThreadPrivateCacheCall =
OMPBuilder.createCachedThreadPrivate(CGF.Builder, Data, Size, CacheName);
- return Address(ThreadPrivateCacheCall, VDAddr.getAlignment());
+ return Address(ThreadPrivateCacheCall, CGM.Int8Ty, VDAddr.getAlignment());
}
std::string CodeGenFunction::OMPBuilderCBHelpers::getNameWithSeparators(
@@ -1696,6 +1677,41 @@ std::string CodeGenFunction::OMPBuilderCBHelpers::getNameWithSeparators(
}
return OS.str().str();
}
+
+void CodeGenFunction::OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
+ CodeGenFunction &CGF, const Stmt *RegionBodyStmt, InsertPointTy AllocaIP,
+ InsertPointTy CodeGenIP, Twine RegionName) {
+ CGBuilderTy &Builder = CGF.Builder;
+ Builder.restoreIP(CodeGenIP);
+ llvm::BasicBlock *FiniBB = splitBBWithSuffix(Builder, /*CreateBranch=*/false,
+ "." + RegionName + ".after");
+
+ {
+ OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(CGF, AllocaIP, *FiniBB);
+ CGF.EmitStmt(RegionBodyStmt);
+ }
+
+ if (Builder.saveIP().isSet())
+ Builder.CreateBr(FiniBB);
+}
+
+void CodeGenFunction::OMPBuilderCBHelpers::EmitOMPOutlinedRegionBody(
+ CodeGenFunction &CGF, const Stmt *RegionBodyStmt, InsertPointTy AllocaIP,
+ InsertPointTy CodeGenIP, Twine RegionName) {
+ CGBuilderTy &Builder = CGF.Builder;
+ Builder.restoreIP(CodeGenIP);
+ llvm::BasicBlock *FiniBB = splitBBWithSuffix(Builder, /*CreateBranch=*/false,
+ "." + RegionName + ".after");
+
+ {
+ OMPBuilderCBHelpers::OutlinedRegionBodyRAII IRB(CGF, AllocaIP, *FiniBB);
+ CGF.EmitStmt(RegionBodyStmt);
+ }
+
+ if (Builder.saveIP().isSet())
+ Builder.CreateBr(FiniBB);
+}
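These helpers absorb the finalization-block plumbing each caller previously open-coded, so body-generation callbacks shrink to the two insertion points. The resulting callback shape, sketched with an assumed `BodyStmt`:

    auto BodyGenCB = [&, this](InsertPointTy AllocaIP,
                               InsertPointTy CodeGenIP) {
      OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(*this, BodyStmt, AllocaIP,
                                                    CodeGenIP, "region");
    };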
+
void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
if (CGM.getLangOpts().OpenMPIRBuilder) {
llvm::OpenMPIRBuilder &OMPBuilder = CGM.getOpenMPRuntime().getOMPBuilder();
@@ -1738,13 +1754,10 @@ void CodeGenFunction::EmitOMPParallelDirective(const OMPParallelDirective &S) {
const CapturedStmt *CS = S.getCapturedStmt(OMPD_parallel);
const Stmt *ParallelRegionBodyStmt = CS->getCapturedStmt();
- auto BodyGenCB = [ParallelRegionBodyStmt,
- this](InsertPointTy AllocaIP, InsertPointTy CodeGenIP,
- llvm::BasicBlock &ContinuationBB) {
- OMPBuilderCBHelpers::OutlinedRegionBodyRAII ORB(*this, AllocaIP,
- ContinuationBB);
- OMPBuilderCBHelpers::EmitOMPRegionBody(*this, ParallelRegionBodyStmt,
- CodeGenIP, ContinuationBB);
+ auto BodyGenCB = [&, this](InsertPointTy AllocaIP,
+ InsertPointTy CodeGenIP) {
+ OMPBuilderCBHelpers::EmitOMPOutlinedRegionBody(
+ *this, ParallelRegionBodyStmt, AllocaIP, CodeGenIP, "parallel");
};
CGCapturedStmtInfo CGSI(*CS, CR_OpenMP);
@@ -2181,7 +2194,7 @@ void CodeGenFunction::EmitOMPLinearClauseFinal(
(*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
Address OrigAddr = EmitLValue(&DRE).getAddress(*this);
CodeGenFunction::OMPPrivateScope VarScope(*this);
- VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; });
+ VarScope.addPrivate(OrigVD, OrigAddr);
(void)VarScope.Privatize();
EmitIgnoredExpr(F);
++IC;
@@ -2240,20 +2253,15 @@ void CodeGenFunction::EmitOMPPrivateLoopCounters(
AutoVarEmission VarEmission = EmitAutoVarAlloca(*PrivateVD);
EmitAutoVarCleanups(VarEmission);
LocalDeclMap.erase(PrivateVD);
- (void)LoopScope.addPrivate(
- VD, [&VarEmission]() { return VarEmission.getAllocatedAddress(); });
+ (void)LoopScope.addPrivate(VD, VarEmission.getAllocatedAddress());
if (LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD) ||
VD->hasGlobalStorage()) {
- (void)LoopScope.addPrivate(PrivateVD, [this, VD, E]() {
- DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD),
- LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
- E->getType(), VK_LValue, E->getExprLoc());
- return EmitLValue(&DRE).getAddress(*this);
- });
+ DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD),
+ LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
+ E->getType(), VK_LValue, E->getExprLoc());
+ (void)LoopScope.addPrivate(PrivateVD, EmitLValue(&DRE).getAddress(*this));
} else {
- (void)LoopScope.addPrivate(PrivateVD, [&VarEmission]() {
- return VarEmission.getAllocatedAddress();
- });
+ (void)LoopScope.addPrivate(PrivateVD, VarEmission.getAllocatedAddress());
}
++I;
}
@@ -2268,9 +2276,8 @@ void CodeGenFunction::EmitOMPPrivateLoopCounters(
// Override only those variables that can be captured to avoid re-emission
// of the variables declared within the loops.
if (DRE->refersToEnclosingVariableOrCapture()) {
- (void)LoopScope.addPrivate(VD, [this, DRE, VD]() {
- return CreateMemTemp(DRE->getType(), VD->getName());
- });
+ (void)LoopScope.addPrivate(
+ VD, CreateMemTemp(DRE->getType(), VD->getName()));
}
}
}
@@ -2333,11 +2340,10 @@ void CodeGenFunction::EmitOMPLinearClause(
const auto *PrivateVD =
cast<VarDecl>(cast<DeclRefExpr>(*CurPrivate)->getDecl());
if (!SIMDLCVs.count(VD->getCanonicalDecl())) {
- bool IsRegistered = PrivateScope.addPrivate(VD, [this, PrivateVD]() {
- // Emit private VarDecl with copy init.
- EmitVarDecl(*PrivateVD);
- return GetAddrOfLocalVar(PrivateVD);
- });
+ // Emit private VarDecl with copy init.
+ EmitVarDecl(*PrivateVD);
+ bool IsRegistered =
+ PrivateScope.addPrivate(VD, GetAddrOfLocalVar(PrivateVD));
assert(IsRegistered && "linear var already registered as private");
// Silence the warning about unused variable.
(void)IsRegistered;
@@ -2428,7 +2434,7 @@ void CodeGenFunction::EmitOMPSimdFinal(
OrigAddr = EmitLValue(&DRE).getAddress(*this);
}
OMPPrivateScope VarScope(*this);
- VarScope.addPrivate(OrigVD, [OrigAddr]() { return OrigAddr; });
+ VarScope.addPrivate(OrigVD, OrigAddr);
(void)VarScope.Privatize();
EmitIgnoredExpr(F);
}
@@ -3509,6 +3515,57 @@ static void emitScanBasedDirectiveDecls(
}
}
+/// Copies the final inscan reduction values to the original variables.
+/// The code is the following:
+/// \code
+/// <orig_var> = buffer[num_iters-1];
+/// \endcode
+static void emitScanBasedDirectiveFinals(
+ CodeGenFunction &CGF, const OMPLoopDirective &S,
+ llvm::function_ref<llvm::Value *(CodeGenFunction &)> NumIteratorsGen) {
+ llvm::Value *OMPScanNumIterations = CGF.Builder.CreateIntCast(
+ NumIteratorsGen(CGF), CGF.SizeTy, /*isSigned=*/false);
+ SmallVector<const Expr *, 4> Shareds;
+ SmallVector<const Expr *, 4> LHSs;
+ SmallVector<const Expr *, 4> RHSs;
+ SmallVector<const Expr *, 4> Privates;
+ SmallVector<const Expr *, 4> CopyOps;
+ SmallVector<const Expr *, 4> CopyArrayElems;
+ for (const auto *C : S.getClausesOfKind<OMPReductionClause>()) {
+ assert(C->getModifier() == OMPC_REDUCTION_inscan &&
+ "Only inscan reductions are expected.");
+ Shareds.append(C->varlist_begin(), C->varlist_end());
+ LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
+ RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
+ Privates.append(C->privates().begin(), C->privates().end());
+ CopyOps.append(C->copy_ops().begin(), C->copy_ops().end());
+ CopyArrayElems.append(C->copy_array_elems().begin(),
+ C->copy_array_elems().end());
+ }
+ // Create temp var and copy LHS value to this temp value.
+ // LHS = TMP[LastIter];
+ llvm::Value *OMPLast = CGF.Builder.CreateNSWSub(
+ OMPScanNumIterations,
+ llvm::ConstantInt::get(CGF.SizeTy, 1, /*isSigned=*/false));
+ for (unsigned I = 0, E = CopyArrayElems.size(); I < E; ++I) {
+ const Expr *PrivateExpr = Privates[I];
+ const Expr *OrigExpr = Shareds[I];
+ const Expr *CopyArrayElem = CopyArrayElems[I];
+ CodeGenFunction::OpaqueValueMapping IdxMapping(
+ CGF,
+ cast<OpaqueValueExpr>(
+ cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
+ RValue::get(OMPLast));
+ LValue DestLVal = CGF.EmitLValue(OrigExpr);
+ LValue SrcLVal = CGF.EmitLValue(CopyArrayElem);
+ CGF.EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(CGF),
+ SrcLVal.getAddress(CGF),
+ cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
+ cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
+ CopyOps[I]);
+ }
+}
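The directive shape this finalization serves, as a source-level sketch (`a` and `n` assumed): once the scan loop finishes, the last buffer element holds the full reduction and must be copied back.

    int sum = 0;
    #pragma omp parallel for reduction(inscan, +: sum)
    for (int i = 0; i < n; ++i) {
      sum += a[i];
    #pragma omp scan inclusive(sum)
    }
    // Afterwards sum == buffer[n-1]; that copy is what
    // emitScanBasedDirectiveFinals emits.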
+
/// Emits the code for the directive with inscan reductions.
/// The code is the following:
/// \code
@@ -3617,7 +3674,7 @@ static void emitScanBasedDirective(
RValue::get(IVal));
LHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
}
- PrivScope.addPrivate(LHSVD, [LHSAddr]() { return LHSAddr; });
+ PrivScope.addPrivate(LHSVD, LHSAddr);
Address RHSAddr = Address::invalid();
{
llvm::Value *OffsetIVal = CGF.Builder.CreateNUWSub(IVal, Pow2K);
@@ -3628,7 +3685,7 @@ static void emitScanBasedDirective(
RValue::get(OffsetIVal));
RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
}
- PrivScope.addPrivate(RHSVD, [RHSAddr]() { return RHSAddr; });
+ PrivScope.addPrivate(RHSVD, RHSAddr);
++ILHS;
++IRHS;
}
@@ -3703,6 +3760,8 @@ static bool emitWorksharingDirective(CodeGenFunction &CGF,
if (!isOpenMPParallelDirective(S.getDirectiveKind()))
emitScanBasedDirectiveDecls(CGF, S, NumIteratorsGen);
emitScanBasedDirective(CGF, S, NumIteratorsGen, FirstGen, SecondGen);
+ if (!isOpenMPParallelDirective(S.getDirectiveKind()))
+ emitScanBasedDirectiveFinals(CGF, S, NumIteratorsGen);
} else {
CodeGenFunction::OMPCancelStackRAII CancelRegion(CGF, S.getDirectiveKind(),
HasCancel);
@@ -3716,13 +3775,52 @@ static bool emitWorksharingDirective(CodeGenFunction &CGF,
static bool isSupportedByOpenMPIRBuilder(const OMPForDirective &S) {
if (S.hasCancel())
return false;
- for (OMPClause *C : S.clauses())
- if (!isa<OMPNowaitClause>(C))
- return false;
+ for (OMPClause *C : S.clauses()) {
+ if (isa<OMPNowaitClause>(C))
+ continue;
+
+ if (auto *SC = dyn_cast<OMPScheduleClause>(C)) {
+ if (SC->getFirstScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown)
+ return false;
+ if (SC->getSecondScheduleModifier() != OMPC_SCHEDULE_MODIFIER_unknown)
+ return false;
+ switch (SC->getScheduleKind()) {
+ case OMPC_SCHEDULE_auto:
+ case OMPC_SCHEDULE_dynamic:
+ case OMPC_SCHEDULE_runtime:
+ case OMPC_SCHEDULE_guided:
+ case OMPC_SCHEDULE_static:
+ continue;
+ case OMPC_SCHEDULE_unknown:
+ return false;
+ }
+ }
+
+ return false;
+ }
return true;
}
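Net effect, sketched (assuming the OpenMPIRBuilder path is enabled, e.g. via the -fopenmp-enable-irbuilder cc1 option): a plain schedule clause no longer forces the classic codegen path, but a schedule modifier still does.

    void work(float *a, int n) {
    #pragma omp for nowait schedule(dynamic, 4)   // now IRBuilder-eligible
      for (int i = 0; i < n; ++i)
        a[i] *= 2.0f;
    #pragma omp for schedule(monotonic : static)  // modifier: falls back
      for (int i = 0; i < n; ++i)
        a[i] += 1.0f;
    }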
+static llvm::omp::ScheduleKind
+convertClauseKindToSchedKind(OpenMPScheduleClauseKind ScheduleClauseKind) {
+ switch (ScheduleClauseKind) {
+ case OMPC_SCHEDULE_unknown:
+ return llvm::omp::OMP_SCHEDULE_Default;
+ case OMPC_SCHEDULE_auto:
+ return llvm::omp::OMP_SCHEDULE_Auto;
+ case OMPC_SCHEDULE_dynamic:
+ return llvm::omp::OMP_SCHEDULE_Dynamic;
+ case OMPC_SCHEDULE_guided:
+ return llvm::omp::OMP_SCHEDULE_Guided;
+ case OMPC_SCHEDULE_runtime:
+ return llvm::omp::OMP_SCHEDULE_Runtime;
+ case OMPC_SCHEDULE_static:
+ return llvm::omp::OMP_SCHEDULE_Static;
+ }
+ llvm_unreachable("Unhandled schedule kind");
+}
+
void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
bool HasLastprivates = false;
bool UseOMPIRBuilder =
@@ -3731,18 +3829,31 @@ void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
UseOMPIRBuilder](CodeGenFunction &CGF, PrePostActionTy &) {
// Use the OpenMPIRBuilder if enabled.
if (UseOMPIRBuilder) {
+ bool NeedsBarrier = !S.getSingleClause<OMPNowaitClause>();
+
+ llvm::omp::ScheduleKind SchedKind = llvm::omp::OMP_SCHEDULE_Default;
+ llvm::Value *ChunkSize = nullptr;
+ if (auto *SchedClause = S.getSingleClause<OMPScheduleClause>()) {
+ SchedKind =
+ convertClauseKindToSchedKind(SchedClause->getScheduleKind());
+ if (const Expr *ChunkSizeExpr = SchedClause->getChunkSize())
+ ChunkSize = EmitScalarExpr(ChunkSizeExpr);
+ }
+
// Emit the associated statement and get its loop representation.
const Stmt *Inner = S.getRawStmt();
llvm::CanonicalLoopInfo *CLI =
EmitOMPCollapsedCanonicalLoopNest(Inner, 1);
- bool NeedsBarrier = !S.getSingleClause<OMPNowaitClause>();
llvm::OpenMPIRBuilder &OMPBuilder =
CGM.getOpenMPRuntime().getOMPBuilder();
llvm::OpenMPIRBuilder::InsertPointTy AllocaIP(
AllocaInsertPt->getParent(), AllocaInsertPt->getIterator());
- OMPBuilder.applyWorkshareLoop(Builder.getCurrentDebugLocation(), CLI,
- AllocaIP, NeedsBarrier);
+ OMPBuilder.applyWorkshareLoop(
+ Builder.getCurrentDebugLocation(), CLI, AllocaIP, NeedsBarrier,
+ SchedKind, ChunkSize, /*HasSimdModifier=*/false,
+ /*HasMonotonicModifier=*/false, /*HasNonmonotonicModifier=*/false,
+ /*HasOrderedClause=*/false);
return;
}
@@ -3957,22 +4068,17 @@ void CodeGenFunction::EmitOMPSectionsDirective(const OMPSectionsDirective &S) {
if (CS) {
for (const Stmt *SubStmt : CS->children()) {
auto SectionCB = [this, SubStmt](InsertPointTy AllocaIP,
- InsertPointTy CodeGenIP,
- llvm::BasicBlock &FiniBB) {
- OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP,
- FiniBB);
- OMPBuilderCBHelpers::EmitOMPRegionBody(*this, SubStmt, CodeGenIP,
- FiniBB);
+ InsertPointTy CodeGenIP) {
+ OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
+ *this, SubStmt, AllocaIP, CodeGenIP, "section");
};
SectionCBVector.push_back(SectionCB);
}
} else {
auto SectionCB = [this, CapturedStmt](InsertPointTy AllocaIP,
- InsertPointTy CodeGenIP,
- llvm::BasicBlock &FiniBB) {
- OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB);
- OMPBuilderCBHelpers::EmitOMPRegionBody(*this, CapturedStmt, CodeGenIP,
- FiniBB);
+ InsertPointTy CodeGenIP) {
+ OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
+ *this, CapturedStmt, AllocaIP, CodeGenIP, "section");
};
SectionCBVector.push_back(SectionCB);
}
@@ -4025,11 +4131,9 @@ void CodeGenFunction::EmitOMPSectionDirective(const OMPSectionDirective &S) {
};
auto BodyGenCB = [SectionRegionBodyStmt, this](InsertPointTy AllocaIP,
- InsertPointTy CodeGenIP,
- llvm::BasicBlock &FiniBB) {
- OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB);
- OMPBuilderCBHelpers::EmitOMPRegionBody(*this, SectionRegionBodyStmt,
- CodeGenIP, FiniBB);
+ InsertPointTy CodeGenIP) {
+ OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
+ *this, SectionRegionBodyStmt, AllocaIP, CodeGenIP, "section");
};
LexicalScope Scope(*this, S.getSourceRange());
@@ -4108,11 +4212,9 @@ void CodeGenFunction::EmitOMPMasterDirective(const OMPMasterDirective &S) {
};
auto BodyGenCB = [MasterRegionBodyStmt, this](InsertPointTy AllocaIP,
- InsertPointTy CodeGenIP,
- llvm::BasicBlock &FiniBB) {
- OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB);
- OMPBuilderCBHelpers::EmitOMPRegionBody(*this, MasterRegionBodyStmt,
- CodeGenIP, FiniBB);
+ InsertPointTy CodeGenIP) {
+ OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
+ *this, MasterRegionBodyStmt, AllocaIP, CodeGenIP, "master");
};
LexicalScope Scope(*this, S.getSourceRange());
@@ -4156,11 +4258,9 @@ void CodeGenFunction::EmitOMPMaskedDirective(const OMPMaskedDirective &S) {
};
auto BodyGenCB = [MaskedRegionBodyStmt, this](InsertPointTy AllocaIP,
- InsertPointTy CodeGenIP,
- llvm::BasicBlock &FiniBB) {
- OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB);
- OMPBuilderCBHelpers::EmitOMPRegionBody(*this, MaskedRegionBodyStmt,
- CodeGenIP, FiniBB);
+ InsertPointTy CodeGenIP) {
+ OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
+ *this, MaskedRegionBodyStmt, AllocaIP, CodeGenIP, "masked");
};
LexicalScope Scope(*this, S.getSourceRange());
@@ -4198,11 +4298,9 @@ void CodeGenFunction::EmitOMPCriticalDirective(const OMPCriticalDirective &S) {
};
auto BodyGenCB = [CriticalRegionBodyStmt, this](InsertPointTy AllocaIP,
- InsertPointTy CodeGenIP,
- llvm::BasicBlock &FiniBB) {
- OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP, FiniBB);
- OMPBuilderCBHelpers::EmitOMPRegionBody(*this, CriticalRegionBodyStmt,
- CodeGenIP, FiniBB);
+ InsertPointTy CodeGenIP) {
+ OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
+ *this, CriticalRegionBodyStmt, AllocaIP, CodeGenIP, "critical");
};
LexicalScope Scope(*this, S.getSourceRange());
@@ -4237,23 +4335,25 @@ void CodeGenFunction::EmitOMPParallelForDirective(
(void)emitWorksharingDirective(CGF, S, S.hasCancel());
};
{
- if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
+ const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
+ CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
+ CGCapturedStmtInfo CGSI(CR_OpenMP);
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI);
+ OMPLoopScope LoopScope(CGF, S);
+ return CGF.EmitScalarExpr(S.getNumIterations());
+ };
+ bool IsInscan = llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
[](const OMPReductionClause *C) {
return C->getModifier() == OMPC_REDUCTION_inscan;
- })) {
- const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
- CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
- CGCapturedStmtInfo CGSI(CR_OpenMP);
- CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI);
- OMPLoopScope LoopScope(CGF, S);
- return CGF.EmitScalarExpr(S.getNumIterations());
- };
+ });
+ if (IsInscan)
emitScanBasedDirectiveDecls(*this, S, NumIteratorsGen);
- }
auto LPCRegion =
CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
emitCommonOMPParallelDirective(*this, S, OMPD_for, CodeGen,
emitEmptyBoundParameters);
+ if (IsInscan)
+ emitScanBasedDirectiveFinals(*this, S, NumIteratorsGen);
}
// Check for outer lastprivate conditional update.
checkForLastprivateConditionalUpdate(*this, S);
@@ -4268,23 +4368,25 @@ void CodeGenFunction::EmitOMPParallelForSimdDirective(
(void)emitWorksharingDirective(CGF, S, /*HasCancel=*/false);
};
{
- if (llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
+ const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
+ CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
+ CGCapturedStmtInfo CGSI(CR_OpenMP);
+ CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI);
+ OMPLoopScope LoopScope(CGF, S);
+ return CGF.EmitScalarExpr(S.getNumIterations());
+ };
+ bool IsInscan = llvm::any_of(S.getClausesOfKind<OMPReductionClause>(),
[](const OMPReductionClause *C) {
return C->getModifier() == OMPC_REDUCTION_inscan;
- })) {
- const auto &&NumIteratorsGen = [&S](CodeGenFunction &CGF) {
- CodeGenFunction::OMPLocalDeclMapRAII Scope(CGF);
- CGCapturedStmtInfo CGSI(CR_OpenMP);
- CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CGSI);
- OMPLoopScope LoopScope(CGF, S);
- return CGF.EmitScalarExpr(S.getNumIterations());
- };
+ });
+ if (IsInscan)
emitScanBasedDirectiveDecls(*this, S, NumIteratorsGen);
- }
auto LPCRegion =
CGOpenMPRuntime::LastprivateConditionalRAII::disable(*this, S);
emitCommonOMPParallelDirective(*this, S, OMPD_for_simd, CodeGen,
emitEmptyBoundParameters);
+ if (IsInscan)
+ emitScanBasedDirectiveFinals(*this, S, NumIteratorsGen);
}
// Check for outer lastprivate conditional update.
checkForLastprivateConditionalUpdate(*this, S);
@@ -4379,6 +4481,40 @@ public:
};
} // anonymous namespace
+static void buildDependences(const OMPExecutableDirective &S,
+ OMPTaskDataTy &Data) {
+
+ // Handle 'omp_all_memory' first so that its dependence is emitted first.
+ bool OmpAllMemory = false;
+ if (llvm::any_of(
+ S.getClausesOfKind<OMPDependClause>(), [](const OMPDependClause *C) {
+ return C->getDependencyKind() == OMPC_DEPEND_outallmemory ||
+ C->getDependencyKind() == OMPC_DEPEND_inoutallmemory;
+ })) {
+ OmpAllMemory = true;
+ // Since both OMPC_DEPEND_outallmemory and OMPC_DEPEND_inoutallmemory are
+ // equivalent as far as the runtime is concerned, always use
+ // OMPC_DEPEND_outallmemory to simplify.
+ OMPTaskDataTy::DependData &DD =
+ Data.Dependences.emplace_back(OMPC_DEPEND_outallmemory,
+ /*IteratorExpr=*/nullptr);
+ // Add a nullptr Expr to simplify the codegen in emitDependData.
+ DD.DepExprs.push_back(nullptr);
+ }
+ // Add remaining dependences skipping any 'out' or 'inout' if they are
+ // overridden by 'omp_all_memory'.
+ for (const auto *C : S.getClausesOfKind<OMPDependClause>()) {
+ OpenMPDependClauseKind Kind = C->getDependencyKind();
+ if (Kind == OMPC_DEPEND_outallmemory || Kind == OMPC_DEPEND_inoutallmemory)
+ continue;
+ if (OmpAllMemory && (Kind == OMPC_DEPEND_out || Kind == OMPC_DEPEND_inout))
+ continue;
+ OMPTaskDataTy::DependData &DD =
+ Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier());
+ DD.DepExprs.append(C->varlist_begin(), C->varlist_end());
+ }
+}
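OpenMP 5.1 usage this enables, sketched with an assumed shared `x`: an all-memory sink subsumes plain out/inout dependences on the same task, which is exactly the skipping logic above.

    #pragma omp task depend(out: omp_all_memory) depend(inout: x)
    x += 1;  // only the single omp_all_memory dependence is emitted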
+
void CodeGenFunction::EmitOMPTaskBasedDirective(
const OMPExecutableDirective &S, const OpenMPDirectiveKind CapturedRegion,
const RegionCodeGenTy &BodyGen, const TaskGenTy &TaskGen,
@@ -4474,11 +4610,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
Data.Reductions = CGM.getOpenMPRuntime().emitTaskReductionInit(
*this, S.getBeginLoc(), LHSs, RHSs, Data);
// Build list of dependences.
- for (const auto *C : S.getClausesOfKind<OMPDependClause>()) {
- OMPTaskDataTy::DependData &DD =
- Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier());
- DD.DepExprs.append(C->varlist_begin(), C->varlist_end());
- }
+ buildDependences(S, Data);
// Get list of local vars for untied tasks.
if (!Data.Tied) {
CheckVarsEscapingUntiedTaskDeclContext Checker;
@@ -4613,14 +4745,14 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
Pair.second->getType(), VK_LValue,
Pair.second->getExprLoc());
- Scope.addPrivate(Pair.first, [&CGF, &DRE]() {
- return CGF.EmitLValue(&DRE).getAddress(CGF);
- });
+ Scope.addPrivate(Pair.first, CGF.EmitLValue(&DRE).getAddress(CGF));
}
for (const auto &Pair : PrivatePtrs) {
- Address Replacement(CGF.Builder.CreateLoad(Pair.second),
- CGF.getContext().getDeclAlign(Pair.first));
- Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
+ Address Replacement = Address(
+ CGF.Builder.CreateLoad(Pair.second),
+ CGF.ConvertTypeForMem(Pair.first->getType().getNonReferenceType()),
+ CGF.getContext().getDeclAlign(Pair.first));
+ Scope.addPrivate(Pair.first, Replacement);
if (auto *DI = CGF.getDebugInfo())
if (CGF.CGM.getCodeGenOpts().hasReducedDebugInfo())
(void)DI->EmitDeclareOfAutoVariable(
@@ -4630,16 +4762,22 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
// Adjust mapping for internal locals by mapping actual memory instead of
// a pointer to this memory.
for (auto &Pair : UntiedLocalVars) {
+ QualType VDType = Pair.first->getType().getNonReferenceType();
if (isAllocatableDecl(Pair.first)) {
llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
- Address Replacement(Ptr, CGF.getPointerAlign());
+ Address Replacement(
+ Ptr,
+ CGF.ConvertTypeForMem(CGF.getContext().getPointerType(VDType)),
+ CGF.getPointerAlign());
Pair.second.first = Replacement;
Ptr = CGF.Builder.CreateLoad(Replacement);
- Replacement = Address(Ptr, CGF.getContext().getDeclAlign(Pair.first));
+ Replacement = Address(Ptr, CGF.ConvertTypeForMem(VDType),
+ CGF.getContext().getDeclAlign(Pair.first));
Pair.second.second = Replacement;
} else {
llvm::Value *Ptr = CGF.Builder.CreateLoad(Pair.second.first);
- Address Replacement(Ptr, CGF.getContext().getDeclAlign(Pair.first));
+ Address Replacement(Ptr, CGF.ConvertTypeForMem(VDType),
+ CGF.getContext().getDeclAlign(Pair.first));
Pair.second.first = Replacement;
}
}
@@ -4647,10 +4785,11 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
if (Data.Reductions) {
OMPPrivateScope FirstprivateScope(CGF);
for (const auto &Pair : FirstprivatePtrs) {
- Address Replacement(CGF.Builder.CreateLoad(Pair.second),
- CGF.getContext().getDeclAlign(Pair.first));
- FirstprivateScope.addPrivate(Pair.first,
- [Replacement]() { return Replacement; });
+ Address Replacement(
+ CGF.Builder.CreateLoad(Pair.second),
+ CGF.ConvertTypeForMem(Pair.first->getType().getNonReferenceType()),
+ CGF.getContext().getDeclAlign(Pair.first));
+ FirstprivateScope.addPrivate(Pair.first, Replacement);
}
(void)FirstprivateScope.Privatize();
OMPLexicalScope LexScope(CGF, S, CapturedRegion);
@@ -4674,10 +4813,10 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
CGF.getContext().getPointerType(
Data.ReductionCopies[Cnt]->getType()),
Data.ReductionCopies[Cnt]->getExprLoc()),
+ CGF.ConvertTypeForMem(Data.ReductionCopies[Cnt]->getType()),
Replacement.getAlignment());
Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
- Scope.addPrivate(RedCG.getBaseDecl(Cnt),
- [Replacement]() { return Replacement; });
+ Scope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
}
}
// Privatize all private variables except for in_reduction items.
@@ -4729,10 +4868,10 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
Replacement.getPointer(), CGF.getContext().VoidPtrTy,
CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
InRedPrivs[Cnt]->getExprLoc()),
+ CGF.ConvertTypeForMem(InRedPrivs[Cnt]->getType()),
Replacement.getAlignment());
Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
- InRedScope.addPrivate(RedCG.getBaseDecl(Cnt),
- [Replacement]() { return Replacement; });
+ InRedScope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
}
}
(void)InRedScope.Privatize();
@@ -4806,6 +4945,17 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
++IElemInitRef;
}
}
+ SmallVector<const Expr *, 4> LHSs;
+ SmallVector<const Expr *, 4> RHSs;
+ for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
+ Data.ReductionVars.append(C->varlist_begin(), C->varlist_end());
+ Data.ReductionOrigs.append(C->varlist_begin(), C->varlist_end());
+ Data.ReductionCopies.append(C->privates().begin(), C->privates().end());
+ Data.ReductionOps.append(C->reduction_ops().begin(),
+ C->reduction_ops().end());
+ LHSs.append(C->lhs_exprs().begin(), C->lhs_exprs().end());
+ RHSs.append(C->rhs_exprs().begin(), C->rhs_exprs().end());
+ }
OMPPrivateScope TargetScope(*this);
VarDecl *BPVD = nullptr;
VarDecl *PVD = nullptr;
@@ -4828,29 +4978,20 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
/*IndexTypeQuals=*/0);
SVD = createImplicitFirstprivateForType(getContext(), Data, SizesType, CD,
S.getBeginLoc());
- TargetScope.addPrivate(
- BPVD, [&InputInfo]() { return InputInfo.BasePointersArray; });
- TargetScope.addPrivate(PVD,
- [&InputInfo]() { return InputInfo.PointersArray; });
- TargetScope.addPrivate(SVD,
- [&InputInfo]() { return InputInfo.SizesArray; });
+ TargetScope.addPrivate(BPVD, InputInfo.BasePointersArray);
+ TargetScope.addPrivate(PVD, InputInfo.PointersArray);
+ TargetScope.addPrivate(SVD, InputInfo.SizesArray);
// If there is no user-defined mapper, the mapper array will be nullptr. In
// this case, we don't need to privatize it.
if (!isa_and_nonnull<llvm::ConstantPointerNull>(
InputInfo.MappersArray.getPointer())) {
MVD = createImplicitFirstprivateForType(
getContext(), Data, BaseAndPointerAndMapperType, CD, S.getBeginLoc());
- TargetScope.addPrivate(MVD,
- [&InputInfo]() { return InputInfo.MappersArray; });
+ TargetScope.addPrivate(MVD, InputInfo.MappersArray);
}
}
(void)TargetScope.Privatize();
- // Build list of dependences.
- for (const auto *C : S.getClausesOfKind<OMPDependClause>()) {
- OMPTaskDataTy::DependData &DD =
- Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier());
- DD.DepExprs.append(C->varlist_begin(), C->varlist_end());
- }
+ buildDependences(S, Data);
auto &&CodeGen = [&Data, &S, CS, &BodyGen, BPVD, PVD, SVD, MVD,
&InputInfo](CodeGenFunction &CGF, PrePostActionTy &Action) {
// Set proper addresses for generated private copies.
@@ -4883,13 +5024,14 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
CGF.CGM.getOpenMPRuntime().emitOutlinedFunctionCall(
CGF, S.getBeginLoc(), {CopyFnTy, CopyFn}, CallArgs);
for (const auto &Pair : PrivatePtrs) {
- Address Replacement(CGF.Builder.CreateLoad(Pair.second),
- CGF.getContext().getDeclAlign(Pair.first));
- Scope.addPrivate(Pair.first, [Replacement]() { return Replacement; });
+ Address Replacement(
+ CGF.Builder.CreateLoad(Pair.second),
+ CGF.ConvertTypeForMem(Pair.first->getType().getNonReferenceType()),
+ CGF.getContext().getDeclAlign(Pair.first));
+ Scope.addPrivate(Pair.first, Replacement);
}
}
- // Privatize all private variables except for in_reduction items.
- (void)Scope.Privatize();
+ CGF.processInReduction(S, Data, CGF, CS, Scope);
if (InputInfo.NumberOfTargetItems > 0) {
InputInfo.BasePointersArray = CGF.Builder.CreateConstArrayGEP(
CGF.GetAddrOfLocalVar(BPVD), /*Index=*/0);
@@ -4914,11 +5056,97 @@ void CodeGenFunction::EmitOMPTargetTaskBasedDirective(
IntegerLiteral IfCond(getContext(), TrueOrFalse,
getContext().getIntTypeForBitwidth(32, /*Signed=*/0),
SourceLocation());
-
CGM.getOpenMPRuntime().emitTaskCall(*this, S.getBeginLoc(), S, OutlinedFn,
SharedsTy, CapturedStruct, &IfCond, Data);
}
+void CodeGenFunction::processInReduction(const OMPExecutableDirective &S,
+ OMPTaskDataTy &Data,
+ CodeGenFunction &CGF,
+ const CapturedStmt *CS,
+ OMPPrivateScope &Scope) {
+ if (Data.Reductions) {
+ OpenMPDirectiveKind CapturedRegion = S.getDirectiveKind();
+ OMPLexicalScope LexScope(CGF, S, CapturedRegion);
+ ReductionCodeGen RedCG(Data.ReductionVars, Data.ReductionVars,
+ Data.ReductionCopies, Data.ReductionOps);
+ llvm::Value *ReductionsPtr = CGF.Builder.CreateLoad(
+ CGF.GetAddrOfLocalVar(CS->getCapturedDecl()->getParam(4)));
+ for (unsigned Cnt = 0, E = Data.ReductionVars.size(); Cnt < E; ++Cnt) {
+ RedCG.emitSharedOrigLValue(CGF, Cnt);
+ RedCG.emitAggregateType(CGF, Cnt);
+ // FIXME: This must be removed once the runtime library is fixed.
+ // Emit required threadprivate variables for
+ // initializer/combiner/finalizer.
+ CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
+ RedCG, Cnt);
+ Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
+ CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
+ Replacement =
+ Address(CGF.EmitScalarConversion(
+ Replacement.getPointer(), CGF.getContext().VoidPtrTy,
+ CGF.getContext().getPointerType(
+ Data.ReductionCopies[Cnt]->getType()),
+ Data.ReductionCopies[Cnt]->getExprLoc()),
+ CGF.ConvertTypeForMem(Data.ReductionCopies[Cnt]->getType()),
+ Replacement.getAlignment());
+ Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
+ Scope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
+ }
+ }
+ (void)Scope.Privatize();
+ SmallVector<const Expr *, 4> InRedVars;
+ SmallVector<const Expr *, 4> InRedPrivs;
+ SmallVector<const Expr *, 4> InRedOps;
+ SmallVector<const Expr *, 4> TaskgroupDescriptors;
+ for (const auto *C : S.getClausesOfKind<OMPInReductionClause>()) {
+ auto IPriv = C->privates().begin();
+ auto IRed = C->reduction_ops().begin();
+ auto ITD = C->taskgroup_descriptors().begin();
+ for (const Expr *Ref : C->varlists()) {
+ InRedVars.emplace_back(Ref);
+ InRedPrivs.emplace_back(*IPriv);
+ InRedOps.emplace_back(*IRed);
+ TaskgroupDescriptors.emplace_back(*ITD);
+ std::advance(IPriv, 1);
+ std::advance(IRed, 1);
+ std::advance(ITD, 1);
+ }
+ }
+ OMPPrivateScope InRedScope(CGF);
+ if (!InRedVars.empty()) {
+ ReductionCodeGen RedCG(InRedVars, InRedVars, InRedPrivs, InRedOps);
+ for (unsigned Cnt = 0, E = InRedVars.size(); Cnt < E; ++Cnt) {
+ RedCG.emitSharedOrigLValue(CGF, Cnt);
+ RedCG.emitAggregateType(CGF, Cnt);
+ // FIXME: This must be removed once the runtime library is fixed.
+ // Emit required threadprivate variables for
+ // initializer/combiner/finalizer.
+ CGF.CGM.getOpenMPRuntime().emitTaskReductionFixups(CGF, S.getBeginLoc(),
+ RedCG, Cnt);
+ llvm::Value *ReductionsPtr;
+ if (const Expr *TRExpr = TaskgroupDescriptors[Cnt]) {
+ ReductionsPtr =
+ CGF.EmitLoadOfScalar(CGF.EmitLValue(TRExpr), TRExpr->getExprLoc());
+ } else {
+ ReductionsPtr = llvm::ConstantPointerNull::get(CGF.VoidPtrTy);
+ }
+ Address Replacement = CGF.CGM.getOpenMPRuntime().getTaskReductionItem(
+ CGF, S.getBeginLoc(), ReductionsPtr, RedCG.getSharedLValue(Cnt));
+ Replacement = Address(
+ CGF.EmitScalarConversion(
+ Replacement.getPointer(), CGF.getContext().VoidPtrTy,
+ CGF.getContext().getPointerType(InRedPrivs[Cnt]->getType()),
+ InRedPrivs[Cnt]->getExprLoc()),
+ CGF.ConvertTypeForMem(InRedPrivs[Cnt]->getType()),
+ Replacement.getAlignment());
+ Replacement = RedCG.adjustPrivateAddress(CGF, Cnt, Replacement);
+ InRedScope.addPrivate(RedCG.getBaseDecl(Cnt), Replacement);
+ }
+ }
+ (void)InRedScope.Privatize();
+}
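A sketch of the construct this services (OpenMP 5.0 in_reduction on a target task; `r` assumed shared): the target task must locate its reduction item through the enclosing taskgroup's descriptor, which is what the second loop above does.

    int r = 0;
    #pragma omp taskgroup task_reduction(+: r)
    {
    #pragma omp target in_reduction(+: r) nowait
      r += 1;
    }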
+
void CodeGenFunction::EmitOMPTaskDirective(const OMPTaskDirective &S) {
// Emit outlined function for task construct.
const CapturedStmt *CS = S.getCapturedStmt(OMPD_task);
@@ -4963,11 +5191,7 @@ void CodeGenFunction::EmitOMPBarrierDirective(const OMPBarrierDirective &S) {
void CodeGenFunction::EmitOMPTaskwaitDirective(const OMPTaskwaitDirective &S) {
OMPTaskDataTy Data;
// Build list of dependences
- for (const auto *C : S.getClausesOfKind<OMPDependClause>()) {
- OMPTaskDataTy::DependData &DD =
- Data.Dependences.emplace_back(C->getDependencyKind(), C->getModifier());
- DD.DepExprs.append(C->varlist_begin(), C->varlist_end());
- }
+ buildDependences(S, Data);
CGM.getOpenMPRuntime().emitTaskwaitCall(*this, S.getBeginLoc(), Data);
}
@@ -5533,10 +5757,13 @@ void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
};
auto BodyGenCB = [&S, C, this](InsertPointTy AllocaIP,
- InsertPointTy CodeGenIP,
- llvm::BasicBlock &FiniBB) {
+ InsertPointTy CodeGenIP) {
+ Builder.restoreIP(CodeGenIP);
+
const CapturedStmt *CS = S.getInnermostCapturedStmt();
if (C) {
+ llvm::BasicBlock *FiniBB = splitBBWithSuffix(
+ Builder, /*CreateBranch=*/false, ".ordered.after");
llvm::SmallVector<llvm::Value *, 16> CapturedVars;
GenerateOpenMPCapturedVars(*CS, CapturedVars);
llvm::Function *OutlinedFn =
@@ -5544,13 +5771,11 @@ void CodeGenFunction::EmitOMPOrderedDirective(const OMPOrderedDirective &S) {
assert(S.getBeginLoc().isValid() &&
"Outlined function call location must be valid.");
ApplyDebugLocation::CreateDefaultArtificial(*this, S.getBeginLoc());
- OMPBuilderCBHelpers::EmitCaptureStmt(*this, CodeGenIP, FiniBB,
+ OMPBuilderCBHelpers::EmitCaptureStmt(*this, CodeGenIP, *FiniBB,
OutlinedFn, CapturedVars);
} else {
- OMPBuilderCBHelpers::InlinedRegionBodyRAII IRB(*this, AllocaIP,
- FiniBB);
- OMPBuilderCBHelpers::EmitOMPRegionBody(*this, CS->getCapturedStmt(),
- CodeGenIP, FiniBB);
+ OMPBuilderCBHelpers::EmitOMPInlinedRegionBody(
+ *this, CS->getCapturedStmt(), AllocaIP, CodeGenIP, "ordered");
}
};
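The callback now restores the insertion point and splits off the finalization block itself; for reference, a hypothetical construct routed through this path (n and process are assumptions):

    #pragma omp for ordered
    for (int i = 0; i < n; ++i) {
      #pragma omp ordered   /* body emitted via EmitOMPInlinedRegionBody */
      process(i);
    }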
@@ -5730,25 +5955,38 @@ static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
// Allow atomicrmw only if 'x' and 'update' are integer values, lvalue for 'x'
// expression is simple and atomic is allowed for the given type for the
// target platform.
- if (BO == BO_Comma || !Update.isScalar() ||
- !Update.getScalarVal()->getType()->isIntegerTy() || !X.isSimple() ||
+ if (BO == BO_Comma || !Update.isScalar() || !X.isSimple() ||
(!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
(Update.getScalarVal()->getType() !=
X.getAddress(CGF).getElementType())) ||
- !X.getAddress(CGF).getElementType()->isIntegerTy() ||
!Context.getTargetInfo().hasBuiltinAtomic(
Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
return std::make_pair(false, RValue::get(nullptr));
+ auto &&CheckAtomicSupport = [&CGF](llvm::Type *T, BinaryOperatorKind BO) {
+ if (T->isIntegerTy())
+ return true;
+
+ if (T->isFloatingPointTy() && (BO == BO_Add || BO == BO_Sub))
+ return llvm::isPowerOf2_64(CGF.CGM.getDataLayout().getTypeStoreSize(T));
+
+ return false;
+ };
+
+ if (!CheckAtomicSupport(Update.getScalarVal()->getType(), BO) ||
+ !CheckAtomicSupport(X.getAddress(CGF).getElementType(), BO))
+ return std::make_pair(false, RValue::get(nullptr));
+
+ bool IsInteger = X.getAddress(CGF).getElementType()->isIntegerTy();
llvm::AtomicRMWInst::BinOp RMWOp;
switch (BO) {
case BO_Add:
- RMWOp = llvm::AtomicRMWInst::Add;
+ RMWOp = IsInteger ? llvm::AtomicRMWInst::Add : llvm::AtomicRMWInst::FAdd;
break;
case BO_Sub:
if (!IsXLHSInRHSPart)
return std::make_pair(false, RValue::get(nullptr));
- RMWOp = llvm::AtomicRMWInst::Sub;
+ RMWOp = IsInteger ? llvm::AtomicRMWInst::Sub : llvm::AtomicRMWInst::FSub;
break;
case BO_And:
RMWOp = llvm::AtomicRMWInst::And;
@@ -5806,9 +6044,13 @@ static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
}
llvm::Value *UpdateVal = Update.getScalarVal();
if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
- UpdateVal = CGF.Builder.CreateIntCast(
- IC, X.getAddress(CGF).getElementType(),
- X.getType()->hasSignedIntegerRepresentation());
+ if (IsInteger)
+ UpdateVal = CGF.Builder.CreateIntCast(
+ IC, X.getAddress(CGF).getElementType(),
+ X.getType()->hasSignedIntegerRepresentation());
+ else
+ UpdateVal = CGF.Builder.CreateCast(llvm::Instruction::CastOps::UIToFP, IC,
+ X.getAddress(CGF).getElementType());
}
llvm::Value *Res =
CGF.Builder.CreateAtomicRMW(RMWOp, X.getPointer(CGF), UpdateVal, AO);
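With the CheckAtomicSupport change above, a floating-point add or sub update can now lower to atomicrmw fadd/fsub when the type's store size is a power of two; a sketch (illustrative, not from the patch):

    double x = 0.0;
    /* Previously rejected by the integer-only check; now eligible for
       atomicrmw fadd since sizeof(double) is a power of two. */
    #pragma omp atomic update
    x = x + 1.0;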
@@ -6011,11 +6253,88 @@ static void emitOMPAtomicCaptureExpr(CodeGenFunction &CGF,
}
}
+static void emitOMPAtomicCompareExpr(CodeGenFunction &CGF,
+ llvm::AtomicOrdering AO, const Expr *X,
+ const Expr *V, const Expr *R,
+ const Expr *E, const Expr *D,
+ const Expr *CE, bool IsXBinopExpr,
+ bool IsPostfixUpdate, bool IsFailOnly,
+ SourceLocation Loc) {
+ llvm::OpenMPIRBuilder &OMPBuilder =
+ CGF.CGM.getOpenMPRuntime().getOMPBuilder();
+
+ OMPAtomicCompareOp Op;
+ assert(isa<BinaryOperator>(CE) && "CE is not a BinaryOperator");
+ switch (cast<BinaryOperator>(CE)->getOpcode()) {
+ case BO_EQ:
+ Op = OMPAtomicCompareOp::EQ;
+ break;
+ case BO_LT:
+ Op = OMPAtomicCompareOp::MIN;
+ break;
+ case BO_GT:
+ Op = OMPAtomicCompareOp::MAX;
+ break;
+ default:
+ llvm_unreachable("unsupported atomic compare binary operator");
+ }
+
+ LValue XLVal = CGF.EmitLValue(X);
+ Address XAddr = XLVal.getAddress(CGF);
+
+ auto EmitRValueWithCastIfNeeded = [&CGF, Loc](const Expr *X, const Expr *E) {
+ if (X->getType() == E->getType())
+ return CGF.EmitScalarExpr(E);
+ const Expr *NewE = E->IgnoreImplicitAsWritten();
+ llvm::Value *V = CGF.EmitScalarExpr(NewE);
+ if (NewE->getType() == X->getType())
+ return V;
+ return CGF.EmitScalarConversion(V, NewE->getType(), X->getType(), Loc);
+ };
+
+ llvm::Value *EVal = EmitRValueWithCastIfNeeded(X, E);
+ llvm::Value *DVal = D ? EmitRValueWithCastIfNeeded(X, D) : nullptr;
+ if (auto *CI = dyn_cast<llvm::ConstantInt>(EVal))
+ EVal = CGF.Builder.CreateIntCast(
+ CI, XLVal.getAddress(CGF).getElementType(),
+ E->getType()->hasSignedIntegerRepresentation());
+ if (DVal)
+ if (auto *CI = dyn_cast<llvm::ConstantInt>(DVal))
+ DVal = CGF.Builder.CreateIntCast(
+ CI, XLVal.getAddress(CGF).getElementType(),
+ D->getType()->hasSignedIntegerRepresentation());
+
+ llvm::OpenMPIRBuilder::AtomicOpValue XOpVal{
+ XAddr.getPointer(), XAddr.getElementType(),
+ X->getType()->hasSignedIntegerRepresentation(),
+ X->getType().isVolatileQualified()};
+ llvm::OpenMPIRBuilder::AtomicOpValue VOpVal, ROpVal;
+ if (V) {
+ LValue LV = CGF.EmitLValue(V);
+ Address Addr = LV.getAddress(CGF);
+ VOpVal = {Addr.getPointer(), Addr.getElementType(),
+ V->getType()->hasSignedIntegerRepresentation(),
+ V->getType().isVolatileQualified()};
+ }
+ if (R) {
+ LValue LV = CGF.EmitLValue(R);
+ Address Addr = LV.getAddress(CGF);
+ ROpVal = {Addr.getPointer(), Addr.getElementType(),
+ R->getType()->hasSignedIntegerRepresentation(),
+ R->getType().isVolatileQualified()};
+ }
+
+ CGF.Builder.restoreIP(OMPBuilder.createAtomicCompare(
+ CGF.Builder, XOpVal, VOpVal, ROpVal, EVal, DVal, AO, Op, IsXBinopExpr,
+ IsPostfixUpdate, IsFailOnly));
+}
+
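A hypothetical source form handled by emitOMPAtomicCompareExpr; the equality form below maps to OMPAtomicCompareOp::EQ, while conditional min/max updates written with < or > select the MIN/MAX operations in the switch above:

    int x = 0, e = 1, d = 2;
    #pragma omp atomic compare
    if (x == e) { x = d; }   /* compare-and-swap: OMPAtomicCompareOp::EQ */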
static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
llvm::AtomicOrdering AO, bool IsPostfixUpdate,
- const Expr *X, const Expr *V, const Expr *E,
- const Expr *UE, bool IsXLHSInRHSPart,
- SourceLocation Loc) {
+ const Expr *X, const Expr *V, const Expr *R,
+ const Expr *E, const Expr *UE, const Expr *D,
+ const Expr *CE, bool IsXLHSInRHSPart,
+ bool IsFailOnly, SourceLocation Loc) {
switch (Kind) {
case OMPC_read:
emitOMPAtomicReadExpr(CGF, AO, X, V, Loc);
@@ -6031,9 +6350,11 @@ static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
emitOMPAtomicCaptureExpr(CGF, AO, IsPostfixUpdate, V, X, E, UE,
IsXLHSInRHSPart, Loc);
break;
- case OMPC_compare:
- // Do nothing here as we already emit an error.
+ case OMPC_compare: {
+ emitOMPAtomicCompareExpr(CGF, AO, X, V, R, E, D, CE, IsXLHSInRHSPart,
+ IsPostfixUpdate, IsFailOnly, Loc);
break;
+ }
case OMPC_if:
case OMPC_final:
case OMPC_num_threads:
@@ -6091,6 +6412,7 @@ static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
case OMPC_use_device_ptr:
case OMPC_use_device_addr:
case OMPC_is_device_ptr:
+ case OMPC_has_device_addr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
case OMPC_reverse_offload:
@@ -6121,6 +6443,7 @@ static void emitOMPAtomicExpr(CodeGenFunction &CGF, OpenMPClauseKind Kind,
case OMPC_memory_order:
case OMPC_bind:
case OMPC_align:
+ case OMPC_cancellation_construct_type:
llvm_unreachable("Clause is not allowed in 'omp atomic'.");
}
}
@@ -6144,19 +6467,24 @@ void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
AO = llvm::AtomicOrdering::Monotonic;
MemOrderingSpecified = true;
}
+ llvm::SmallSet<OpenMPClauseKind, 2> KindsEncountered;
OpenMPClauseKind Kind = OMPC_unknown;
for (const OMPClause *C : S.clauses()) {
// Find the first clause (skip seq_cst|acq_rel|acquire|release|relaxed clause,
// if it is first).
- if (C->getClauseKind() != OMPC_seq_cst &&
- C->getClauseKind() != OMPC_acq_rel &&
- C->getClauseKind() != OMPC_acquire &&
- C->getClauseKind() != OMPC_release &&
- C->getClauseKind() != OMPC_relaxed && C->getClauseKind() != OMPC_hint) {
- Kind = C->getClauseKind();
- break;
- }
- }
+ OpenMPClauseKind K = C->getClauseKind();
+ if (K == OMPC_seq_cst || K == OMPC_acq_rel || K == OMPC_acquire ||
+ K == OMPC_release || K == OMPC_relaxed || K == OMPC_hint)
+ continue;
+ Kind = K;
+ KindsEncountered.insert(K);
+ }
+ // We just need to correct Kind here. No need to set a bool saying it is
+ // actually compare capture because we can tell from whether V and R are
+ // nullptr.
+ if (KindsEncountered.contains(OMPC_compare) &&
+ KindsEncountered.contains(OMPC_capture))
+ Kind = OMPC_compare;
if (!MemOrderingSpecified) {
llvm::AtomicOrdering DefaultOrder =
CGM.getOpenMPRuntime().getDefaultMemoryOrdering();
@@ -6178,7 +6506,8 @@ void CodeGenFunction::EmitOMPAtomicDirective(const OMPAtomicDirective &S) {
LexicalScope Scope(*this, S.getSourceRange());
EmitStopPoint(S.getAssociatedStmt());
emitOMPAtomicExpr(*this, Kind, AO, S.isPostfixUpdate(), S.getX(), S.getV(),
- S.getExpr(), S.getUpdateExpr(), S.isXLHSInRHSPart(),
+ S.getR(), S.getExpr(), S.getUpdateExpr(), S.getD(),
+ S.getCondExpr(), S.isXLHSInRHSPart(), S.isFailOnly(),
S.getBeginLoc());
}
@@ -6230,6 +6559,13 @@ static void emitCommonOMPTargetDirective(CodeGenFunction &CGF,
if (CGM.getLangOpts().OMPTargetTriples.empty())
IsOffloadEntry = false;
+ if (CGM.getLangOpts().OpenMPOffloadMandatory && !IsOffloadEntry) {
+ unsigned DiagID = CGM.getDiags().getCustomDiagID(
+ DiagnosticsEngine::Error,
+ "No offloading entry generated while offloading is mandatory.");
+ CGM.getDiags().Report(DiagID);
+ }
+
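+  /* For illustration (hypothetical region): with offloading mandatory, a
+     target region for which no offload entry is generated now triggers the
+     hard error above instead of silently falling back to host execution:
+       #pragma omp target
+       { ... }
+  */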
assert(CGF.CurFuncDecl && "No parent declaration for target region!");
StringRef ParentName;
// In case we have Ctors/Dtors we use the complete type variant to produce
@@ -6806,30 +7142,26 @@ void CodeGenFunction::EmitOMPUseDevicePtrClause(
if (InitAddrIt == CaptureDeviceAddrMap.end())
continue;
- bool IsRegistered = PrivateScope.addPrivate(
- OrigVD, [this, OrigVD, InitAddrIt, InitVD, PvtVD]() {
- // Initialize the temporary initialization variable with the address
- // we get from the runtime library. We have to cast the source address
- // because it is always a void *. References are materialized in the
- // privatization scope, so the initialization here disregards the fact
- // the original variable is a reference.
- QualType AddrQTy = getContext().getPointerType(
- OrigVD->getType().getNonReferenceType());
- llvm::Type *AddrTy = ConvertTypeForMem(AddrQTy);
- Address InitAddr = Builder.CreateBitCast(InitAddrIt->second, AddrTy);
- setAddrOfLocalVar(InitVD, InitAddr);
-
- // Emit private declaration, it will be initialized by the value we
- // declaration we just added to the local declarations map.
- EmitDecl(*PvtVD);
-
- // The initialization variables reached its purpose in the emission
- // of the previous declaration, so we don't need it anymore.
- LocalDeclMap.erase(InitVD);
-
- // Return the address of the private variable.
- return GetAddrOfLocalVar(PvtVD);
- });
+ // Initialize the temporary initialization variable with the address
+ // we get from the runtime library. We have to cast the source address
+ // because it is always a void *. References are materialized in the
+ // privatization scope, so the initialization here disregards the fact
+ // the original variable is a reference.
+ llvm::Type *Ty = ConvertTypeForMem(OrigVD->getType().getNonReferenceType());
+ Address InitAddr = Builder.CreateElementBitCast(InitAddrIt->second, Ty);
+ setAddrOfLocalVar(InitVD, InitAddr);
+
+ // Emit the private declaration; it will be initialized by the value of the
+ // declaration we just added to the local declarations map.
+ EmitDecl(*PvtVD);
+
+ // The initialization variable has served its purpose in the emission
+ // of the previous declaration, so we don't need it anymore.
+ LocalDeclMap.erase(InitVD);
+
+ // Register the address of the private variable.
+ bool IsRegistered =
+ PrivateScope.addPrivate(OrigVD, GetAddrOfLocalVar(PvtVD));
assert(IsRegistered && "firstprivate var already registered as private");
// Silence the warning about unused variable.
(void)IsRegistered;
@@ -6879,17 +7211,15 @@ void CodeGenFunction::EmitOMPUseDeviceAddrClause(
// For declrefs and variable length arrays we need to load the pointer for
// correct mapping, since the pointer to the data was passed to the runtime.
if (isa<DeclRefExpr>(Ref->IgnoreParenImpCasts()) ||
- MatchingVD->getType()->isArrayType())
- PrivAddr =
- EmitLoadOfPointer(PrivAddr, getContext()
- .getPointerType(OrigVD->getType())
- ->castAs<PointerType>());
- llvm::Type *RealTy =
- ConvertTypeForMem(OrigVD->getType().getNonReferenceType())
- ->getPointerTo();
- PrivAddr = Builder.CreatePointerBitCastOrAddrSpaceCast(PrivAddr, RealTy);
+ MatchingVD->getType()->isArrayType()) {
+ QualType PtrTy = getContext().getPointerType(
+ OrigVD->getType().getNonReferenceType());
+ PrivAddr = EmitLoadOfPointer(
+ Builder.CreateElementBitCast(PrivAddr, ConvertTypeForMem(PtrTy)),
+ PtrTy->castAs<PointerType>());
+ }
- (void)PrivateScope.addPrivate(OrigVD, [PrivAddr]() { return PrivAddr; });
+ (void)PrivateScope.addPrivate(OrigVD, PrivAddr);
}
}
@@ -7161,8 +7491,7 @@ static void mapParam(CodeGenFunction &CGF, const DeclRefExpr *Helper,
const ImplicitParamDecl *PVD,
CodeGenFunction::OMPPrivateScope &Privates) {
const auto *VDecl = cast<VarDecl>(Helper->getDecl());
- Privates.addPrivate(VDecl,
- [&CGF, PVD]() { return CGF.GetAddrOfLocalVar(PVD); });
+ Privates.addPrivate(VDecl, CGF.GetAddrOfLocalVar(PVD));
}
void CodeGenFunction::EmitOMPTaskLoopBasedDirective(const OMPLoopDirective &S) {
@@ -7454,8 +7783,7 @@ void CodeGenFunction::EmitSimpleOMPExecutableDirective(
continue;
if (!CGF.LocalDeclMap.count(VD)) {
LValue GlobLVal = CGF.EmitLValue(Ref);
- GlobalsScope.addPrivate(
- VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); });
+ GlobalsScope.addPrivate(VD, GlobLVal.getAddress(CGF));
}
}
}
@@ -7470,8 +7798,7 @@ void CodeGenFunction::EmitSimpleOMPExecutableDirective(
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) {
LValue GlobLVal = CGF.EmitLValue(E);
- GlobalsScope.addPrivate(
- VD, [&GlobLVal, &CGF]() { return GlobLVal.getAddress(CGF); });
+ GlobalsScope.addPrivate(VD, GlobLVal.getAddress(CGF));
}
if (isa<OMPCapturedExprDecl>(VD)) {
// Emit only those that were not explicitly referenced in clauses.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGVTT.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGVTT.cpp
index 564d9f354e64..ebac9196df02 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGVTT.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGVTT.cpp
@@ -96,9 +96,6 @@ CodeGenVTables::EmitVTTDefinition(llvm::GlobalVariable *VTT,
if (CGM.supportsCOMDAT() && VTT->isWeakForLinker())
VTT->setComdat(CGM.getModule().getOrInsertComdat(VTT->getName()));
-
- // Set the right visibility.
- CGM.setGVProperties(VTT, RD);
}
llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTT(const CXXRecordDecl *RD) {
@@ -122,6 +119,7 @@ llvm::GlobalVariable *CodeGenVTables::GetAddrOfVTT(const CXXRecordDecl *RD) {
llvm::GlobalVariable *GV = CGM.CreateOrReplaceCXXRuntimeVariable(
Name, ArrayType, llvm::GlobalValue::ExternalLinkage, Align);
GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ CGM.setGVProperties(GV, RD);
return GV;
}
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp b/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp
index c839376880c4..cdd40d2a6a2e 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CGVTables.cpp
@@ -90,9 +90,11 @@ static RValue PerformReturnAdjustment(CodeGenFunction &CGF,
auto ClassDecl = ResultType->getPointeeType()->getAsCXXRecordDecl();
auto ClassAlign = CGF.CGM.getClassPointerAlignment(ClassDecl);
- ReturnValue = CGF.CGM.getCXXABI().performReturnAdjustment(CGF,
- Address(ReturnValue, ClassAlign),
- Thunk.Return);
+ ReturnValue = CGF.CGM.getCXXABI().performReturnAdjustment(
+ CGF,
+ Address(ReturnValue, CGF.ConvertTypeForMem(ResultType->getPointeeType()),
+ ClassAlign),
+ Thunk.Return);
if (NullCheckValue) {
CGF.Builder.CreateBr(AdjustEnd);
@@ -198,7 +200,9 @@ CodeGenFunction::GenerateVarArgsThunk(llvm::Function *Fn,
// Find the first store of "this", which will be to the alloca associated
// with "this".
- Address ThisPtr(&*AI, CGM.getClassPointerAlignment(MD->getParent()));
+ Address ThisPtr =
+ Address(&*AI, ConvertTypeForMem(MD->getThisType()->getPointeeType()),
+ CGM.getClassPointerAlignment(MD->getParent()));
llvm::BasicBlock *EntryBB = &Fn->front();
llvm::BasicBlock::iterator ThisStore =
llvm::find_if(*EntryBB, [&](llvm::Instruction &I) {
@@ -396,9 +400,7 @@ void CodeGenFunction::EmitMustTailThunk(GlobalDecl GD,
// to translate AST arguments into LLVM IR arguments. For thunks, we know
// that the caller prototype more or less matches the callee prototype with
// the exception of 'this'.
- SmallVector<llvm::Value *, 8> Args;
- for (llvm::Argument &A : CurFn->args())
- Args.push_back(&A);
+ SmallVector<llvm::Value *, 8> Args(llvm::make_pointer_range(CurFn->args()));
// Set the adjusted 'this' pointer.
const ABIArgInfo &ThisAI = CurFnInfo->arg_begin()->info;
@@ -1173,7 +1175,10 @@ void CodeGenModule::EmitDeferredVTables() {
DeferredVTables.clear();
}
-bool CodeGenModule::HasLTOVisibilityPublicStd(const CXXRecordDecl *RD) {
+bool CodeGenModule::AlwaysHasLTOVisibilityPublic(const CXXRecordDecl *RD) {
+ if (RD->hasAttr<LTOVisibilityPublicAttr>() || RD->hasAttr<UuidAttr>())
+ return true;
+
if (!getCodeGenOpts().LTOVisibilityPublicStd)
return false;
@@ -1198,9 +1203,6 @@ bool CodeGenModule::HasHiddenLTOVisibility(const CXXRecordDecl *RD) {
if (!isExternallyVisible(LV.getLinkage()))
return true;
- if (RD->hasAttr<LTOVisibilityPublicAttr>() || RD->hasAttr<UuidAttr>())
- return false;
-
if (getTriple().isOSBinFormatCOFF()) {
if (RD->hasAttr<DLLExportAttr>() || RD->hasAttr<DLLImportAttr>())
return false;
@@ -1209,7 +1211,7 @@ bool CodeGenModule::HasHiddenLTOVisibility(const CXXRecordDecl *RD) {
return false;
}
- return !HasLTOVisibilityPublicStd(RD);
+ return !AlwaysHasLTOVisibilityPublic(RD);
}
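An illustrative (hypothetical) use of the attribute the renamed helper now checks directly; such a class always keeps public LTO visibility, so its vtable remains visible to other LTO units:

    class [[clang::lto_visibility_public]] Interface {  /* hypothetical name */
    public:
      virtual void run();
    };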
llvm::GlobalObject::VCallVisibility CodeGenModule::GetVCallVisibilityLevel(
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp
index c2c508dedb09..4ffbecdf2741 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenAction.cpp
@@ -340,6 +340,15 @@ namespace clang {
CodeGenOpts.getProfileUse() != CodeGenOptions::ProfileNone)
Ctx.setDiagnosticsHotnessRequested(true);
+ if (CodeGenOpts.MisExpect) {
+ Ctx.setMisExpectWarningRequested(true);
+ }
+
+ if (CodeGenOpts.DiagnosticsMisExpectTolerance) {
+ Ctx.setDiagnosticsMisExpectTolerance(
+ CodeGenOpts.DiagnosticsMisExpectTolerance);
+ }
+
// Link each LinkModule into our module.
if (LinkInModules())
return;
@@ -440,6 +449,9 @@ namespace clang {
void OptimizationFailureHandler(
const llvm::DiagnosticInfoOptimizationFailure &D);
void DontCallDiagHandler(const DiagnosticInfoDontCall &D);
+ /// Specialized handler for misexpect warnings.
/// Note that misexpect remarks are emitted through ORE.
+ void MisExpectDiagHandler(const llvm::DiagnosticInfoMisExpect &D);
};
void BackendConsumer::anchor() {}
@@ -821,6 +833,25 @@ void BackendConsumer::DontCallDiagHandler(const DiagnosticInfoDontCall &D) {
<< llvm::demangle(D.getFunctionName().str()) << D.getNote();
}
+void BackendConsumer::MisExpectDiagHandler(
+ const llvm::DiagnosticInfoMisExpect &D) {
+ StringRef Filename;
+ unsigned Line, Column;
+ bool BadDebugInfo = false;
+ FullSourceLoc Loc =
+ getBestLocationFromDebugLoc(D, BadDebugInfo, Filename, Line, Column);
+
+ Diags.Report(Loc, diag::warn_profile_data_misexpect) << D.getMsg().str();
+
+ if (BadDebugInfo)
+ // If we were not able to translate the file:line:col information
+ // back to a SourceLocation, at least emit a note stating that
+ // we could not translate this location. This can happen in the
+ // case of #line directives.
+ Diags.Report(Loc, diag::note_fe_backend_invalid_loc)
+ << Filename << Line << Column;
+}
+
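For context, the kind of annotation the new handler reports on; a sketch assuming profile data (e.g. via -fprofile-instr-use) that contradicts the hint, with the tolerance taken from -fdiagnostics-misexpect-tolerance as wired up earlier in this file:

    if (__builtin_expect(err != 0, 0)) /* annotated unlikely; if the profile
                                          shows it is usually taken, the backend
                                          raises DK_MisExpect and we report
                                          warn_profile_data_misexpect */
      handle_error(err);              /* handle_error is hypothetical */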
/// This function is invoked when the backend needs
/// to report something to the user.
void BackendConsumer::DiagnosticHandlerImpl(const DiagnosticInfo &DI) {
@@ -895,6 +926,9 @@ void BackendConsumer::DiagnosticHandlerImpl(const DiagnosticInfo &DI) {
case llvm::DK_DontCall:
DontCallDiagHandler(cast<DiagnosticInfoDontCall>(DI));
return;
+ case llvm::DK_MisExpect:
+ MisExpectDiagHandler(cast<DiagnosticInfoMisExpect>(DI));
+ return;
default:
// Plugin IDs are not bound to any value as they are set dynamically.
ComputeDiagRemarkID(Severity, backend_plugin, DiagID);
@@ -983,6 +1017,8 @@ CodeGenAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
if (BA != Backend_EmitNothing && !OS)
return nullptr;
+ VMContext->setOpaquePointers(CI.getCodeGenOpts().OpaquePointers);
+
// Load bitcode modules to link with, if we need to.
if (LinkModules.empty())
for (const CodeGenOptions::BitcodeFileToLink &F :
@@ -1113,7 +1149,7 @@ void CodeGenAction::ExecuteAction() {
auto &CodeGenOpts = CI.getCodeGenOpts();
auto &Diagnostics = CI.getDiagnostics();
std::unique_ptr<raw_pwrite_stream> OS =
- GetOutputStream(CI, getCurrentFile(), BA);
+ GetOutputStream(CI, getCurrentFileOrBufferName(), BA);
if (BA != Backend_EmitNothing && !OS)
return;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
index 50e1638924d1..05942f462dd1 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -105,8 +105,9 @@ clang::ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) {
case LangOptions::FPE_Ignore: return llvm::fp::ebIgnore;
case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
case LangOptions::FPE_Strict: return llvm::fp::ebStrict;
+ default:
+ llvm_unreachable("Unsupported FP Exception Behavior");
}
- llvm_unreachable("Unsupported FP Exception Behavior");
}
void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) {
@@ -145,12 +146,11 @@ void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) {
FMFGuard.emplace(CGF.Builder);
- llvm::RoundingMode NewRoundingBehavior =
- static_cast<llvm::RoundingMode>(FPFeatures.getRoundingMode());
+ llvm::RoundingMode NewRoundingBehavior = FPFeatures.getRoundingMode();
CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior);
auto NewExceptionBehavior =
ToConstrainedExceptMD(static_cast<LangOptions::FPExceptionModeKind>(
- FPFeatures.getFPExceptionMode()));
+ FPFeatures.getExceptionMode()));
CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior);
CGF.SetFastMathFlags(FPFeatures);
@@ -383,9 +383,6 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
"__cyg_profile_func_exit");
}
- if (ShouldSkipSanitizerInstrumentation())
- CurFn->addFnAttr(llvm::Attribute::DisableSanitizerInstrumentation);
-
// Emit debug descriptor for function end.
if (CGDebugInfo *DI = getDebugInfo())
DI->EmitFunctionEnd(Builder, CurFn);
@@ -488,6 +485,9 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
std::max((uint64_t)LargestVectorWidth,
VT->getPrimitiveSizeInBits().getKnownMinSize());
+ if (CurFnInfo->getMaxVectorWidth() > LargestVectorWidth)
+ LargestVectorWidth = CurFnInfo->getMaxVectorWidth();
+
// Add the required-vector-width attribute. This contains the max width from:
// 1. min-vector-width attribute used in the source program.
// 2. Any builtins used that have a vector width specified.
@@ -502,8 +502,7 @@ void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
getContext().getTargetInfo().getVScaleRange(getLangOpts());
if (VScaleRange) {
CurFn->addFnAttr(llvm::Attribute::getWithVScaleRangeArgs(
- getLLVMContext(), VScaleRange.getValue().first,
- VScaleRange.getValue().second));
+ getLLVMContext(), VScaleRange->first, VScaleRange->second));
}
// If we generated an unreachable return block, delete it now.
@@ -560,29 +559,6 @@ bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
XRayInstrKind::Typed);
}
-llvm::Constant *
-CodeGenFunction::EncodeAddrForUseInPrologue(llvm::Function *F,
- llvm::Constant *Addr) {
- // Addresses stored in prologue data can't require run-time fixups and must
- // be PC-relative. Run-time fixups are undesirable because they necessitate
- // writable text segments, which are unsafe. And absolute addresses are
- // undesirable because they break PIE mode.
-
- // Add a layer of indirection through a private global. Taking its address
- // won't result in a run-time fixup, even if Addr has linkonce_odr linkage.
- auto *GV = new llvm::GlobalVariable(CGM.getModule(), Addr->getType(),
- /*isConstant=*/true,
- llvm::GlobalValue::PrivateLinkage, Addr);
-
- // Create a PC-relative address.
- auto *GOTAsInt = llvm::ConstantExpr::getPtrToInt(GV, IntPtrTy);
- auto *FuncAsInt = llvm::ConstantExpr::getPtrToInt(F, IntPtrTy);
- auto *PCRelAsInt = llvm::ConstantExpr::getSub(GOTAsInt, FuncAsInt);
- return (IntPtrTy == Int32Ty)
- ? PCRelAsInt
- : llvm::ConstantExpr::getTrunc(PCRelAsInt, Int32Ty);
-}
-
llvm::Value *
CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
llvm::Value *EncodedAddr) {
@@ -593,19 +569,21 @@ CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");
// Load the original pointer through the global.
- return Builder.CreateLoad(Address(GOTAddr, getPointerAlign()),
+ return Builder.CreateLoad(Address(GOTAddr, Int8PtrTy, getPointerAlign()),
"decoded_addr");
}
-void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
- llvm::Function *Fn)
-{
- if (!FD->hasAttr<OpenCLKernelAttr>())
+void CodeGenFunction::EmitKernelMetadata(const FunctionDecl *FD,
+ llvm::Function *Fn) {
+ if (!FD->hasAttr<OpenCLKernelAttr>() && !FD->hasAttr<CUDAGlobalAttr>())
return;
llvm::LLVMContext &Context = getLLVMContext();
- CGM.GenOpenCLArgMetadata(Fn, FD, this);
+ CGM.GenKernelArgMetadata(Fn, FD, this);
+
+ if (!getLangOpts().OpenCL)
+ return;
if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
QualType HintQTy = A->getTypeHint();
@@ -743,6 +721,7 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
} while (false);
if (D) {
+ const bool SanitizeBounds = SanOpts.hasOneOf(SanitizerKind::Bounds);
bool NoSanitizeCoverage = false;
for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
@@ -763,21 +742,29 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
NoSanitizeCoverage = true;
}
+ if (SanitizeBounds && !SanOpts.hasOneOf(SanitizerKind::Bounds))
+ Fn->addFnAttr(llvm::Attribute::NoSanitizeBounds);
+
if (NoSanitizeCoverage && CGM.getCodeGenOpts().hasSanitizeCoverage())
Fn->addFnAttr(llvm::Attribute::NoSanitizeCoverage);
}
- // Apply sanitizer attributes to the function.
- if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
- Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
- if (SanOpts.hasOneOf(SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress))
- Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
- if (SanOpts.has(SanitizerKind::MemTag))
- Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
- if (SanOpts.has(SanitizerKind::Thread))
- Fn->addFnAttr(llvm::Attribute::SanitizeThread);
- if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
- Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
+ if (ShouldSkipSanitizerInstrumentation()) {
+ CurFn->addFnAttr(llvm::Attribute::DisableSanitizerInstrumentation);
+ } else {
+ // Apply sanitizer attributes to the function.
+ if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
+ Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
+ if (SanOpts.hasOneOf(SanitizerKind::HWAddress |
+ SanitizerKind::KernelHWAddress))
+ Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
+ if (SanOpts.has(SanitizerKind::MemtagStack))
+ Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
+ if (SanOpts.has(SanitizerKind::Thread))
+ Fn->addFnAttr(llvm::Attribute::SanitizeThread);
+ if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
+ Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
+ }
if (SanOpts.has(SanitizerKind::SafeStack))
Fn->addFnAttr(llvm::Attribute::SafeStack);
if (SanOpts.has(SanitizerKind::ShadowCallStack))
@@ -911,9 +898,10 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
if (D && D->hasAttr<NoProfileFunctionAttr>())
Fn->addFnAttr(llvm::Attribute::NoProfile);
- if (FD && getLangOpts().OpenCL) {
+ if (FD && (getLangOpts().OpenCL ||
+ (getLangOpts().HIP && getLangOpts().CUDAIsDevice))) {
// Add metadata for a kernel function.
- EmitOpenCLKernelMetadata(FD, Fn);
+ EmitKernelMetadata(FD, Fn);
}
// If we are checking function types, emit a function type signature as
@@ -926,12 +914,13 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
FD->getType(), EST_None);
llvm::Constant *FTRTTIConst =
CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
- llvm::Constant *FTRTTIConstEncoded =
- EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
- llvm::Constant *PrologueStructElems[] = {PrologueSig, FTRTTIConstEncoded};
- llvm::Constant *PrologueStructConst =
- llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
- Fn->setPrologueData(PrologueStructConst);
+ llvm::GlobalVariable *FTRTTIProxy =
+ CGM.GetOrCreateRTTIProxyGlobalVariable(FTRTTIConst);
+ llvm::LLVMContext &Ctx = Fn->getContext();
+ llvm::MDBuilder MDB(Ctx);
+ Fn->setMetadata(llvm::LLVMContext::MD_func_sanitize,
+ MDB.createRTTIPointerPrologue(PrologueSig, FTRTTIProxy));
+ CGM.addCompilerUsedGlobal(FTRTTIProxy);
}
}
@@ -963,9 +952,9 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
(getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>())))
Fn->addFnAttr(llvm::Attribute::NoRecurse);
- llvm::RoundingMode RM = getLangOpts().getFPRoundingMode();
+ llvm::RoundingMode RM = getLangOpts().getDefaultRoundingMode();
llvm::fp::ExceptionBehavior FPExceptionBehavior =
- ToConstrainedExceptMD(getLangOpts().getFPExceptionMode());
+ ToConstrainedExceptMD(getLangOpts().getDefaultExceptionMode());
Builder.setDefaultConstrainedRounding(RM);
Builder.setDefaultConstrainedExcept(FPExceptionBehavior);
if ((FD && (FD->UsesFPIntrin() || FD->hasAttr<StrictFPAttr>())) ||
@@ -981,6 +970,10 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
CGM.getCodeGenOpts().StackAlignment))
Fn->addFnAttr("stackrealign");
+ // "main" doesn't need to zero out call-used registers.
+ if (FD && FD->isMain())
+ Fn->removeFnAttr("zero-call-used-regs");
+
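+  /* For illustration (hypothetical, assuming -fzero-call-used-regs=used-gpr):
+       static int helper(int x) { return x + 1; }  // keeps the attribute
+       int main(void) { return helper(41); }       // attribute removed above
+  */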
llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
// Create a marker to make it easy to insert allocas into the entryblock
@@ -1094,12 +1087,13 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
llvm::Function::arg_iterator EI = CurFn->arg_end();
--EI;
llvm::Value *Addr = Builder.CreateStructGEP(
- EI->getType()->getPointerElementType(), &*EI, Idx);
+ CurFnInfo->getArgStruct(), &*EI, Idx);
llvm::Type *Ty =
cast<llvm::GetElementPtrInst>(Addr)->getResultElementType();
- ReturnValuePointer = Address(Addr, getPointerAlign());
+ ReturnValuePointer = Address(Addr, Ty, getPointerAlign());
Addr = Builder.CreateAlignedLoad(Ty, Addr, getPointerAlign(), "agg.result");
- ReturnValue = Address(Addr, CGM.getNaturalTypeAlignment(RetTy));
+ ReturnValue =
+ Address(Addr, ConvertType(RetTy), CGM.getNaturalTypeAlignment(RetTy));
} else {
ReturnValue = CreateIRTemp(RetTy, "retval");
@@ -1122,7 +1116,8 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
EmitFunctionProlog(*CurFnInfo, CurFn, Args);
- if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
+ if (isa_and_nonnull<CXXMethodDecl>(D) &&
+ cast<CXXMethodDecl>(D)->isInstance()) {
CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
if (MD->getParent()->isLambda() &&
@@ -1183,27 +1178,26 @@ void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
}
// If any of the arguments have a variably modified type, make sure to
- // emit the type size.
- for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
- i != e; ++i) {
- const VarDecl *VD = *i;
-
- // Dig out the type as written from ParmVarDecls; it's unclear whether
- // the standard (C99 6.9.1p10) requires this, but we're following the
- // precedent set by gcc.
- QualType Ty;
- if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
- Ty = PVD->getOriginalType();
- else
- Ty = VD->getType();
+ // emit the type size, but only if the function is not naked. Naked functions
+ // have no prolog to run this evaluation.
+ if (!FD || !FD->hasAttr<NakedAttr>()) {
+ for (const VarDecl *VD : Args) {
+ // Dig out the type as written from ParmVarDecls; it's unclear whether
+ // the standard (C99 6.9.1p10) requires this, but we're following the
+ // precedent set by gcc.
+ QualType Ty;
+ if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
+ Ty = PVD->getOriginalType();
+ else
+ Ty = VD->getType();
- if (Ty->isVariablyModifiedType())
- EmitVariablyModifiedType(Ty);
+ if (Ty->isVariablyModifiedType())
+ EmitVariablyModifiedType(Ty);
+ }
}
// Emit a location at the end of the prologue.
if (CGDebugInfo *DI = getDebugInfo())
DI->EmitLocation(Builder, StartLoc);
-
// TODO: Do we need to handle this in two places like we do with
// target-features/target-cpu?
if (CurFuncDecl)
@@ -1398,8 +1392,7 @@ void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
// Save parameters for coroutine function.
if (Body && isa_and_nonnull<CoroutineBodyStmt>(Body))
- for (const auto *ParamDecl : FD->parameters())
- FnArgs.push_back(ParamDecl);
+ llvm::append_range(FnArgs, FD->parameters());
// Generate the body of the function.
PGO.assignRegionCounters(GD, CurFn);
@@ -1924,7 +1917,7 @@ static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
dest.getAlignment().alignmentOfArrayElement(baseSize);
// memcpy the individual element bit-pattern.
- Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
+ Builder.CreateMemCpy(Address(cur, CGF.Int8Ty, curAlign), src, baseSizeInChars,
/*volatile*/ false);
// Go to the next element.
@@ -1997,7 +1990,7 @@ CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
CharUnits NullAlign = DestPtr.getAlignment();
NullVariable->setAlignment(NullAlign.getAsAlign());
Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
- NullAlign);
+ Builder.getInt8Ty(), NullAlign);
if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
@@ -2299,6 +2292,7 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
case Type::TypeOf:
case Type::UnaryTransform:
case Type::Attributed:
+ case Type::BTFTagAttributed:
case Type::SubstTemplateTypeParm:
case Type::MacroQualified:
// Keep walking after single level desugaring.
@@ -2475,7 +2469,7 @@ Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
V = Builder.CreateBitCast(V, VTy);
}
- return Address(V, Addr.getAlignment());
+ return Address(V, Addr.getElementType(), Addr.getAlignment());
}
CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
@@ -2536,16 +2530,13 @@ void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
llvm::StringMap<bool> CallerFeatureMap;
CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);
if (BuiltinID) {
- StringRef FeatureList(
- CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
- // Return if the builtin doesn't have any required features.
- if (FeatureList.empty())
- return;
- assert(!FeatureList.contains(' ') && "Space in feature list");
- TargetFeatures TF(CallerFeatureMap);
- if (!TF.hasRequiredFeatures(FeatureList))
+ StringRef FeatureList(CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
+ if (!Builtin::evaluateRequiredTargetFeatures(
+ FeatureList, CallerFeatureMap)) {
CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
- << TargetDecl->getDeclName() << FeatureList;
+ << TargetDecl->getDeclName()
+ << FeatureList;
+ }
} else if (!TargetDecl->isMultiVersion() &&
TargetDecl->hasAttr<TargetAttr>()) {
// Get the required features for the callee.
@@ -2614,9 +2605,8 @@ static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
return;
}
- llvm::SmallVector<llvm::Value *, 10> Args;
- llvm::for_each(Resolver->args(),
- [&](llvm::Argument &Arg) { Args.push_back(&Arg); });
+ llvm::SmallVector<llvm::Value *, 10> Args(
+ llvm::make_pointer_range(Resolver->args()));
llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
@@ -2754,3 +2744,19 @@ CodeGenFunction::emitCondLikelihoodViaExpectIntrinsic(llvm::Value *Cond,
}
llvm_unreachable("Unknown Likelihood");
}
+
+llvm::Value *CodeGenFunction::emitBoolVecConversion(llvm::Value *SrcVec,
+ unsigned NumElementsDst,
+ const llvm::Twine &Name) {
+ auto *SrcTy = cast<llvm::FixedVectorType>(SrcVec->getType());
+ unsigned NumElementsSrc = SrcTy->getNumElements();
+ if (NumElementsSrc == NumElementsDst)
+ return SrcVec;
+
+ std::vector<int> ShuffleMask(NumElementsDst, -1);
+ for (unsigned MaskIdx = 0;
+ MaskIdx < std::min<>(NumElementsDst, NumElementsSrc); ++MaskIdx)
+ ShuffleMask[MaskIdx] = MaskIdx;
+
+ return Builder.CreateShuffleVector(SrcVec, ShuffleMask, Name);
+}
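A minimal usage sketch of the new helper (caller names Mask8 and CGF are assumptions). Narrowing keeps the leading lanes; widening pads the shuffle mask with -1, i.e. poison lanes:

    /* <8 x i1> -> <4 x i1>: mask {0,1,2,3} keeps the low lanes;
       <4 x i1> -> <8 x i1> would use {0,1,2,3,-1,-1,-1,-1}. */
    llvm::Value *Narrow =
        CGF.emitBoolVecConversion(Mask8, /*NumElementsDst=*/4, "mask.trunc");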
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
index df99cd9a1b79..fe0890f433e8 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenFunction.h
@@ -201,10 +201,11 @@ template <> struct DominatingValue<RValue> {
AggregateAddress, ComplexAddress };
llvm::Value *Value;
+ llvm::Type *ElementType;
unsigned K : 3;
unsigned Align : 29;
- saved_type(llvm::Value *v, Kind k, unsigned a = 0)
- : Value(v), K(k), Align(a) {}
+ saved_type(llvm::Value *v, llvm::Type *e, Kind k, unsigned a = 0)
+ : Value(v), ElementType(e), K(k), Align(a) {}
public:
static bool needsSaving(RValue value);
@@ -551,6 +552,12 @@ public:
/// True if the current statement has nomerge attribute.
bool InNoMergeAttributedStmt = false;
+ /// True if the current statement has noinline attribute.
+ bool InNoInlineAttributedStmt = false;
+
+ /// True if the current statement has always_inline attribute.
+ bool InAlwaysInlineAttributedStmt = false;
+
// The CallExpr within the current statement that the musttail attribute
// applies to. nullptr if there is no 'musttail' on the current statement.
const CallExpr *MustTailCall = nullptr;
@@ -1065,15 +1072,14 @@ public:
/// Enter a new OpenMP private scope.
explicit OMPPrivateScope(CodeGenFunction &CGF) : RunCleanupsScope(CGF) {}
- /// Registers \p LocalVD variable as a private and apply \p PrivateGen
- /// function for it to generate corresponding private variable. \p
- /// PrivateGen returns an address of the generated private variable.
+ /// Registers \p LocalVD variable as a private with \p Addr as the address
+ /// of the corresponding private variable.
/// \return true if the variable is registered as private, false if it has
/// been privatized already.
- bool addPrivate(const VarDecl *LocalVD,
- const llvm::function_ref<Address()> PrivateGen) {
+ bool addPrivate(const VarDecl *LocalVD, Address Addr) {
assert(PerformCleanup && "adding private to dead scope");
- return MappedVars.setVarAddr(CGF, LocalVD, PrivateGen());
+ return MappedVars.setVarAddr(CGF, LocalVD, Addr);
}
/// Privatizes local variables previously registered as private.
@@ -1523,10 +1529,7 @@ public:
/// Get the profiler's count for the given statement.
uint64_t getProfileCount(const Stmt *S) {
- Optional<uint64_t> Count = PGO.getStmtCount(S);
- if (!Count.hasValue())
- return 0;
- return *Count;
+ return PGO.getStmtCount(S).value_or(0);
}
/// Set the profiler's current count.
@@ -1785,26 +1788,17 @@ public:
}
/// Emit the body of an OMP region
- /// \param CGF The Codegen function this belongs to
- /// \param RegionBodyStmt The body statement for the OpenMP region being
- /// generated
- /// \param CodeGenIP Insertion point for generating the body code.
- /// \param FiniBB The finalization basic block
- static void EmitOMPRegionBody(CodeGenFunction &CGF,
- const Stmt *RegionBodyStmt,
- InsertPointTy CodeGenIP,
- llvm::BasicBlock &FiniBB) {
- llvm::BasicBlock *CodeGenIPBB = CodeGenIP.getBlock();
- if (llvm::Instruction *CodeGenIPBBTI = CodeGenIPBB->getTerminator())
- CodeGenIPBBTI->eraseFromParent();
-
- CGF.Builder.SetInsertPoint(CodeGenIPBB);
-
- CGF.EmitStmt(RegionBodyStmt);
-
- if (CGF.Builder.saveIP().isSet())
- CGF.Builder.CreateBr(&FiniBB);
- }
+ /// \param CGF The Codegen function this belongs to
+ /// \param RegionBodyStmt The body statement for the OpenMP region being
+ /// generated
+ /// \param AllocaIP Where to insert alloca instructions
+ /// \param CodeGenIP Where to insert the region code
+ /// \param RegionName Name to be used for new blocks
+ static void EmitOMPInlinedRegionBody(CodeGenFunction &CGF,
+ const Stmt *RegionBodyStmt,
+ InsertPointTy AllocaIP,
+ InsertPointTy CodeGenIP,
+ Twine RegionName);
static void EmitCaptureStmt(CodeGenFunction &CGF, InsertPointTy CodeGenIP,
llvm::BasicBlock &FiniBB, llvm::Function *Fn,
@@ -1824,12 +1818,25 @@ public:
CGF.Builder.CreateBr(&FiniBB);
}
+ /// Emit the body of an OMP region that will be outlined in
+ /// OpenMPIRBuilder::finalize().
+ /// \param CGF The Codegen function this belongs to
+ /// \param RegionBodyStmt The body statement for the OpenMP region being
+ /// generated
+ /// \param AllocaIP Where to insert alloca instructions
+ /// \param CodeGenIP Where to insert the region code
+ /// \param RegionName Name to be used for new blocks
+ static void EmitOMPOutlinedRegionBody(CodeGenFunction &CGF,
+ const Stmt *RegionBodyStmt,
+ InsertPointTy AllocaIP,
+ InsertPointTy CodeGenIP,
+ Twine RegionName);
+
/// RAII for preserving necessary info during Outlined region body codegen.
class OutlinedRegionBodyRAII {
llvm::AssertingVH<llvm::Instruction> OldAllocaIP;
CodeGenFunction::JumpDest OldReturnBlock;
- CGBuilderTy::InsertPoint IP;
CodeGenFunction &CGF;
public:
@@ -1840,7 +1847,6 @@ public:
"Must specify Insertion point for allocas of outlined function");
OldAllocaIP = CGF.AllocaInsertPt;
CGF.AllocaInsertPt = &*AllocaIP.getPoint();
- IP = CGF.Builder.saveIP();
OldReturnBlock = CGF.ReturnBlock;
CGF.ReturnBlock = CGF.getJumpDestInCurrentScope(&RetBB);
@@ -1849,7 +1855,6 @@ public:
~OutlinedRegionBodyRAII() {
CGF.AllocaInsertPt = OldAllocaIP;
CGF.ReturnBlock = OldReturnBlock;
- CGF.Builder.restoreIP(IP);
}
};
@@ -1963,8 +1968,7 @@ private:
/// Add OpenCL kernel arg metadata and the kernel attribute metadata to
/// the function metadata.
- void EmitOpenCLKernelMetadata(const FunctionDecl *FD,
- llvm::Function *Fn);
+ void EmitKernelMetadata(const FunctionDecl *FD, llvm::Function *Fn);
public:
CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext=false);
@@ -2296,9 +2300,8 @@ public:
/// Derived is the presumed address of an object of type T after a
/// cast. If T is a polymorphic class type, emit a check that the virtual
/// table for Derived belongs to a class derived from T.
- void EmitVTablePtrCheckForCast(QualType T, llvm::Value *Derived,
- bool MayBeNull, CFITypeCheckKind TCK,
- SourceLocation Loc);
+ void EmitVTablePtrCheckForCast(QualType T, Address Derived, bool MayBeNull,
+ CFITypeCheckKind TCK, SourceLocation Loc);
/// EmitVTablePtrCheckForCall - Virtual method MD is being called via VTable.
/// If vptr CFI is enabled, emit a check that VTable is valid.
@@ -2322,7 +2325,9 @@ public:
bool ShouldEmitVTableTypeCheckedLoad(const CXXRecordDecl *RD);
/// Emit a type checked load from the given vtable.
- llvm::Value *EmitVTableTypeCheckedLoad(const CXXRecordDecl *RD, llvm::Value *VTable,
+ llvm::Value *EmitVTableTypeCheckedLoad(const CXXRecordDecl *RD,
+ llvm::Value *VTable,
+ llvm::Type *VTableTy,
uint64_t VTableByteOffset);
/// EnterDtorCleanups - Enter the cleanups necessary to complete the
@@ -2351,10 +2356,6 @@ public:
/// XRay typed event handling calls.
bool AlwaysEmitXRayTypedEvents() const;
- /// Encode an address into a form suitable for use in a function prologue.
- llvm::Constant *EncodeAddrForUseInPrologue(llvm::Function *F,
- llvm::Constant *Addr);
-
/// Decode an address used in a function prologue, encoded by \c
/// EncodeAddrForUseInPrologue.
llvm::Value *DecodeAddrUsedInPrologue(llvm::Value *F,
@@ -2520,6 +2521,9 @@ public:
return EmitLoadOfReferenceLValue(RefLVal);
}
+ /// Load a pointer with type \p PtrTy stored at address \p Ptr.
+ /// Note that \p PtrTy is the type of the loaded pointer, not the address
+ /// it is loaded from.
Address EmitLoadOfPointer(Address Ptr, const PointerType *PtrTy,
LValueBaseInfo *BaseInfo = nullptr,
TBAAAccessInfo *TBAAInfo = nullptr);
@@ -3486,7 +3490,11 @@ public:
void EmitOMPTargetTaskBasedDirective(const OMPExecutableDirective &S,
const RegionCodeGenTy &BodyGen,
OMPTargetDataInfo &InputInfo);
-
+ void processInReduction(const OMPExecutableDirective &S,
+ OMPTaskDataTy &Data,
+ CodeGenFunction &CGF,
+ const CapturedStmt *CS,
+ OMPPrivateScope &Scope);
void EmitOMPMetaDirective(const OMPMetaDirective &S);
void EmitOMPParallelDirective(const OMPParallelDirective &S);
void EmitOMPSimdDirective(const OMPSimdDirective &S);
@@ -3905,6 +3913,7 @@ public:
LValue EmitObjCIsaExpr(const ObjCIsaExpr *E);
LValue EmitCompoundLiteralLValue(const CompoundLiteralExpr *E);
LValue EmitInitListLValue(const InitListExpr *E);
+ void EmitIgnoredConditionalOperator(const AbstractConditionalOperator *E);
LValue EmitConditionalOperatorLValue(const AbstractConditionalOperator *E);
LValue EmitCastLValue(const CastExpr *E);
LValue EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *E);
@@ -4643,6 +4652,11 @@ public:
/// Set the codegen fast-math flags.
void SetFastMathFlags(FPOptions FPFeatures);
+ // Truncate or extend a boolean vector to the requested number of elements.
+ llvm::Value *emitBoolVecConversion(llvm::Value *SrcVec,
+ unsigned NumElementsDst,
+ const llvm::Twine &Name = "");
+
private:
llvm::MDNode *getRangeForLoadFromType(QualType Ty);
void EmitReturnOfRValue(RValue RV, QualType Ty);
@@ -4796,76 +4810,6 @@ private:
llvm::Value *FormResolverCondition(const MultiVersionResolverOption &RO);
};
-/// TargetFeatures - This class is used to check whether the builtin function
-/// has the required tagert specific features. It is able to support the
-/// combination of ','(and), '|'(or), and '()'. By default, the priority of
-/// ',' is higher than that of '|' .
-/// E.g:
-/// A,B|C means the builtin function requires both A and B, or C.
-/// If we want the builtin function requires both A and B, or both A and C,
-/// there are two ways: A,B|A,C or A,(B|C).
-/// The FeaturesList should not contain spaces, and brackets must appear in
-/// pairs.
-class TargetFeatures {
- struct FeatureListStatus {
- bool HasFeatures;
- StringRef CurFeaturesList;
- };
-
- const llvm::StringMap<bool> &CallerFeatureMap;
-
- FeatureListStatus getAndFeatures(StringRef FeatureList) {
- int InParentheses = 0;
- bool HasFeatures = true;
- size_t SubexpressionStart = 0;
- for (size_t i = 0, e = FeatureList.size(); i < e; ++i) {
- char CurrentToken = FeatureList[i];
- switch (CurrentToken) {
- default:
- break;
- case '(':
- if (InParentheses == 0)
- SubexpressionStart = i + 1;
- ++InParentheses;
- break;
- case ')':
- --InParentheses;
- assert(InParentheses >= 0 && "Parentheses are not in pair");
- LLVM_FALLTHROUGH;
- case '|':
- case ',':
- if (InParentheses == 0) {
- if (HasFeatures && i != SubexpressionStart) {
- StringRef F = FeatureList.slice(SubexpressionStart, i);
- HasFeatures = CurrentToken == ')' ? hasRequiredFeatures(F)
- : CallerFeatureMap.lookup(F);
- }
- SubexpressionStart = i + 1;
- if (CurrentToken == '|') {
- return {HasFeatures, FeatureList.substr(SubexpressionStart)};
- }
- }
- break;
- }
- }
- assert(InParentheses == 0 && "Parentheses are not in pair");
- if (HasFeatures && SubexpressionStart != FeatureList.size())
- HasFeatures =
- CallerFeatureMap.lookup(FeatureList.substr(SubexpressionStart));
- return {HasFeatures, StringRef()};
- }
-
-public:
- bool hasRequiredFeatures(StringRef FeatureList) {
- FeatureListStatus FS = {false, FeatureList};
- while (!FS.HasFeatures && !FS.CurFeaturesList.empty())
- FS = getAndFeatures(FS.CurFeaturesList);
- return FS.HasFeatures;
- }
-
- TargetFeatures(const llvm::StringMap<bool> &CallerFeatureMap)
- : CallerFeatureMap(CallerFeatureMap) {}
-};
inline DominatingLLVMValue::saved_type
DominatingLLVMValue::save(CodeGenFunction &CGF, llvm::Value *value) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
index 2777fc22600d..56ed59d1e3f1 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.cpp
@@ -16,6 +16,7 @@
#include "CGCXXABI.h"
#include "CGCall.h"
#include "CGDebugInfo.h"
+#include "CGHLSLRuntime.h"
#include "CGObjCRuntime.h"
#include "CGOpenCLRuntime.h"
#include "CGOpenMPRuntime.h"
@@ -43,6 +44,7 @@
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Basic/Version.h"
+#include "clang/CodeGen/BackendUtil.h"
#include "clang/CodeGen/ConstantInitBuilder.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/ADT/StringSwitch.h"
@@ -68,9 +70,8 @@ using namespace clang;
using namespace CodeGen;
static llvm::cl::opt<bool> LimitedCoverage(
- "limited-coverage-experimental", llvm::cl::ZeroOrMore, llvm::cl::Hidden,
- llvm::cl::desc("Emit limited coverage mapping information (experimental)"),
- llvm::cl::init(false));
+ "limited-coverage-experimental", llvm::cl::Hidden,
+ llvm::cl::desc("Emit limited coverage mapping information (experimental)"));
static const char AnnotationSection[] = "llvm.metadata";
@@ -145,6 +146,8 @@ CodeGenModule::CodeGenModule(ASTContext &C, const HeaderSearchOptions &HSO,
createOpenMPRuntime();
if (LangOpts.CUDA)
createCUDARuntime();
+ if (LangOpts.HLSL)
+ createHLSLRuntime();
// Enable TBAA unless it's suppressed. ThreadSanitizer needs TBAA even at O0.
if (LangOpts.Sanitize.has(SanitizerKind::Thread) ||
@@ -261,6 +264,10 @@ void CodeGenModule::createCUDARuntime() {
CUDARuntime.reset(CreateNVCUDARuntime(*this));
}
+void CodeGenModule::createHLSLRuntime() {
+ HLSLRuntime.reset(new CGHLSLRuntime(*this));
+}
+
void CodeGenModule::addReplacement(StringRef Name, llvm::Constant *C) {
Replacements[Name] = C;
}
@@ -506,7 +513,6 @@ void CodeGenModule::Release() {
EmitVTablesOpportunistically();
applyGlobalValReplacements();
applyReplacements();
- checkAliases();
emitMultiVersionFunctions();
EmitCXXGlobalInitFunc();
EmitCXXGlobalCleanUpFunc();
@@ -538,6 +544,7 @@ void CodeGenModule::Release() {
EmitCtorList(GlobalDtors, "llvm.global_dtors");
EmitGlobalAnnotations();
EmitStaticExternCAliases();
+ checkAliases();
EmitDeferredUnusedCoverageMappings();
CodeGenPGO(*this).setValueProfilingFlag(getModule());
if (CoverageMapping)
@@ -547,27 +554,57 @@ void CodeGenModule::Release() {
CodeGenFunction(*this).EmitCfiCheckStub();
}
emitAtAvailableLinkGuard();
- if (Context.getTargetInfo().getTriple().isWasm() &&
- !Context.getTargetInfo().getTriple().isOSEmscripten()) {
+ if (Context.getTargetInfo().getTriple().isWasm())
EmitMainVoidAlias();
+
+ if (getTriple().isAMDGPU()) {
+ // Emit reference of __amdgpu_device_library_preserve_asan_functions to
+ // preserve ASAN functions in bitcode libraries.
+ if (LangOpts.Sanitize.has(SanitizerKind::Address)) {
+ auto *FT = llvm::FunctionType::get(VoidTy, {});
+ auto *F = llvm::Function::Create(
+ FT, llvm::GlobalValue::ExternalLinkage,
+ "__amdgpu_device_library_preserve_asan_functions", &getModule());
+ auto *Var = new llvm::GlobalVariable(
+ getModule(), FT->getPointerTo(),
+ /*isConstant=*/true, llvm::GlobalValue::WeakAnyLinkage, F,
+ "__amdgpu_device_library_preserve_asan_functions_ptr", nullptr,
+ llvm::GlobalVariable::NotThreadLocal);
+ addCompilerUsedGlobal(Var);
+ }
+ // Emit amdgpu_code_object_version module flag, which is code object version
+ // times 100.
+ // ToDo: Enable module flag for all code object version when ROCm device
+ // library is ready.
+ if (getTarget().getTargetOpts().CodeObjectVersion == TargetOptions::COV_5) {
+ getModule().addModuleFlag(llvm::Module::Error,
+ "amdgpu_code_object_version",
+ getTarget().getTargetOpts().CodeObjectVersion);
+ }
}
- // Emit reference of __amdgpu_device_library_preserve_asan_functions to
- // preserve ASAN functions in bitcode libraries.
- if (LangOpts.Sanitize.has(SanitizerKind::Address) && getTriple().isAMDGPU()) {
- auto *FT = llvm::FunctionType::get(VoidTy, {});
- auto *F = llvm::Function::Create(
- FT, llvm::GlobalValue::ExternalLinkage,
- "__amdgpu_device_library_preserve_asan_functions", &getModule());
- auto *Var = new llvm::GlobalVariable(
- getModule(), FT->getPointerTo(),
- /*isConstant=*/true, llvm::GlobalValue::WeakAnyLinkage, F,
- "__amdgpu_device_library_preserve_asan_functions_ptr", nullptr,
- llvm::GlobalVariable::NotThreadLocal);
- addCompilerUsedGlobal(Var);
- if (!getModule().getModuleFlag("amdgpu_hostcall")) {
- getModule().addModuleFlag(llvm::Module::Override, "amdgpu_hostcall", 1);
+ // Emit a global array containing all external kernels or device variables
+ // used by host functions and mark it as used for CUDA/HIP. This is necessary
+ // to get kernels or device variables in archives linked in even if these
+ // kernels or device variables are only used in host functions.
+ if (!Context.CUDAExternalDeviceDeclODRUsedByHost.empty()) {
+ SmallVector<llvm::Constant *, 8> UsedArray;
+ for (auto D : Context.CUDAExternalDeviceDeclODRUsedByHost) {
+ GlobalDecl GD;
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
+ GD = GlobalDecl(FD, KernelReferenceKind::Kernel);
+ else
+ GD = GlobalDecl(D);
+ UsedArray.push_back(llvm::ConstantExpr::getPointerBitCastOrAddrSpaceCast(
+ GetAddrOfGlobal(GD), Int8PtrTy));
}
+
+ llvm::ArrayType *ATy = llvm::ArrayType::get(Int8PtrTy, UsedArray.size());
+
+ auto *GV = new llvm::GlobalVariable(
+ getModule(), ATy, false, llvm::GlobalValue::InternalLinkage,
+ llvm::ConstantArray::get(ATy, UsedArray), "__clang_gpu_used_external");
+ addCompilerUsedGlobal(GV);
}
emitLLVMUsed();
@@ -720,13 +757,16 @@ void CodeGenModule::Release() {
// attributes, but we use module metadata to emit build attributes. This is
// needed for LTO, where the function attributes are inside bitcode
// serialised into a global variable by the time build attributes are
- // emitted, so we can't access them.
+ // emitted, so we can't access them. LTO objects could be compiled with
+ // different flags; therefore module flags are set to "Min" behavior to achieve
+ // the same end result of the normal build where e.g BTI is off if any object
+ // doesn't support it.
if (Context.getTargetInfo().hasFeature("ptrauth") &&
LangOpts.getSignReturnAddressScope() !=
LangOptions::SignReturnAddressScopeKind::None)
getModule().addModuleFlag(llvm::Module::Override,
"sign-return-address-buildattr", 1);
- if (LangOpts.Sanitize.has(SanitizerKind::MemTag))
+ if (LangOpts.Sanitize.has(SanitizerKind::MemtagStack))
getModule().addModuleFlag(llvm::Module::Override,
"tag-stack-memory-buildattr", 1);
@@ -734,16 +774,16 @@ void CodeGenModule::Release() {
Arch == llvm::Triple::arm || Arch == llvm::Triple::armeb ||
Arch == llvm::Triple::aarch64 || Arch == llvm::Triple::aarch64_32 ||
Arch == llvm::Triple::aarch64_be) {
- getModule().addModuleFlag(llvm::Module::Error, "branch-target-enforcement",
+ getModule().addModuleFlag(llvm::Module::Min, "branch-target-enforcement",
LangOpts.BranchTargetEnforcement);
- getModule().addModuleFlag(llvm::Module::Error, "sign-return-address",
+ getModule().addModuleFlag(llvm::Module::Min, "sign-return-address",
LangOpts.hasSignReturnAddress());
- getModule().addModuleFlag(llvm::Module::Error, "sign-return-address-all",
+ getModule().addModuleFlag(llvm::Module::Min, "sign-return-address-all",
LangOpts.isSignReturnAddressScopeAll());
- getModule().addModuleFlag(llvm::Module::Error,
+ getModule().addModuleFlag(llvm::Module::Min,
"sign-return-address-with-bkey",
!LangOpts.isSignReturnAddressWithAKey());
}
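The switch from "Error" to "Min" matters when LTO links objects built with different branch-protection settings. A minimal standalone sketch of the merge semantics (assumes an LLVM that has llvm::Module::Min, i.e. LLVM 15; not part of this patch):

    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Linker/Linker.h"
    #include "llvm/Support/raw_ostream.h"
    #include <memory>

    int main() {
      llvm::LLVMContext Ctx;
      auto Dst = std::make_unique<llvm::Module>("with-bti", Ctx);
      auto Src = std::make_unique<llvm::Module>("without-bti", Ctx);
      Dst->addModuleFlag(llvm::Module::Min, "branch-target-enforcement", 1);
      Src->addModuleFlag(llvm::Module::Min, "branch-target-enforcement", 0);
      llvm::Linker L(*Dst);
      L.linkInModule(std::move(Src));      // merged flag is min(1, 0) == 0
      Dst->print(llvm::outs(), nullptr);   // BTI ends up off, matching a
    }                                      // non-LTO build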
@@ -775,7 +815,7 @@ void CodeGenModule::Release() {
LangOpts.OpenMP);
// Emit OpenCL specific module metadata: OpenCL/SPIR version.
- if (LangOpts.OpenCL) {
+ if (LangOpts.OpenCL || (LangOpts.CUDAIsDevice && getTriple().isSPIRV())) {
EmitOpenCLMetadata();
// Emit SPIR version.
if (getTriple().isSPIR()) {
@@ -796,6 +836,10 @@ void CodeGenModule::Release() {
}
}
+ // HLSL related end of code gen work items.
+ if (LangOpts.HLSL)
+ getHLSLRuntime().finishCodeGen();
+
if (uint32_t PLevel = Context.getLangOpts().PICLevel) {
assert(PLevel < 3 && "Invalid PIC Level");
getModule().setPICLevel(static_cast<llvm::PICLevel::Level>(PLevel));
@@ -820,7 +864,7 @@ void CodeGenModule::Release() {
if (CodeGenOpts.NoPLT)
getModule().setRtLibUseGOT();
if (CodeGenOpts.UnwindTables)
- getModule().setUwtable();
+ getModule().setUwtable(llvm::UWTableKind(CodeGenOpts.UnwindTables));
switch (CodeGenOpts.getFramePointer()) {
case CodeGenOptions::FramePointerKind::None:
@@ -868,6 +912,9 @@ void CodeGenModule::Release() {
EmitBackendOptionsMetadata(getCodeGenOpts());
+ // If there is device offloading code embed it in the host now.
+ EmbedObject(&getModule(), CodeGenOpts, getDiags());
+
// Set visibility from DLL storage class
// We do this at the end of LLVM IR generation; after any operation
// that might affect the DLL storage class or the visibility, and
@@ -1167,7 +1214,9 @@ void CodeGenModule::setDLLImportDLLExport(llvm::GlobalValue *GV,
if (D && D->isExternallyVisible()) {
if (D->hasAttr<DLLImportAttr>())
GV->setDLLStorageClass(llvm::GlobalVariable::DLLImportStorageClass);
- else if (D->hasAttr<DLLExportAttr>() && !GV->isDeclarationForLinker())
+ else if ((D->hasAttr<DLLExportAttr>() ||
+ shouldMapVisibilityToDLLExport(D)) &&
+ !GV->isDeclarationForLinker())
GV->setDLLStorageClass(llvm::GlobalVariable::DLLExportStorageClass);
}
}
@@ -1364,10 +1413,11 @@ static std::string getMangledNameImpl(CodeGenModule &CGM, GlobalDecl GD,
}
// Make a unique name for the device-side static file-scope variable for HIP.
- if (CGM.getContext().shouldExternalizeStaticVar(ND) &&
+ if (CGM.getContext().shouldExternalize(ND) &&
CGM.getLangOpts().GPURelocatableDeviceCode &&
- CGM.getLangOpts().CUDAIsDevice && !CGM.getLangOpts().CUID.empty())
+ CGM.getLangOpts().CUDAIsDevice)
CGM.printPostfixForExternalizedDecl(Out, ND);
+
return std::string(Out.str());
}
@@ -1434,8 +1484,7 @@ StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
// static device variable depends on whether the variable is referenced by
// a host or a device function. Therefore the mangled name cannot be
// cached.
- if (!LangOpts.CUDAIsDevice ||
- !getContext().mayExternalizeStaticVar(GD.getDecl())) {
+ if (!LangOpts.CUDAIsDevice || !getContext().mayExternalize(GD.getDecl())) {
auto FoundName = MangledDeclNames.find(CanonicalGD);
if (FoundName != MangledDeclNames.end())
return FoundName->second;
@@ -1455,7 +1504,7 @@ StringRef CodeGenModule::getMangledName(GlobalDecl GD) {
// directly between host- and device-compilations, the host- and
// device-mangling in host compilation could help catching certain ones.
assert(!isa<FunctionDecl>(ND) || !ND->hasAttr<CUDAGlobalAttr>() ||
- getContext().shouldExternalizeStaticVar(ND) || getLangOpts().CUDAIsDevice ||
+ getContext().shouldExternalize(ND) || getLangOpts().CUDAIsDevice ||
(getContext().getAuxTargetInfo() &&
(getContext().getAuxTargetInfo()->getCXXABI() !=
getContext().getTargetInfo().getCXXABI())) ||
@@ -1490,6 +1539,16 @@ StringRef CodeGenModule::getBlockMangledName(GlobalDecl GD,
return Result.first->first();
}
+const GlobalDecl CodeGenModule::getMangledNameDecl(StringRef Name) {
+ auto it = MangledDeclNames.begin();
+ while (it != MangledDeclNames.end()) {
+ if (it->second == Name)
+ return it->first;
+ it++;
+ }
+ return GlobalDecl();
+}
+
llvm::GlobalValue *CodeGenModule::GetGlobalValue(StringRef Name) {
return getModule().getNamedValue(Name);
}
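The new getMangledNameDecl helper is a linear scan over MangledDeclNames, so it is only suitable for cold paths. A hypothetical call site (CGM is a CodeGenModule reference; the symbol name is made up):

    GlobalDecl GD = CGM.getMangledNameDecl("_Z6kernelv");
    if (const auto *FD = dyn_cast_or_null<FunctionDecl>(GD.getDecl()))
      llvm::errs() << "symbol belongs to " << FD->getName() << "\n";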
@@ -1638,7 +1697,7 @@ static unsigned ArgInfoAddressSpace(LangAS AS) {
}
}
-void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
+void CodeGenModule::GenKernelArgMetadata(llvm::Function *Fn,
const FunctionDecl *FD,
CodeGenFunction *CGF) {
assert(((FD && CGF) || (!FD && !CGF)) &&
@@ -1670,6 +1729,11 @@ void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
if (FD && CGF)
for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
const ParmVarDecl *parm = FD->getParamDecl(i);
+ // Get argument name.
+ argNames.push_back(llvm::MDString::get(VMContext, parm->getName()));
+
+ if (!getLangOpts().OpenCL)
+ continue;
QualType ty = parm->getType();
std::string typeQuals;
@@ -1688,9 +1752,6 @@ void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
} else
accessQuals.push_back(llvm::MDString::get(VMContext, "none"));
- // Get argument name.
- argNames.push_back(llvm::MDString::get(VMContext, parm->getName()));
-
auto getTypeSpelling = [&](QualType Ty) {
auto typeName = Ty.getUnqualifiedType().getAsString(Policy);
@@ -1763,17 +1824,20 @@ void CodeGenModule::GenOpenCLArgMetadata(llvm::Function *Fn,
argTypeQuals.push_back(llvm::MDString::get(VMContext, typeQuals));
}
- Fn->setMetadata("kernel_arg_addr_space",
- llvm::MDNode::get(VMContext, addressQuals));
- Fn->setMetadata("kernel_arg_access_qual",
- llvm::MDNode::get(VMContext, accessQuals));
- Fn->setMetadata("kernel_arg_type",
- llvm::MDNode::get(VMContext, argTypeNames));
- Fn->setMetadata("kernel_arg_base_type",
- llvm::MDNode::get(VMContext, argBaseTypeNames));
- Fn->setMetadata("kernel_arg_type_qual",
- llvm::MDNode::get(VMContext, argTypeQuals));
- if (getCodeGenOpts().EmitOpenCLArgMetadata)
+ if (getLangOpts().OpenCL) {
+ Fn->setMetadata("kernel_arg_addr_space",
+ llvm::MDNode::get(VMContext, addressQuals));
+ Fn->setMetadata("kernel_arg_access_qual",
+ llvm::MDNode::get(VMContext, accessQuals));
+ Fn->setMetadata("kernel_arg_type",
+ llvm::MDNode::get(VMContext, argTypeNames));
+ Fn->setMetadata("kernel_arg_base_type",
+ llvm::MDNode::get(VMContext, argBaseTypeNames));
+ Fn->setMetadata("kernel_arg_type_qual",
+ llvm::MDNode::get(VMContext, argTypeQuals));
+ }
+ if (getCodeGenOpts().EmitOpenCLArgMetadata ||
+ getCodeGenOpts().HIPSaveKernelArgName)
Fn->setMetadata("kernel_arg_name",
llvm::MDNode::get(VMContext, argNames));
}
@@ -1826,12 +1890,28 @@ CodeGenModule::getMostBaseClasses(const CXXRecordDecl *RD) {
return MostBases.takeVector();
}
+llvm::GlobalVariable *
+CodeGenModule::GetOrCreateRTTIProxyGlobalVariable(llvm::Constant *Addr) {
+ auto It = RTTIProxyMap.find(Addr);
+ if (It != RTTIProxyMap.end())
+ return It->second;
+
+ auto *FTRTTIProxy = new llvm::GlobalVariable(
+ TheModule, Addr->getType(),
+ /*isConstant=*/true, llvm::GlobalValue::PrivateLinkage, Addr,
+ "__llvm_rtti_proxy");
+ FTRTTIProxy->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+
+ RTTIProxyMap[Addr] = FTRTTIProxy;
+ return FTRTTIProxy;
+}
+
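RTTIProxyMap memoizes one private __llvm_rtti_proxy per RTTI constant, so repeated requests share a single global. A sketch of the invariant (SomeType stands in for an arbitrary QualType; CGM is a CodeGenModule reference):

    llvm::Constant *RTTI = CGM.GetAddrOfRTTIDescriptor(SomeType);
    llvm::GlobalVariable *P1 = CGM.GetOrCreateRTTIProxyGlobalVariable(RTTI);
    llvm::GlobalVariable *P2 = CGM.GetOrCreateRTTIProxyGlobalVariable(RTTI);
    assert(P1 == P2 && "same RTTI constant yields the same proxy");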
void CodeGenModule::SetLLVMFunctionAttributesForDefinition(const Decl *D,
llvm::Function *F) {
llvm::AttrBuilder B(F->getContext());
if (CodeGenOpts.UnwindTables)
- B.addAttribute(llvm::Attribute::UWTable);
+ B.addUWTableAttr(llvm::UWTableKind(CodeGenOpts.UnwindTables));
if (CodeGenOpts.StackClashProtector)
B.addAttribute("probe-stack", "inline-asm");
@@ -2052,6 +2132,13 @@ bool CodeGenModule::GetCPUAndFeaturesAttributes(GlobalDecl GD,
getTarget().isValidCPUName(ParsedAttr.Tune))
TuneCPU = ParsedAttr.Tune;
}
+
+ if (SD) {
+ // Apply the given CPU name as the 'tune-cpu' so that the optimizer can
+ // favor this processor.
+ TuneCPU = getTarget().getCPUSpecificTuneName(
+ SD->getCPUName(GD.getMultiVersionIndex())->getName());
+ }
} else {
// Otherwise just add the existing target cpu and target features to the
// function.
@@ -2489,8 +2576,8 @@ void CodeGenModule::EmitDeferred() {
// Note we should not clear CUDADeviceVarODRUsedByHost since it is still
// needed for further handling.
if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice)
- for (const auto *V : getContext().CUDADeviceVarODRUsedByHost)
- DeferredDeclsToEmit.push_back(V);
+ llvm::append_range(DeferredDeclsToEmit,
+ getContext().CUDADeviceVarODRUsedByHost);
// Stop if we're out of both deferred vtables and deferred declarations.
if (DeferredDeclsToEmit.empty())
@@ -2694,21 +2781,14 @@ bool CodeGenModule::isInNoSanitizeList(SanitizerMask Kind, llvm::Function *Fn,
return false;
}
-bool CodeGenModule::isInNoSanitizeList(llvm::GlobalVariable *GV,
+bool CodeGenModule::isInNoSanitizeList(SanitizerMask Kind,
+ llvm::GlobalVariable *GV,
SourceLocation Loc, QualType Ty,
StringRef Category) const {
- // For now globals can be ignored only in ASan and KASan.
- const SanitizerMask EnabledAsanMask =
- LangOpts.Sanitize.Mask &
- (SanitizerKind::Address | SanitizerKind::KernelAddress |
- SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress |
- SanitizerKind::MemTag);
- if (!EnabledAsanMask)
- return false;
const auto &NoSanitizeL = getContext().getNoSanitizeList();
- if (NoSanitizeL.containsGlobal(EnabledAsanMask, GV->getName(), Category))
+ if (NoSanitizeL.containsGlobal(Kind, GV->getName(), Category))
return true;
- if (NoSanitizeL.containsLocation(EnabledAsanMask, Loc, Category))
+ if (NoSanitizeL.containsLocation(Kind, Loc, Category))
return true;
// Check global type.
if (!Ty.isNull()) {
@@ -2720,7 +2800,7 @@ bool CodeGenModule::isInNoSanitizeList(llvm::GlobalVariable *GV,
// Only record types (classes, structs etc.) are ignored.
if (Ty->isRecordType()) {
std::string TypeStr = Ty.getAsString(getContext().getPrintingPolicy());
- if (NoSanitizeL.containsType(EnabledAsanMask, TypeStr, Category))
+ if (NoSanitizeL.containsType(Kind, TypeStr, Category))
return true;
}
}
@@ -2762,12 +2842,12 @@ bool CodeGenModule::isProfileInstrExcluded(llvm::Function *Fn,
CodeGenOptions::ProfileInstrKind Kind = getCodeGenOpts().getProfileInstr();
// First, check the function name.
Optional<bool> V = ProfileList.isFunctionExcluded(Fn->getName(), Kind);
- if (V.hasValue())
+ if (V)
return *V;
// Next, check the source location.
if (Loc.isValid()) {
Optional<bool> V = ProfileList.isLocationExcluded(Loc, Kind);
- if (V.hasValue())
+ if (V)
return *V;
}
// If location is unknown, this may be a compiler-generated function. Assume
@@ -2775,7 +2855,7 @@ bool CodeGenModule::isProfileInstrExcluded(llvm::Function *Fn,
auto &SM = Context.getSourceManager();
if (const auto *MainFile = SM.getFileEntryForID(SM.getMainFileID())) {
Optional<bool> V = ProfileList.isFileExcluded(MainFile->getName(), Kind);
- if (V.hasValue())
+ if (V)
return *V;
}
return ProfileList.getDefault();
@@ -2886,6 +2966,37 @@ ConstantAddress CodeGenModule::GetAddrOfMSGuidDecl(const MSGuidDecl *GD) {
return ConstantAddress(Addr, Ty, Alignment);
}
+ConstantAddress CodeGenModule::GetAddrOfUnnamedGlobalConstantDecl(
+ const UnnamedGlobalConstantDecl *GCD) {
+ CharUnits Alignment = getContext().getTypeAlignInChars(GCD->getType());
+
+ llvm::GlobalVariable **Entry = nullptr;
+ Entry = &UnnamedGlobalConstantDeclMap[GCD];
+ if (*Entry)
+ return ConstantAddress(*Entry, (*Entry)->getValueType(), Alignment);
+
+ ConstantEmitter Emitter(*this);
+ llvm::Constant *Init;
+
+ const APValue &V = GCD->getValue();
+
+ assert(!V.isAbsent());
+ Init = Emitter.emitForInitializer(V, GCD->getType().getAddressSpace(),
+ GCD->getType());
+
+ auto *GV = new llvm::GlobalVariable(getModule(), Init->getType(),
+ /*isConstant=*/true,
+ llvm::GlobalValue::PrivateLinkage, Init,
+ ".constant");
+ GV->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
+ GV->setAlignment(Alignment.getAsAlign());
+
+ Emitter.finalize(GV);
+
+ *Entry = GV;
+ return ConstantAddress(GV, GV->getValueType(), Alignment);
+}
+
ConstantAddress CodeGenModule::GetAddrOfTemplateParamObject(
const TemplateParamObjectDecl *TPO) {
StringRef Name = getMangledName(TPO);
@@ -3270,13 +3381,13 @@ void CodeGenModule::EmitMultiVersionFunctionDefinition(GlobalDecl GD,
auto *Spec = FD->getAttr<CPUSpecificAttr>();
for (unsigned I = 0; I < Spec->cpus_size(); ++I)
EmitGlobalFunctionDefinition(GD.getWithMultiVersionIndex(I), nullptr);
- // Requires multiple emits.
} else if (FD->isTargetClonesMultiVersion()) {
auto *Clone = FD->getAttr<TargetClonesAttr>();
for (unsigned I = 0; I < Clone->featuresStrs_size(); ++I)
if (Clone->isFirstOfVersion(I))
EmitGlobalFunctionDefinition(GD.getWithMultiVersionIndex(I), nullptr);
- EmitTargetClonesResolver(GD);
+ // Ensure that the resolver function is also emitted.
+ GetOrCreateMultiVersionResolver(GD);
} else
EmitGlobalFunctionDefinition(GD, GV);
}
@@ -3358,112 +3469,94 @@ llvm::GlobalValue::LinkageTypes getMultiversionLinkage(CodeGenModule &CGM,
return llvm::GlobalValue::WeakODRLinkage;
}
-void CodeGenModule::EmitTargetClonesResolver(GlobalDecl GD) {
- const auto *FD = cast<FunctionDecl>(GD.getDecl());
- assert(FD && "Not a FunctionDecl?");
- const auto *TC = FD->getAttr<TargetClonesAttr>();
- assert(TC && "Not a target_clones Function?");
-
- QualType CanonTy = Context.getCanonicalType(FD->getType());
- llvm::Type *DeclTy = getTypes().ConvertType(CanonTy);
-
- if (const auto *CXXFD = dyn_cast<CXXMethodDecl>(FD)) {
- const CGFunctionInfo &FInfo = getTypes().arrangeCXXMethodDeclaration(CXXFD);
- DeclTy = getTypes().GetFunctionType(FInfo);
- }
-
- llvm::Function *ResolverFunc;
- if (getTarget().supportsIFunc()) {
- auto *IFunc = cast<llvm::GlobalIFunc>(
- GetOrCreateMultiVersionResolver(GD, DeclTy, FD));
- ResolverFunc = cast<llvm::Function>(IFunc->getResolver());
- } else
- ResolverFunc =
- cast<llvm::Function>(GetOrCreateMultiVersionResolver(GD, DeclTy, FD));
-
- SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
- for (unsigned VersionIndex = 0; VersionIndex < TC->featuresStrs_size();
- ++VersionIndex) {
- if (!TC->isFirstOfVersion(VersionIndex))
- continue;
- StringRef Version = TC->getFeatureStr(VersionIndex);
- StringRef MangledName =
- getMangledName(GD.getWithMultiVersionIndex(VersionIndex));
- llvm::Constant *Func = GetGlobalValue(MangledName);
- assert(Func &&
- "Should have already been created before calling resolver emit");
-
- StringRef Architecture;
- llvm::SmallVector<StringRef, 1> Feature;
-
- if (Version.startswith("arch="))
- Architecture = Version.drop_front(sizeof("arch=") - 1);
- else if (Version != "default")
- Feature.push_back(Version);
-
- Options.emplace_back(cast<llvm::Function>(Func), Architecture, Feature);
- }
-
- const TargetInfo &TI = getTarget();
- std::stable_sort(
- Options.begin(), Options.end(),
- [&TI](const CodeGenFunction::MultiVersionResolverOption &LHS,
- const CodeGenFunction::MultiVersionResolverOption &RHS) {
- return TargetMVPriority(TI, LHS) > TargetMVPriority(TI, RHS);
- });
- CodeGenFunction CGF(*this);
- CGF.EmitMultiVersionResolver(ResolverFunc, Options);
-}
-
void CodeGenModule::emitMultiVersionFunctions() {
std::vector<GlobalDecl> MVFuncsToEmit;
MultiVersionFuncs.swap(MVFuncsToEmit);
for (GlobalDecl GD : MVFuncsToEmit) {
+ const auto *FD = cast<FunctionDecl>(GD.getDecl());
+ assert(FD && "Expected a FunctionDecl");
+
SmallVector<CodeGenFunction::MultiVersionResolverOption, 10> Options;
- const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
- getContext().forEachMultiversionedFunctionVersion(
- FD, [this, &GD, &Options](const FunctionDecl *CurFD) {
- GlobalDecl CurGD{
- (CurFD->isDefined() ? CurFD->getDefinition() : CurFD)};
- StringRef MangledName = getMangledName(CurGD);
- llvm::Constant *Func = GetGlobalValue(MangledName);
- if (!Func) {
- if (CurFD->isDefined()) {
- EmitGlobalFunctionDefinition(CurGD, nullptr);
- Func = GetGlobalValue(MangledName);
- } else {
- const CGFunctionInfo &FI =
- getTypes().arrangeGlobalDeclaration(GD);
- llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
- Func = GetAddrOfFunction(CurGD, Ty, /*ForVTable=*/false,
- /*DontDefer=*/false, ForDefinition);
+ if (FD->isTargetMultiVersion()) {
+ getContext().forEachMultiversionedFunctionVersion(
+ FD, [this, &GD, &Options](const FunctionDecl *CurFD) {
+ GlobalDecl CurGD{
+ (CurFD->isDefined() ? CurFD->getDefinition() : CurFD)};
+ StringRef MangledName = getMangledName(CurGD);
+ llvm::Constant *Func = GetGlobalValue(MangledName);
+ if (!Func) {
+ if (CurFD->isDefined()) {
+ EmitGlobalFunctionDefinition(CurGD, nullptr);
+ Func = GetGlobalValue(MangledName);
+ } else {
+ const CGFunctionInfo &FI =
+ getTypes().arrangeGlobalDeclaration(GD);
+ llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
+ Func = GetAddrOfFunction(CurGD, Ty, /*ForVTable=*/false,
+ /*DontDefer=*/false, ForDefinition);
+ }
+ assert(Func && "This should have just been created");
}
- assert(Func && "This should have just been created");
- }
- const auto *TA = CurFD->getAttr<TargetAttr>();
- llvm::SmallVector<StringRef, 8> Feats;
- TA->getAddedFeatures(Feats);
+ const auto *TA = CurFD->getAttr<TargetAttr>();
+ llvm::SmallVector<StringRef, 8> Feats;
+ TA->getAddedFeatures(Feats);
+
+ Options.emplace_back(cast<llvm::Function>(Func),
+ TA->getArchitecture(), Feats);
+ });
+ } else if (FD->isTargetClonesMultiVersion()) {
+ const auto *TC = FD->getAttr<TargetClonesAttr>();
+ for (unsigned VersionIndex = 0; VersionIndex < TC->featuresStrs_size();
+ ++VersionIndex) {
+ if (!TC->isFirstOfVersion(VersionIndex))
+ continue;
+ GlobalDecl CurGD{(FD->isDefined() ? FD->getDefinition() : FD),
+ VersionIndex};
+ StringRef Version = TC->getFeatureStr(VersionIndex);
+ StringRef MangledName = getMangledName(CurGD);
+ llvm::Constant *Func = GetGlobalValue(MangledName);
+ if (!Func) {
+ if (FD->isDefined()) {
+ EmitGlobalFunctionDefinition(CurGD, nullptr);
+ Func = GetGlobalValue(MangledName);
+ } else {
+ const CGFunctionInfo &FI =
+ getTypes().arrangeGlobalDeclaration(CurGD);
+ llvm::FunctionType *Ty = getTypes().GetFunctionType(FI);
+ Func = GetAddrOfFunction(CurGD, Ty, /*ForVTable=*/false,
+ /*DontDefer=*/false, ForDefinition);
+ }
+ assert(Func && "This should have just been created");
+ }
- Options.emplace_back(cast<llvm::Function>(Func),
- TA->getArchitecture(), Feats);
- });
+ StringRef Architecture;
+ llvm::SmallVector<StringRef, 1> Feature;
- llvm::Function *ResolverFunc;
- const TargetInfo &TI = getTarget();
+ if (Version.startswith("arch="))
+ Architecture = Version.drop_front(sizeof("arch=") - 1);
+ else if (Version != "default")
+ Feature.push_back(Version);
- if (TI.supportsIFunc() || FD->isTargetMultiVersion()) {
- ResolverFunc = cast<llvm::Function>(
- GetGlobalValue((getMangledName(GD) + ".resolver").str()));
- ResolverFunc->setLinkage(getMultiversionLinkage(*this, GD));
+ Options.emplace_back(cast<llvm::Function>(Func), Architecture, Feature);
+ }
} else {
- ResolverFunc = cast<llvm::Function>(GetGlobalValue(getMangledName(GD)));
+ assert(0 && "Expected a target or target_clones multiversion function");
+ continue;
}
+ llvm::Constant *ResolverConstant = GetOrCreateMultiVersionResolver(GD);
+ if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(ResolverConstant))
+ ResolverConstant = IFunc->getResolver();
+ llvm::Function *ResolverFunc = cast<llvm::Function>(ResolverConstant);
+
+ ResolverFunc->setLinkage(getMultiversionLinkage(*this, GD));
+
if (supportsCOMDAT())
ResolverFunc->setComdat(
getModule().getOrInsertComdat(ResolverFunc->getName()));
+ const TargetInfo &TI = getTarget();
llvm::stable_sort(
Options, [&TI](const CodeGenFunction::MultiVersionResolverOption &LHS,
const CodeGenFunction::MultiVersionResolverOption &RHS) {
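Both multiversioning styles now funnel through this single deferred loop. The source-level forms it covers, as a hedged x86 illustration (function names made up):

    __attribute__((target_clones("avx2", "default")))
    int checksum(int x) { return x; }                   // target_clones form

    __attribute__((target("avx2")))    int hash(int x) { return x * 2; }
    __attribute__((target("default"))) int hash(int x) { return x; }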
@@ -3491,12 +3584,9 @@ void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
assert(FD->isCPUDispatchMultiVersion() && "Not a multiversion function?");
const auto *DD = FD->getAttr<CPUDispatchAttr>();
assert(DD && "Not a cpu_dispatch Function?");
- llvm::Type *DeclTy = getTypes().ConvertType(FD->getType());
- if (const auto *CXXFD = dyn_cast<CXXMethodDecl>(FD)) {
- const CGFunctionInfo &FInfo = getTypes().arrangeCXXMethodDeclaration(CXXFD);
- DeclTy = getTypes().GetFunctionType(FInfo);
- }
+ const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
+ llvm::FunctionType *DeclTy = getTypes().GetFunctionType(FI);
StringRef ResolverName = getMangledName(GD);
UpdateMultiVersionNames(GD, FD, ResolverName);
@@ -3585,16 +3675,27 @@ void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
CGF.EmitMultiVersionResolver(ResolverFunc, Options);
if (getTarget().supportsIFunc()) {
+ llvm::GlobalValue::LinkageTypes Linkage = getMultiversionLinkage(*this, GD);
+ auto *IFunc = cast<llvm::GlobalValue>(GetOrCreateMultiVersionResolver(GD));
+
+ // Fix up function declarations that were created for cpu_specific before
+ // cpu_dispatch was known.
+ if (!isa<llvm::GlobalIFunc>(IFunc)) {
+ assert(cast<llvm::Function>(IFunc)->isDeclaration());
+ auto *GI = llvm::GlobalIFunc::create(DeclTy, 0, Linkage, "", ResolverFunc,
+ &getModule());
+ GI->takeName(IFunc);
+ IFunc->replaceAllUsesWith(GI);
+ IFunc->eraseFromParent();
+ IFunc = GI;
+ }
+
std::string AliasName = getMangledNameImpl(
*this, GD, FD, /*OmitMultiVersionMangling=*/true);
llvm::Constant *AliasFunc = GetGlobalValue(AliasName);
if (!AliasFunc) {
- auto *IFunc = cast<llvm::GlobalIFunc>(GetOrCreateLLVMFunction(
- AliasName, DeclTy, GD, /*ForVTable=*/false, /*DontDefer=*/true,
- /*IsThunk=*/false, llvm::AttributeList(), NotForDefinition));
- auto *GA = llvm::GlobalAlias::create(DeclTy, 0,
- getMultiversionLinkage(*this, GD),
- AliasName, IFunc, &getModule());
+ auto *GA = llvm::GlobalAlias::create(DeclTy, 0, Linkage, AliasName, IFunc,
+ &getModule());
SetCommonAttributes(GD, GA);
}
}
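A cpu_specific variant can be seen before any cpu_dispatch for it, possibly in another translation unit, which is why the resolver may start life as a plain declaration and only be upgraded to an ifunc here. An illustrative pairing (x86 only; names made up):

    __attribute__((cpu_specific(ivybridge))) void work(void) {}
    __attribute__((cpu_specific(generic)))   void work(void) {}
    __attribute__((cpu_dispatch(ivybridge, generic))) void work(void);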
@@ -3602,8 +3703,10 @@ void CodeGenModule::emitCPUDispatchDefinition(GlobalDecl GD) {
/// If a dispatcher for the specified mangled name is not in the module, create
/// and return an llvm Function with the specified type.
-llvm::Constant *CodeGenModule::GetOrCreateMultiVersionResolver(
- GlobalDecl GD, llvm::Type *DeclTy, const FunctionDecl *FD) {
+llvm::Constant *CodeGenModule::GetOrCreateMultiVersionResolver(GlobalDecl GD) {
+ const auto *FD = cast<FunctionDecl>(GD.getDecl());
+ assert(FD && "Not a FunctionDecl?");
+
std::string MangledName =
getMangledNameImpl(*this, GD, FD, /*OmitMultiVersionMangling=*/true);
@@ -3615,34 +3718,21 @@ llvm::Constant *CodeGenModule::GetOrCreateMultiVersionResolver(
else if (FD->isTargetMultiVersion())
ResolverName += ".resolver";
- // If this already exists, just return that one.
+ // If the resolver has already been created, just return it.
if (llvm::GlobalValue *ResolverGV = GetGlobalValue(ResolverName))
return ResolverGV;
- // Since this is the first time we've created this IFunc, make sure
- // that we put this multiversioned function into the list to be
- // replaced later if necessary (target multiversioning only).
- if (FD->isTargetMultiVersion())
+ const CGFunctionInfo &FI = getTypes().arrangeGlobalDeclaration(GD);
+ llvm::FunctionType *DeclTy = getTypes().GetFunctionType(FI);
+
+ // The resolver needs to be created. For target and target_clones, defer
+ // creation until the end of the TU.
+ if (FD->isTargetMultiVersion() || FD->isTargetClonesMultiVersion())
MultiVersionFuncs.push_back(GD);
- else if (FD->isTargetClonesMultiVersion()) {
- // In target_clones multiversioning, make sure we emit this if used.
- auto DDI =
- DeferredDecls.find(getMangledName(GD.getWithMultiVersionIndex(0)));
- if (DDI != DeferredDecls.end()) {
- addDeferredDeclToEmit(GD);
- DeferredDecls.erase(DDI);
- } else {
- // Emit the symbol of the 1st variant, so that the deferred decls know we
- // need it, otherwise the only global value will be the resolver/ifunc,
- // which end up getting broken if we search for them with GetGlobalValue'.
- GetOrCreateLLVMFunction(
- getMangledName(GD.getWithMultiVersionIndex(0)), DeclTy, FD,
- /*ForVTable=*/false, /*DontDefer=*/true,
- /*IsThunk=*/false, llvm::AttributeList(), ForDefinition);
- }
- }
- if (getTarget().supportsIFunc()) {
+ // For cpu_specific, don't create an ifunc yet because we don't know if the
+ // cpu_dispatch will be emitted in this translation unit.
+ if (getTarget().supportsIFunc() && !FD->isCPUSpecificMultiVersion()) {
llvm::Type *ResolverType = llvm::FunctionType::get(
llvm::PointerType::get(
DeclTy, getContext().getTargetAddressSpace(FD->getType())),
@@ -3700,9 +3790,9 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
}
if (FD->isMultiVersion()) {
- UpdateMultiVersionNames(GD, FD, MangledName);
+ UpdateMultiVersionNames(GD, FD, MangledName);
if (!IsForDefinition)
- return GetOrCreateMultiVersionResolver(GD, Ty, FD);
+ return GetOrCreateMultiVersionResolver(GD);
}
}
@@ -3716,7 +3806,8 @@ llvm::Constant *CodeGenModule::GetOrCreateLLVMFunction(
}
// Handle dropped DLL attributes.
- if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>()) {
+ if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>() &&
+ !shouldMapVisibilityToDLLExport(cast_or_null<NamedDecl>(D))) {
Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
setDSOLocal(Entry);
}
@@ -4028,7 +4119,8 @@ CodeGenModule::GetOrCreateLLVMGlobal(StringRef MangledName, llvm::Type *Ty,
}
// Handle dropped DLL attributes.
- if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>())
+ if (D && !D->hasAttr<DLLImportAttr>() && !D->hasAttr<DLLExportAttr>() &&
+ !shouldMapVisibilityToDLLExport(D))
Entry->setDLLStorageClass(llvm::GlobalValue::DefaultStorageClass);
if (LangOpts.OpenMP && !LangOpts.OpenMPSimd && D)
@@ -4390,7 +4482,7 @@ LangAS CodeGenModule::GetGlobalConstantAddressSpace() const {
// casted to Generic pointers which are used to model HIP's "flat" pointers.
return LangAS::cuda_device;
if (auto AS = getTarget().getConstantAddressSpace())
- return AS.getValue();
+ return *AS;
return LangAS::Default;
}
@@ -4550,6 +4642,8 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
T = D->getType();
if (getLangOpts().CPlusPlus) {
+ if (InitDecl->hasFlexibleArrayInit(getContext()))
+ ErrorUnsupported(D, "flexible array initializer");
Init = EmitNullConstant(T);
NeedsGlobalCtor = true;
} else {
@@ -4563,6 +4657,14 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
// also don't need to register a destructor.
if (getLangOpts().CPlusPlus && !NeedsGlobalDtor)
DelayedCXXInitPosition.erase(D);
+
+#ifndef NDEBUG
+ CharUnits VarSize = getContext().getTypeSizeInChars(ASTTy) +
+ InitDecl->getFlexibleArrayInitChars(getContext());
+ CharUnits CstSize = CharUnits::fromQuantity(
+ getDataLayout().getTypeAllocSize(Init->getType()));
+ assert(VarSize == CstSize && "Emitted constant has unexpected size");
+#endif
}
}
@@ -4651,7 +4753,12 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
GV->setConstant(true);
}
- GV->setAlignment(getContext().getDeclAlign(D).getAsAlign());
+ CharUnits AlignVal = getContext().getDeclAlign(D);
+ // Check for alignment specified in an 'omp allocate' directive.
+ if (llvm::Optional<CharUnits> AlignValFromAllocate =
+ getOMPAllocateAlignment(D))
+ AlignVal = *AlignValFromAllocate;
+ GV->setAlignment(AlignVal.getAsAlign());
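A source construct that exercises this query, assuming OpenMP 5.1's align clause on the allocate directive (compile with -fopenmp):

    int buffer[1024];
    #pragma omp allocate(buffer) align(64)   // requests 64-byte alignment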
// On Darwin, unlike other Itanium C++ ABI platforms, the thread-wrapper
// function is only defined alongside the variable, not also alongside
@@ -4706,7 +4813,7 @@ void CodeGenModule::EmitGlobalVarDefinition(const VarDecl *D,
if (NeedsGlobalCtor || NeedsGlobalDtor)
EmitCXXGlobalVarDeclInitFunc(D, GV, NeedsGlobalCtor);
- SanitizerMD->reportGlobalToASan(GV, *D, NeedsGlobalCtor);
+ SanitizerMD->reportGlobal(GV, *D, NeedsGlobalCtor);
// Emit global variable debug information.
if (CGDebugInfo *DI = getModuleDebugInfo())
@@ -4807,12 +4914,8 @@ llvm::GlobalValue::LinkageTypes CodeGenModule::getLLVMLinkageForDeclarator(
if (Linkage == GVA_Internal)
return llvm::Function::InternalLinkage;
- if (D->hasAttr<WeakAttr>()) {
- if (IsConstantVariable)
- return llvm::GlobalVariable::WeakODRLinkage;
- else
- return llvm::GlobalVariable::WeakAnyLinkage;
- }
+ if (D->hasAttr<WeakAttr>())
+ return llvm::GlobalVariable::WeakAnyLinkage;
if (const auto *FD = D->getAsFunction())
if (FD->isMultiVersion() && Linkage == GVA_AvailableExternally)
@@ -5142,6 +5245,11 @@ void CodeGenModule::EmitAliasDefinition(GlobalDecl GD) {
setTLSMode(GA, *VD);
SetCommonAttributes(GD, GA);
+
+ // Emit global alias debug information.
+ if (isa<VarDecl>(D))
+ if (CGDebugInfo *DI = getModuleDebugInfo())
+ DI->EmitGlobalAlias(cast<llvm::GlobalValue>(GA->getAliasee()), GD);
}
void CodeGenModule::emitIFuncDefinition(GlobalDecl GD) {
@@ -5343,7 +5451,7 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
auto Fields = Builder.beginStruct(STy);
// Class pointer.
- Fields.add(cast<llvm::ConstantExpr>(CFConstantStringClassRef));
+ Fields.add(cast<llvm::Constant>(CFConstantStringClassRef));
// Flags.
if (IsSwiftABI) {
@@ -5422,10 +5530,11 @@ CodeGenModule::GetAddrOfConstantCFString(const StringLiteral *Literal) {
switch (Triple.getObjectFormat()) {
case llvm::Triple::UnknownObjectFormat:
llvm_unreachable("unknown file format");
+ case llvm::Triple::DXContainer:
case llvm::Triple::GOFF:
- llvm_unreachable("GOFF is not yet implemented");
+ case llvm::Triple::SPIRV:
case llvm::Triple::XCOFF:
- llvm_unreachable("XCOFF is not yet implemented");
+ llvm_unreachable("unimplemented");
case llvm::Triple::COFF:
case llvm::Triple::ELF:
case llvm::Triple::Wasm:
@@ -5578,11 +5687,15 @@ CodeGenModule::GetAddrOfConstantStringFromLiteral(const StringLiteral *S,
}
auto GV = GenerateStringLiteral(C, LT, *this, GlobalVariableName, Alignment);
+
+ CGDebugInfo *DI = getModuleDebugInfo();
+ if (DI && getCodeGenOpts().hasReducedDebugInfo())
+ DI->AddStringLiteralDebugInfo(GV, S);
+
if (Entry)
*Entry = GV;
- SanitizerMD->reportGlobalToASan(GV, S->getStrTokenLoc(0), "<string literal>",
- QualType());
+ SanitizerMD->reportGlobal(GV, S->getStrTokenLoc(0), "<string literal>");
return ConstantAddress(castStringLiteralToDefaultAddressSpace(*this, GV),
GV->getValueType(), Alignment);
@@ -5662,9 +5775,11 @@ ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
getModule(), Type, false, llvm::GlobalVariable::InternalLinkage,
nullptr);
}
- return ConstantAddress(
- InsertResult.first->second,
- InsertResult.first->second->getType()->getPointerElementType(), Align);
+ return ConstantAddress(InsertResult.first->second,
+ llvm::cast<llvm::GlobalVariable>(
+ InsertResult.first->second->stripPointerCasts())
+ ->getValueType(),
+ Align);
}
// FIXME: If an externally-visible declaration extends multiple temporaries,
@@ -5733,6 +5848,9 @@ ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
/*InsertBefore=*/nullptr, llvm::GlobalVariable::NotThreadLocal, TargetAS);
if (emitter) emitter->finalize(GV);
setGVProperties(GV, VD);
+ if (GV->getDLLStorageClass() == llvm::GlobalVariable::DLLExportStorageClass)
+ // The reference temporary should never be dllexport.
+ GV->setDLLStorageClass(llvm::GlobalVariable::DefaultStorageClass);
GV->setAlignment(Align.getAsAlign());
if (supportsCOMDAT() && GV->isWeakForLinker())
GV->setComdat(TheModule.getOrInsertComdat(GV->getName()));
@@ -6249,8 +6367,10 @@ void CodeGenModule::EmitMainVoidAlias() {
// new-style no-argument main is in use.
if (llvm::Function *F = getModule().getFunction("main")) {
if (!F->isDeclaration() && F->arg_size() == 0 && !F->isVarArg() &&
- F->getReturnType()->isIntegerTy(Context.getTargetInfo().getIntWidth()))
- addUsedGlobal(llvm::GlobalAlias::create("__main_void", F));
+ F->getReturnType()->isIntegerTy(Context.getTargetInfo().getIntWidth())) {
+ auto *GA = llvm::GlobalAlias::create("__main_void", F);
+ GA->setVisibility(llvm::GlobalValue::HiddenVisibility);
+ }
}
}
@@ -6277,6 +6397,72 @@ static void EmitGlobalDeclMetadata(CodeGenModule &CGM,
GlobalMetadata->addOperand(llvm::MDNode::get(CGM.getLLVMContext(), Ops));
}
+bool CodeGenModule::CheckAndReplaceExternCIFuncs(llvm::GlobalValue *Elem,
+ llvm::GlobalValue *CppFunc) {
+ // Store the list of ifuncs we need to replace uses in.
+ llvm::SmallVector<llvm::GlobalIFunc *> IFuncs;
+ // List of ConstantExprs that we should be able to delete when we're done
+ // here.
+ llvm::SmallVector<llvm::ConstantExpr *> CEs;
+
+ // It isn't valid to perform the replacement if the extern-C symbol is the C++ function itself!
+ if (Elem == CppFunc)
+ return false;
+
+ // First make sure that all users of this are ifuncs (or ifuncs via a
+ // bitcast), and collect the list of ifuncs and CEs so we can work on them
+ // later.
+ for (llvm::User *User : Elem->users()) {
+ // Each user must be either a bitcast ConstantExpr that is consumed by an
+ // ifunc, or an ifunc directly. In any other case, just give up, as we don't
+ // know what we could break by changing those.
+ if (auto *ConstExpr = dyn_cast<llvm::ConstantExpr>(User)) {
+ if (ConstExpr->getOpcode() != llvm::Instruction::BitCast)
+ return false;
+
+ for (llvm::User *CEUser : ConstExpr->users()) {
+ if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(CEUser)) {
+ IFuncs.push_back(IFunc);
+ } else {
+ return false;
+ }
+ }
+ CEs.push_back(ConstExpr);
+ } else if (auto *IFunc = dyn_cast<llvm::GlobalIFunc>(User)) {
+ IFuncs.push_back(IFunc);
+ } else {
+ // This user is one we don't know how to handle, so fail redirection. This
+ // will result in an ifunc retaining a resolver name that will ultimately
+ // fail to be resolved to a defined function.
+ return false;
+ }
+ }
+
+ // Now that we know this is a valid case for the alias replacement, remove
+ // all of the references to Elem (and the bitcasts!) so that we can delete
+ // it.
+ for (llvm::GlobalIFunc *IFunc : IFuncs)
+ IFunc->setResolver(nullptr);
+ for (llvm::ConstantExpr *ConstExpr : CEs)
+ ConstExpr->destroyConstant();
+
+ // We should now be out of uses for the 'old' version of this function, so we
+ // can erase it as well.
+ Elem->eraseFromParent();
+
+ for (llvm::GlobalIFunc *IFunc : IFuncs) {
+ // The type of the resolver is always just a function-type that returns the
+ // type of the IFunc, so create that here. If the type of the actual
+ // resolver doesn't match, it just gets bitcast to the right thing.
+ auto *ResolverTy =
+ llvm::FunctionType::get(IFunc->getType(), /*isVarArg*/ false);
+ llvm::Constant *Resolver = GetOrCreateLLVMFunction(
+ CppFunc->getName(), ResolverTy, {}, /*ForVTable*/ false);
+ IFunc->setResolver(Resolver);
+ }
+ return true;
+}
+
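A sketch of the C++ pattern CheckAndReplaceExternCIFuncs serves, assuming an ELF target with ifunc support (names made up): the static resolver inside extern "C" is emitted under its mangled internal name plus an unmangled alias, and since an ifunc may not name an alias, its resolver is redirected to the mangled symbol instead.

    extern "C" {
    static void impl() {}
    static void (*resolve_work())() { return impl; }   // internal resolver
    }
    void work() __attribute__((ifunc("resolve_work")));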
/// For each function which is declared within an extern "C" region and marked
/// as 'used', but has internal linkage, create an alias from the unmangled
/// name to the mangled name if possible. People expect to be able to refer
@@ -6288,7 +6474,19 @@ void CodeGenModule::EmitStaticExternCAliases() {
for (auto &I : StaticExternCValues) {
IdentifierInfo *Name = I.first;
llvm::GlobalValue *Val = I.second;
- if (Val && !getModule().getNamedValue(Name->getName()))
+
+ // If Val is null, that implies there were multiple declarations that each
+ // had a claim to the unmangled name. In this case, generation of the alias
+ // is suppressed. See CodeGenModule::MaybeHandleStaticInExternC.
+ if (!Val)
+ break;
+
+ llvm::GlobalValue *ExistingElem =
+ getModule().getNamedValue(Name->getName());
+
+ // If nothing already exists by this name, or if we were able to redirect all
+ // ifunc uses away from the existing value, create the alias.
+ if (!ExistingElem || CheckAndReplaceExternCIFuncs(ExistingElem, Val))
addCompilerUsedGlobal(llvm::GlobalAlias::create(Name->getName(), Val));
}
}
@@ -6418,7 +6616,9 @@ void CodeGenModule::EmitOMPThreadPrivateDecl(const OMPThreadPrivateDecl *D) {
!VD->getAnyInitializer()->isConstantInitializer(getContext(),
/*ForRef=*/false);
- Address Addr(GetAddrOfGlobalVar(VD), getContext().getDeclAlign(VD));
+ Address Addr(GetAddrOfGlobalVar(VD),
+ getTypes().ConvertTypeForMem(VD->getType()),
+ getContext().getDeclAlign(VD));
if (auto InitFunction = getOpenMPRuntime().emitThreadPrivateVarDefinition(
VD, Addr, RefExpr->getBeginLoc(), PerformInit))
CXXGlobalInits.push_back(InitFunction);
@@ -6647,12 +6847,38 @@ bool CodeGenModule::stopAutoInit() {
void CodeGenModule::printPostfixForExternalizedDecl(llvm::raw_ostream &OS,
const Decl *D) const {
- StringRef Tag;
// ptxas does not allow '.' in symbol names. On the other hand, HIP prefers a
// postfix beginning with '.' since the symbol name can then be demangled.
if (LangOpts.HIP)
- Tag = (isa<VarDecl>(D) ? ".static." : ".intern.");
+ OS << (isa<VarDecl>(D) ? ".static." : ".intern.");
else
- Tag = (isa<VarDecl>(D) ? "__static__" : "__intern__");
- OS << Tag << getContext().getCUIDHash();
+ OS << (isa<VarDecl>(D) ? "__static__" : "__intern__");
+
+ // If the CUID is not specified, we try to generate a unique postfix.
+ if (getLangOpts().CUID.empty()) {
+ SourceManager &SM = getContext().getSourceManager();
+ PresumedLoc PLoc = SM.getPresumedLoc(D->getLocation());
+ assert(PLoc.isValid() && "Source location is expected to be valid.");
+
+ // Get the hash of the user defined macros.
+ llvm::MD5 Hash;
+ llvm::MD5::MD5Result Result;
+ for (const auto &Arg : PreprocessorOpts.Macros)
+ Hash.update(Arg.first);
+ Hash.final(Result);
+
+ // Get the UniqueID for the file containing the decl.
+ llvm::sys::fs::UniqueID ID;
+ if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID)) {
+ PLoc = SM.getPresumedLoc(D->getLocation(), /*UseLineDirectives=*/false);
+ assert(PLoc.isValid() && "Source location is expected to be valid.");
+ if (auto EC = llvm::sys::fs::getUniqueID(PLoc.getFilename(), ID))
+ SM.getDiagnostics().Report(diag::err_cannot_open_file)
+ << PLoc.getFilename() << EC.message();
+ }
+ OS << llvm::format("%x", ID.getFile()) << llvm::format("%x", ID.getDevice())
+ << "_" << llvm::utohexstr(Result.low(), /*LowerCase=*/true, /*Width=*/8);
+ } else {
+ OS << getContext().getCUIDHash();
+ }
}
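A standalone sketch of the macro-hash half of that fallback (the real code additionally folds in the file's UniqueID; the macro strings are stand-ins for -D arguments):

    #include "llvm/ADT/StringExtras.h"
    #include "llvm/Support/MD5.h"
    #include "llvm/Support/raw_ostream.h"

    int main() {
      llvm::MD5 Hash;
      llvm::MD5::MD5Result Result;
      for (llvm::StringRef Macro : {"DEBUG=1", "USE_FOO"})
        Hash.update(Macro);
      Hash.final(Result);
      llvm::outs() << llvm::utohexstr(Result.low(), /*LowerCase=*/true,
                                      /*Width=*/8)
                   << "\n";
    }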
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
index a8a63c8da57f..da43b9616c88 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenModule.h
@@ -85,6 +85,7 @@ class CGObjCRuntime;
class CGOpenCLRuntime;
class CGOpenMPRuntime;
class CGCUDARuntime;
+class CGHLSLRuntime;
class CoverageMappingModuleGen;
class TargetCodeGenInfo;
@@ -319,6 +320,7 @@ private:
std::unique_ptr<CGOpenCLRuntime> OpenCLRuntime;
std::unique_ptr<CGOpenMPRuntime> OpenMPRuntime;
std::unique_ptr<CGCUDARuntime> CUDARuntime;
+ std::unique_ptr<CGHLSLRuntime> HLSLRuntime;
std::unique_ptr<CGDebugInfo> DebugInfo;
std::unique_ptr<ObjCEntrypoints> ObjCData;
llvm::MDNode *NoObjCARCExceptionsMetadata = nullptr;
@@ -348,8 +350,9 @@ private:
/// is defined once we get to the end of the translation unit.
std::vector<GlobalDecl> Aliases;
- /// List of multiversion functions that have to be emitted. Used to make sure
- /// we properly emit the iFunc.
+ /// List of multiversion functions to be emitted. This list is processed in
+ /// conjunction with other deferred symbols and is used to ensure that
+ /// multiversion function resolvers and ifuncs are defined and emitted.
std::vector<GlobalDecl> MultiVersionFuncs;
typedef llvm::StringMap<llvm::TrackingVH<llvm::Constant> > ReplacementsTy;
@@ -406,6 +409,8 @@ private:
llvm::StringMap<llvm::GlobalVariable *> CFConstantStringMap;
llvm::DenseMap<llvm::Constant *, llvm::GlobalVariable *> ConstantStringMap;
+ llvm::DenseMap<const UnnamedGlobalConstantDecl *, llvm::GlobalVariable *>
+ UnnamedGlobalConstantDeclMap;
llvm::DenseMap<const Decl*, llvm::Constant *> StaticLocalDeclMap;
llvm::DenseMap<const Decl*, llvm::GlobalVariable*> StaticLocalDeclGuardMap;
llvm::DenseMap<const Expr*, llvm::Constant *> MaterializedGlobalTemporaryMap;
@@ -509,6 +514,7 @@ private:
void createOpenCLRuntime();
void createOpenMPRuntime();
void createCUDARuntime();
+ void createHLSLRuntime();
bool isTriviallyRecursive(const FunctionDecl *F);
bool shouldEmitFunction(GlobalDecl GD);
@@ -561,6 +567,8 @@ private:
MetadataTypeMap VirtualMetadataIdMap;
MetadataTypeMap GeneralizedMetadataIdMap;
+ llvm::DenseMap<const llvm::Constant *, llvm::GlobalVariable *> RTTIProxyMap;
+
public:
CodeGenModule(ASTContext &C, const HeaderSearchOptions &headersearchopts,
const PreprocessorOptions &ppopts,
@@ -607,6 +615,12 @@ public:
return *CUDARuntime;
}
+ /// Return a reference to the configured HLSL runtime.
+ CGHLSLRuntime &getHLSLRuntime() {
+ assert(HLSLRuntime != nullptr);
+ return *HLSLRuntime;
+ }
+
ObjCEntrypoints &getObjCEntrypoints() const {
assert(ObjCData != nullptr);
return *ObjCData;
@@ -786,6 +800,14 @@ public:
void setDSOLocal(llvm::GlobalValue *GV) const;
+ bool shouldMapVisibilityToDLLExport(const NamedDecl *D) const {
+ return getLangOpts().hasDefaultVisibilityExportMapping() && D &&
+ (D->getLinkageAndVisibility().getVisibility() ==
+ DefaultVisibility) &&
+ (getLangOpts().isAllDefaultVisibilityExportMapping() ||
+ (getLangOpts().isExplicitDefaultVisibilityExportMapping() &&
+ D->getLinkageAndVisibility().isVisibilityExplicit()));
+ }
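A hedged source-level illustration of the 'explicit' mapping mode (the controlling clang option is believed to be -fdefault-visibility-export-mapping=, but treat that spelling as an assumption):

    __attribute__((visibility("default"))) void api();  // explicit: dllexport
    void helper();          // visibility only implicit: not mapped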
void setDLLImportDLLExport(llvm::GlobalValue *GV, GlobalDecl D) const;
void setDLLImportDLLExport(llvm::GlobalValue *GV, const NamedDecl *D) const;
/// Set visibility, dllimport/dllexport and dso_local.
@@ -826,7 +848,9 @@ public:
llvm::Function *CreateGlobalInitOrCleanUpFunction(
llvm::FunctionType *ty, const Twine &name, const CGFunctionInfo &FI,
- SourceLocation Loc = SourceLocation(), bool TLS = false);
+ SourceLocation Loc = SourceLocation(), bool TLS = false,
+ llvm::GlobalVariable::LinkageTypes Linkage =
+ llvm::GlobalVariable::InternalLinkage);
/// Return the AST address space of the underlying global variable for D, as
/// determined by its declaration. Normally this is the same as the address
@@ -873,6 +897,10 @@ public:
/// Get the address of a GUID.
ConstantAddress GetAddrOfMSGuidDecl(const MSGuidDecl *GD);
+ /// Get the address of a UnnamedGlobalConstant
+ ConstantAddress
+ GetAddrOfUnnamedGlobalConstantDecl(const UnnamedGlobalConstantDecl *GCD);
+
/// Get the address of a template parameter object.
ConstantAddress
GetAddrOfTemplateParamObject(const TemplateParamObjectDecl *TPO);
@@ -1213,6 +1241,7 @@ public:
StringRef getMangledName(GlobalDecl GD);
StringRef getBlockMangledName(GlobalDecl GD, const BlockDecl *BD);
+ const GlobalDecl getMangledNameDecl(StringRef);
void EmitTentativeDefinition(const VarDecl *D);
@@ -1287,8 +1316,9 @@ public:
bool isInNoSanitizeList(SanitizerMask Kind, llvm::Function *Fn,
SourceLocation Loc) const;
- bool isInNoSanitizeList(llvm::GlobalVariable *GV, SourceLocation Loc,
- QualType Ty, StringRef Category = StringRef()) const;
+ bool isInNoSanitizeList(SanitizerMask Kind, llvm::GlobalVariable *GV,
+ SourceLocation Loc, QualType Ty,
+ StringRef Category = StringRef()) const;
/// Imbue XRay attributes to a function, applying the always/never attribute
/// lists in the process. Returns true if we did imbue attributes this way,
@@ -1346,15 +1376,18 @@ public:
/// \param D The allocate declaration
void EmitOMPAllocateDecl(const OMPAllocateDecl *D);
+ /// Return the alignment specified in an allocate directive, if present.
+ llvm::Optional<CharUnits> getOMPAllocateAlignment(const VarDecl *VD);
+
/// Returns whether the given record has hidden LTO visibility and therefore
/// may participate in (single-module) CFI and whole-program vtable
/// optimization.
bool HasHiddenLTOVisibility(const CXXRecordDecl *RD);
- /// Returns whether the given record has public std LTO visibility
- /// and therefore may not participate in (single-module) CFI and whole-program
- /// vtable optimization.
- bool HasLTOVisibilityPublicStd(const CXXRecordDecl *RD);
+ /// Returns whether the given record has public LTO visibility (regardless of
+ /// -lto-whole-program-visibility) and therefore may not participate in
+ /// (single-module) CFI and whole-program vtable optimization.
+ bool AlwaysHasLTOVisibilityPublic(const CXXRecordDecl *RD);
/// Returns the vcall visibility of the given type. This is the scope in which
/// a virtual function call could be made which ends up being dispatched to a
@@ -1411,6 +1444,9 @@ public:
std::vector<const CXXRecordDecl *>
getMostBaseClasses(const CXXRecordDecl *RD);
+ llvm::GlobalVariable *
+ GetOrCreateRTTIProxyGlobalVariable(llvm::Constant *Addr);
+
/// Get the declaration of std::terminate for the platform.
llvm::FunctionCallee getTerminateFn();
@@ -1429,7 +1465,7 @@ public:
/// \param FN is a pointer to IR function being generated.
/// \param FD is a pointer to function declaration if any.
/// \param CGF is a pointer to CodeGenFunction that generates this function.
- void GenOpenCLArgMetadata(llvm::Function *FN,
+ void GenKernelArgMetadata(llvm::Function *FN,
const FunctionDecl *FD = nullptr,
CodeGenFunction *CGF = nullptr);
@@ -1448,10 +1484,40 @@ public:
bool stopAutoInit();
/// Print the postfix for externalized static variable or kernels for single
- /// source offloading languages CUDA and HIP.
+ /// source offloading languages CUDA and HIP. The unique postfix is created
+ /// using either the CUID argument, or the file's UniqueID and active macros.
+ /// The fallback method without a CUID requires that the offloading toolchain
+ /// does not define separate macros via the -cc1 options.
void printPostfixForExternalizedDecl(llvm::raw_ostream &OS,
const Decl *D) const;
+ /// Move some lazily-emitted state to the NewBuilder. This is especially
+ /// essential for incremental parsing environments like the Clang
+ /// interpreter, where this state would otherwise be lost after each round
+ /// of input.
+ void moveLazyEmissionStates(CodeGenModule *NewBuilder) {
+ assert(DeferredDeclsToEmit.empty() &&
+ "Should have emitted all decls deferred to emit.");
+ assert(NewBuilder->DeferredDecls.empty() &&
+ "Newly created module should not have deferred decls");
+ NewBuilder->DeferredDecls = std::move(DeferredDecls);
+
+ assert(NewBuilder->DeferredVTables.empty() &&
+ "Newly created module should not have deferred vtables");
+ NewBuilder->DeferredVTables = std::move(DeferredVTables);
+
+ assert(NewBuilder->MangledDeclNames.empty() &&
+ "Newly created module should not have mangled decl names");
+ assert(NewBuilder->Manglings.empty() &&
+ "Newly created module should not have manglings");
+ NewBuilder->Manglings = std::move(Manglings);
+
+ assert(WeakRefReferences.empty() &&
+ "Not all WeakRefRefs have been applied");
+ NewBuilder->WeakRefReferences = std::move(WeakRefReferences);
+
+ NewBuilder->TBAA = std::move(TBAA);
+ }
+
private:
llvm::Constant *GetOrCreateLLVMFunction(
StringRef MangledName, llvm::Type *Ty, GlobalDecl D, bool ForVTable,
@@ -1459,9 +1525,18 @@ private:
llvm::AttributeList ExtraAttrs = llvm::AttributeList(),
ForDefinition_t IsForDefinition = NotForDefinition);
- llvm::Constant *GetOrCreateMultiVersionResolver(GlobalDecl GD,
- llvm::Type *DeclTy,
- const FunctionDecl *FD);
+ // References to multiversion functions are resolved through an implicitly
+ // defined resolver function. This function is responsible for creating
+ // the resolver symbol for the provided declaration. The value returned
+ // will be for an ifunc (llvm::GlobalIFunc) if the current target supports
+ // that feature and for a regular function (llvm::GlobalValue) otherwise.
+ llvm::Constant *GetOrCreateMultiVersionResolver(GlobalDecl GD);
+
+ // In scenarios where a function is not known to be a multiversion function
+ // until a later declaration, it is sometimes necessary to change the
+ // previously created mangled name to align with requirements of whatever
+ // multiversion function kind the function is now known to be. This function
+ // is responsible for performing such mangled name updates.
void UpdateMultiVersionNames(GlobalDecl GD, const FunctionDecl *FD,
StringRef &CurName);
@@ -1488,7 +1563,6 @@ private:
void EmitAliasDefinition(GlobalDecl GD);
void emitIFuncDefinition(GlobalDecl GD);
void emitCPUDispatchDefinition(GlobalDecl GD);
- void EmitTargetClonesResolver(GlobalDecl GD);
void EmitObjCPropertyImplementations(const ObjCImplementationDecl *D);
void EmitObjCIvarInitializations(ObjCImplementationDecl *D);
@@ -1554,6 +1628,7 @@ private:
// registered by the atexit subroutine using unatexit.
void unregisterGlobalDtorsWithUnAtExit();
+ /// Emit deferred multiversion function resolvers and associated variants.
void emitMultiVersionFunctions();
/// Emit any vtables which we deferred and still have a use for.
@@ -1569,6 +1644,16 @@ private:
/// Emit the link options introduced by imported modules.
void EmitModuleLinkOptions();
+ /// Helper function for EmitStaticExternCAliases() to redirect ifuncs that
+ /// have a resolver name that matches 'Elem' to instead resolve to the name of
+ /// 'CppFunc'. This redirection is necessary in cases where 'Elem' has a name
+ /// that will be emitted as an alias of the name bound to 'CppFunc'; ifuncs
+ /// may not reference aliases. Redirection is performed only if 'Elem' is used
+ /// solely by ifuncs, in which case 'Elem' is destroyed. 'true' is returned if
+ /// redirection is successful, and 'false' is returned otherwise.
+ bool CheckAndReplaceExternCIFuncs(llvm::GlobalValue *Elem,
+ llvm::GlobalValue *CppFunc);
+
/// Emit aliases for internal-linkage declarations inside "C" language
/// linkage specifications, giving them the "expected" name where possible.
void EmitStaticExternCAliases();
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp
index 6657f2a91e3d..587bcef78ee5 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenPGO.cpp
@@ -23,7 +23,7 @@
#include "llvm/Support/MD5.h"
static llvm::cl::opt<bool>
- EnableValueProfiling("enable-value-profiling", llvm::cl::ZeroOrMore,
+ EnableValueProfiling("enable-value-profiling",
llvm::cl::desc("Enable value profiling"),
llvm::cl::Hidden, llvm::cl::init(false));
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
index 7a8a7c916473..fcce424747f1 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -29,16 +29,6 @@
using namespace clang;
using namespace CodeGen;
-#ifndef NDEBUG
-#include "llvm/Support/CommandLine.h"
-// TODO: turn on by default when defined(EXPENSIVE_CHECKS) once check-clang is
-// -verify-type-cache clean.
-static llvm::cl::opt<bool> VerifyTypeCache(
- "verify-type-cache",
- llvm::cl::desc("Verify that the type cache matches the computed type"),
- llvm::cl::init(false), llvm::cl::Hidden);
-#endif
-
CodeGenTypes::CodeGenTypes(CodeGenModule &cgm)
: CGM(cgm), Context(cgm.getContext()), TheModule(cgm.getModule()),
Target(cgm.getTarget()), TheCXXABI(cgm.getCXXABI()),
@@ -108,6 +98,14 @@ llvm::Type *CodeGenTypes::ConvertTypeForMem(QualType T, bool ForBitField) {
llvm::Type *R = ConvertType(T);
+ // Check for the boolean vector case.
+ if (T->isExtVectorBoolType()) {
+ auto *FixedVT = cast<llvm::FixedVectorType>(R);
+ // Pad to at least one byte.
+ uint64_t BytePadded = std::max<uint64_t>(FixedVT->getNumElements(), 8);
+ return llvm::IntegerType::get(FixedVT->getContext(), BytePadded);
+ }
+
// If this is a bool type, or a bit-precise integer type in a bitfield
// representation, map this integer to the target-specified size.
if ((ForBitField && T->isBitIntType()) ||
@@ -437,14 +435,12 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
TypeCache.find(Ty);
if (TCI != TypeCache.end())
CachedType = TCI->second;
- if (CachedType) {
-#ifndef NDEBUG
- if (!VerifyTypeCache)
- return CachedType;
-#else
+ // With expensive checks, check that the type we compute matches the
+ // cached type.
+#ifndef EXPENSIVE_CHECKS
+ if (CachedType)
return CachedType;
#endif
- }
}
// If we don't have it in the cache, convert it now.
@@ -713,9 +709,12 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
}
case Type::ExtVector:
case Type::Vector: {
- const VectorType *VT = cast<VectorType>(Ty);
- ResultType = llvm::FixedVectorType::get(ConvertType(VT->getElementType()),
- VT->getNumElements());
+ const auto *VT = cast<VectorType>(Ty);
+ // An ext_vector_type of Bool is really a vector of bits.
+ llvm::Type *IRElemTy = VT->isExtVectorBoolType()
+ ? llvm::Type::getInt1Ty(getLLVMContext())
+ : ConvertType(VT->getElementType());
+ ResultType = llvm::FixedVectorType::get(IRElemTy, VT->getNumElements());
break;
}
case Type::ConstantMatrix: {
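At the source level, the padded memory type above and the i1 element type here combine as follows, using Clang's ext_vector_type extension (sizes assume the byte-padding scheme introduced in this change):

    typedef bool bool8 __attribute__((ext_vector_type(8)));
    typedef bool bool4 __attribute__((ext_vector_type(4)));

    static_assert(sizeof(bool8) == 1, "8 x i1 stored as one byte");
    static_assert(sizeof(bool4) == 1, "4 x i1 padded up to one byte");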
@@ -784,8 +783,11 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
case Type::MemberPointer: {
auto *MPTy = cast<MemberPointerType>(Ty);
if (!getCXXABI().isMemberPointerConvertible(MPTy)) {
- RecordsWithOpaqueMemberPointers.insert(MPTy->getClass());
- ResultType = llvm::StructType::create(getLLVMContext());
+ auto *C = MPTy->getClass();
+ auto Insertion = RecordsWithOpaqueMemberPointers.insert({C, nullptr});
+ if (Insertion.second)
+ Insertion.first->second = llvm::StructType::create(getLLVMContext());
+ ResultType = Insertion.first->second;
} else {
ResultType = getCXXABI().ConvertMemberPointerType(MPTy);
}
@@ -822,13 +824,8 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
}
assert(ResultType && "Didn't convert a type?");
-
-#ifndef NDEBUG
- if (CachedType) {
- assert(CachedType == ResultType &&
- "Cached type doesn't match computed type");
- }
-#endif
+ assert((!CachedType || CachedType == ResultType) &&
+ "Cached type doesn't match computed type");
if (ShouldUseCache)
TypeCache[Ty] = ResultType;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h
index 28b831222943..cd20563cbf75 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CodeGenTypes.h
@@ -76,7 +76,7 @@ class CodeGenTypes {
llvm::DenseMap<const Type*, llvm::StructType *> RecordDeclTypes;
/// Hold memoized CGFunctionInfo results.
- llvm::FoldingSet<CGFunctionInfo> FunctionInfos;
+ llvm::FoldingSet<CGFunctionInfo> FunctionInfos{FunctionInfosLog2InitSize};
/// This set keeps track of records that we're currently converting
/// to an IR type. For example, when converting:
@@ -96,8 +96,9 @@ class CodeGenTypes {
/// corresponding llvm::Type.
llvm::DenseMap<const Type *, llvm::Type *> TypeCache;
- llvm::SmallSet<const Type *, 8> RecordsWithOpaqueMemberPointers;
+ llvm::DenseMap<const Type *, llvm::Type *> RecordsWithOpaqueMemberPointers;
+ static constexpr unsigned FunctionInfosLog2InitSize = 9;
/// Helper for ConvertType.
llvm::Type *ConvertFunctionTypeInternal(QualType FT);
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ConstantInitBuilder.cpp b/contrib/llvm-project/clang/lib/CodeGen/ConstantInitBuilder.cpp
index 24e3ca19709c..06d3e44f01b1 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ConstantInitBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ConstantInitBuilder.cpp
@@ -114,7 +114,7 @@ void ConstantInitBuilderBase::abandon(size_t newEnd) {
if (newEnd == 0) {
for (auto &entry : SelfReferences) {
auto dummy = entry.Dummy;
- dummy->replaceAllUsesWith(llvm::UndefValue::get(dummy->getType()));
+ dummy->replaceAllUsesWith(llvm::PoisonValue::get(dummy->getType()));
dummy->eraseFromParent();
}
SelfReferences.clear();
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.cpp b/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.cpp
index 9b81c8a670f5..0fe084b628da 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.cpp
@@ -60,26 +60,27 @@ CoverageMappingModuleGen::setUpCoverageCallbacks(Preprocessor &PP) {
return CoverageInfo;
}
-void CoverageSourceInfo::AddSkippedRange(SourceRange Range) {
+void CoverageSourceInfo::AddSkippedRange(SourceRange Range,
+ SkippedRange::Kind RangeKind) {
if (EmptyLineCommentCoverage && !SkippedRanges.empty() &&
PrevTokLoc == SkippedRanges.back().PrevTokLoc &&
SourceMgr.isWrittenInSameFile(SkippedRanges.back().Range.getEnd(),
Range.getBegin()))
SkippedRanges.back().Range.setEnd(Range.getEnd());
else
- SkippedRanges.push_back({Range, PrevTokLoc});
+ SkippedRanges.push_back({Range, RangeKind, PrevTokLoc});
}
void CoverageSourceInfo::SourceRangeSkipped(SourceRange Range, SourceLocation) {
- AddSkippedRange(Range);
+ AddSkippedRange(Range, SkippedRange::PPIfElse);
}
void CoverageSourceInfo::HandleEmptyline(SourceRange Range) {
- AddSkippedRange(Range);
+ AddSkippedRange(Range, SkippedRange::EmptyLine);
}
bool CoverageSourceInfo::HandleComment(Preprocessor &PP, SourceRange Range) {
- AddSkippedRange(Range);
+ AddSkippedRange(Range, SkippedRange::Comment);
return false;
}
@@ -129,7 +130,7 @@ public:
void setCounter(Counter C) { Count = C; }
- bool hasStartLoc() const { return LocStart.hasValue(); }
+ bool hasStartLoc() const { return LocStart.has_value(); }
void setStartLoc(SourceLocation Loc) { LocStart = Loc; }
@@ -138,7 +139,7 @@ public:
return *LocStart;
}
- bool hasEndLoc() const { return LocEnd.hasValue(); }
+ bool hasEndLoc() const { return LocEnd.has_value(); }
void setEndLoc(SourceLocation Loc) {
assert(Loc.isValid() && "Setting an invalid end location");
@@ -154,7 +155,7 @@ public:
void setGap(bool Gap) { GapRegion = Gap; }
- bool isBranch() const { return FalseCount.hasValue(); }
+ bool isBranch() const { return FalseCount.has_value(); }
};
/// Spelling locations for the start and end of a source region.
@@ -335,6 +336,8 @@ public:
/// This shrinks the skipped range if it spans a line that contains a
/// non-comment token. If shrinking the skipped range would make it empty,
/// this returns None.
+ /// Note that this function can be expensive: getSpellingLineNumber
+ /// calls getLineNumber, which is itself costly.
Optional<SpellingRegion> adjustSkippedRange(SourceManager &SM,
SourceLocation LocStart,
SourceLocation LocEnd,
@@ -382,9 +385,14 @@ public:
auto CovFileID = getCoverageFileID(LocStart);
if (!CovFileID)
continue;
- Optional<SpellingRegion> SR =
- adjustSkippedRange(SM, LocStart, LocEnd, I.PrevTokLoc, I.NextTokLoc);
- if (!SR.hasValue())
+ Optional<SpellingRegion> SR;
+ if (I.isComment())
+ SR = adjustSkippedRange(SM, LocStart, LocEnd, I.PrevTokLoc,
+ I.NextTokLoc);
+ else if (I.isPPIfElse() || I.isEmptyLine())
+ SR = {SM, LocStart, LocEnd};
+
+ if (!SR)
continue;
auto Region = CounterMappingRegion::makeSkipped(
*CovFileID, SR->LineStart, SR->ColumnStart, SR->LineEnd,
@@ -550,17 +558,18 @@ struct CounterCoverageMappingBuilder
Counter GapRegionCounter;
/// Return a counter for the subtraction of \c RHS from \c LHS
- Counter subtractCounters(Counter LHS, Counter RHS) {
- return Builder.subtract(LHS, RHS);
+ Counter subtractCounters(Counter LHS, Counter RHS, bool Simplify = true) {
+ return Builder.subtract(LHS, RHS, Simplify);
}
/// Return a counter for the sum of \c LHS and \c RHS.
- Counter addCounters(Counter LHS, Counter RHS) {
- return Builder.add(LHS, RHS);
+ Counter addCounters(Counter LHS, Counter RHS, bool Simplify = true) {
+ return Builder.add(LHS, RHS, Simplify);
}
- Counter addCounters(Counter C1, Counter C2, Counter C3) {
- return addCounters(addCounters(C1, C2), C3);
+ Counter addCounters(Counter C1, Counter C2, Counter C3,
+ bool Simplify = true) {
+ return addCounters(addCounters(C1, C2, Simplify), C3, Simplify);
}
/// Return the region counter for the given statement.
@@ -578,7 +587,7 @@ struct CounterCoverageMappingBuilder
Optional<SourceLocation> EndLoc = None,
Optional<Counter> FalseCount = None) {
- if (StartLoc && !FalseCount.hasValue()) {
+ if (StartLoc && !FalseCount) {
MostRecentLocation = *StartLoc;
}
@@ -1317,11 +1326,16 @@ struct CounterCoverageMappingBuilder
const SwitchCase *Case = S->getSwitchCaseList();
for (; Case; Case = Case->getNextSwitchCase()) {
HasDefaultCase = HasDefaultCase || isa<DefaultStmt>(Case);
- CaseCountSum = addCounters(CaseCountSum, getRegionCounter(Case));
+ CaseCountSum =
+ addCounters(CaseCountSum, getRegionCounter(Case), /*Simplify=*/false);
createSwitchCaseRegion(
Case, getRegionCounter(Case),
subtractCounters(ParentCount, getRegionCounter(Case)));
}
+ // Simplify is skipped while building the counters above: it can get really
+ // slow for switches with thousands of cases. Instead, trigger a single
+ // simplification pass by adding zero to the accumulated sum.
+ CaseCountSum = addCounters(CaseCountSum, Counter::getZero());
// If no explicit default case exists, create a branch region to represent
// the hidden branch, which will be added later by the CodeGen. This region
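
The Simplify flag added through addCounters/subtractCounters above exists for exactly this switch case: simplifying after every addition is roughly quadratic in the number of cases, so the loop accumulates unsimplified and a single add of Counter::getZero() normalizes the result once. The pattern, extracted from the hunk:

    Counter CaseCountSum = Counter::getZero();
    for (const SwitchCase *Case = S->getSwitchCaseList(); Case;
         Case = Case->getNextSwitchCase())
      CaseCountSum = addCounters(CaseCountSum, getRegionCounter(Case),
                                 /*Simplify=*/false);
    // One simplification pass over the whole sum, not one per case.
    CaseCountSum = addCounters(CaseCountSum, Counter::getZero());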
diff --git a/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.h b/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.h
index ae4f435d4ff3..f5282601b640 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/CoverageMappingGen.h
@@ -31,15 +31,29 @@ class Decl;
class Stmt;
struct SkippedRange {
+ enum Kind {
+ PPIfElse, // Preprocessor #if/#else ...
+ EmptyLine,
+ Comment,
+ };
+
SourceRange Range;
// The location of the token before the skipped source range.
SourceLocation PrevTokLoc;
// The location of the token after the skipped source range.
SourceLocation NextTokLoc;
+ // The kind of this skipped range.
+ Kind RangeKind;
+
+ bool isComment() { return RangeKind == Comment; }
+ bool isEmptyLine() { return RangeKind == EmptyLine; }
+ bool isPPIfElse() { return RangeKind == PPIfElse; }
- SkippedRange(SourceRange Range, SourceLocation PrevTokLoc = SourceLocation(),
+ SkippedRange(SourceRange Range, Kind K,
+ SourceLocation PrevTokLoc = SourceLocation(),
SourceLocation NextTokLoc = SourceLocation())
- : Range(Range), PrevTokLoc(PrevTokLoc), NextTokLoc(NextTokLoc) {}
+ : Range(Range), PrevTokLoc(PrevTokLoc), NextTokLoc(NextTokLoc),
+ RangeKind(K) {}
};
/// Stores additional source code information like skipped ranges which
@@ -62,7 +76,7 @@ public:
std::vector<SkippedRange> &getSkippedRanges() { return SkippedRanges; }
- void AddSkippedRange(SourceRange Range);
+ void AddSkippedRange(SourceRange Range, SkippedRange::Kind RangeKind);
void SourceRangeSkipped(SourceRange Range, SourceLocation EndifLoc) override;
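
With the Kind tag threaded through, consumers can distinguish comment skips, which may share a line with real code and must be shrunk, from preprocessor and empty-line skips, which are safe to emit verbatim. The dispatch in the CoverageMappingGen.cpp hunk above reduces to:

    Optional<SpellingRegion> SR;
    if (I.isComment())   // may overlap code: shrink around adjacent tokens
      SR = adjustSkippedRange(SM, LocStart, LocEnd, I.PrevTokLoc, I.NextTokLoc);
    else if (I.isPPIfElse() || I.isEmptyLine())
      SR = SpellingRegion(SM, LocStart, LocEnd); // whole-line: take as-is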
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
index 2979d92c8417..f0003c4aab78 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ItaniumCXXABI.cpp
@@ -668,8 +668,8 @@ CGCallee ItaniumCXXABI::EmitLoadOfMemberFunctionPointer(
CGM.HasHiddenLTOVisibility(RD);
bool ShouldEmitWPDInfo =
CGM.getCodeGenOpts().WholeProgramVTables &&
- // Don't insert type tests if we are forcing public std visibility.
- !CGM.HasLTOVisibilityPublicStd(RD);
+ // Don't insert type tests if we are forcing public visibility.
+ !CGM.AlwaysHasLTOVisibilityPublic(RD);
llvm::Value *VirtualFn = nullptr;
{
@@ -955,14 +955,16 @@ ItaniumCXXABI::EmitMemberPointerConversion(const CastExpr *E,
adj = llvm::ConstantInt::get(adj->getType(), offset);
}
- llvm::Constant *srcAdj = llvm::ConstantExpr::getExtractValue(src, 1);
+ llvm::Constant *srcAdj = src->getAggregateElement(1);
llvm::Constant *dstAdj;
if (isDerivedToBase)
dstAdj = llvm::ConstantExpr::getNSWSub(srcAdj, adj);
else
dstAdj = llvm::ConstantExpr::getNSWAdd(srcAdj, adj);
- return llvm::ConstantExpr::getInsertValue(src, dstAdj, 1);
+ llvm::Constant *res = ConstantFoldInsertValueInstruction(src, dstAdj, 1);
+ assert(res != nullptr && "Folding must succeed");
+ return res;
}
llvm::Constant *
@@ -1925,7 +1927,7 @@ CGCallee ItaniumCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
llvm::Value *VFunc;
if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
VFunc = CGF.EmitVTableTypeCheckedLoad(
- MethodDecl->getParent(), VTable,
+ MethodDecl->getParent(), VTable, TyPtr,
VTableIndex * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
} else {
CGF.EmitTypeMetadataCodeForVCall(MethodDecl->getParent(), VTable, Loc);
@@ -3680,12 +3682,14 @@ llvm::Constant *ItaniumRTTIBuilder::BuildTypeInfo(QualType Ty) {
llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
llvm::GlobalValue::DefaultStorageClass;
- if (CGM.getTriple().isWindowsItaniumEnvironment()) {
- auto RD = Ty->getAsCXXRecordDecl();
- if (RD && RD->hasAttr<DLLExportAttr>())
+ if (auto RD = Ty->getAsCXXRecordDecl()) {
+ if ((CGM.getTriple().isWindowsItaniumEnvironment() &&
+ RD->hasAttr<DLLExportAttr>()) ||
+ (CGM.shouldMapVisibilityToDLLExport(RD) &&
+ !llvm::GlobalValue::isLocalLinkage(Linkage) &&
+ llvmVisibility == llvm::GlobalValue::DefaultVisibility))
DLLStorageClass = llvm::GlobalValue::DLLExportStorageClass;
}
-
return BuildTypeInfo(Ty, Linkage, llvmVisibility, DLLStorageClass);
}
@@ -4168,9 +4172,9 @@ void ItaniumCXXABI::EmitFundamentalRTTIDescriptors(const CXXRecordDecl *RD) {
getContext().Char32Ty
};
llvm::GlobalValue::DLLStorageClassTypes DLLStorageClass =
- RD->hasAttr<DLLExportAttr>()
- ? llvm::GlobalValue::DLLExportStorageClass
- : llvm::GlobalValue::DefaultStorageClass;
+ RD->hasAttr<DLLExportAttr>() || CGM.shouldMapVisibilityToDLLExport(RD)
+ ? llvm::GlobalValue::DLLExportStorageClass
+ : llvm::GlobalValue::DefaultStorageClass;
llvm::GlobalValue::VisibilityTypes Visibility =
CodeGenModule::GetLLVMVisibility(RD->getVisibility());
for (const QualType &FundamentalType : FundamentalTypes) {
@@ -4472,7 +4476,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
// pad. The best solution is to fix the personality function.
} else {
// Pull the pointer for the reference type off.
- llvm::Type *PtrTy = LLVMCatchTy->getPointerElementType();
+ llvm::Type *PtrTy = CGF.ConvertTypeForMem(CaughtType);
// Create the temporary and write the adjusted pointer into it.
Address ExnPtrTmp =
@@ -4555,7 +4559,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
if (!copyExpr) {
llvm::Value *rawAdjustedExn = CallBeginCatch(CGF, Exn, true);
Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
- caughtExnAlignment);
+ LLVMCatchTy, caughtExnAlignment);
LValue Dest = CGF.MakeAddrLValue(ParamAddr, CatchType);
LValue Src = CGF.MakeAddrLValue(adjustedExn, CatchType);
CGF.EmitAggregateCopy(Dest, Src, CatchType, AggValueSlot::DoesNotOverlap);
@@ -4569,7 +4573,7 @@ static void InitCatchParam(CodeGenFunction &CGF,
// Cast that to the appropriate type.
Address adjustedExn(CGF.Builder.CreateBitCast(rawAdjustedExn, PtrTy),
- caughtExnAlignment);
+ LLVMCatchTy, caughtExnAlignment);
// The copy expression is defined in terms of an OpaqueValueExpr.
// Find it and map it to the adjusted expression.
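
A pattern worth calling out once, since it recurs through the rest of this file and throughout MicrosoftCXXABI.cpp and TargetInfo.cpp below: as part of the opaque-pointer migration, clang::CodeGen::Address now carries its element type explicitly rather than deriving it from the pointer's pointee type, so two-argument constructions grow a type argument:

    // Before (typed pointers): element type recovered from Ptr's pointee.
    //   Address Addr(Ptr, Align);
    // After (opaque pointers): element type stated explicitly.
    Address Addr(Ptr, /*ElementTy=*/CGF.Int8Ty, Align);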
diff --git a/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.cpp b/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.cpp
index 92800e738b62..2f09fd2b6c15 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.cpp
@@ -167,7 +167,7 @@ void MacroPPCallbacks::FileChanged(SourceLocation Loc, FileChangeReason Reason,
void MacroPPCallbacks::InclusionDirective(
SourceLocation HashLoc, const Token &IncludeTok, StringRef FileName,
- bool IsAngled, CharSourceRange FilenameRange, const FileEntry *File,
+ bool IsAngled, CharSourceRange FilenameRange, Optional<FileEntryRef> File,
StringRef SearchPath, StringRef RelativePath, const Module *Imported,
SrcMgr::CharacteristicKind FileType) {
diff --git a/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.h b/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.h
index d249b5b0eb88..01041b16e4b7 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/MacroPPCallbacks.h
@@ -100,9 +100,9 @@ public:
/// Callback invoked whenever a directive (#xxx) is processed.
void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
StringRef FileName, bool IsAngled,
- CharSourceRange FilenameRange, const FileEntry *File,
- StringRef SearchPath, StringRef RelativePath,
- const Module *Imported,
+ CharSourceRange FilenameRange,
+ Optional<FileEntryRef> File, StringRef SearchPath,
+ StringRef RelativePath, const Module *Imported,
SrcMgr::CharacteristicKind FileType) override;
/// Hook called whenever a macro definition is seen.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp b/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
index e00ff2b68719..2bc1e8e8c5b9 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/MicrosoftCXXABI.cpp
@@ -47,7 +47,11 @@ public:
: CGCXXABI(CGM), BaseClassDescriptorType(nullptr),
ClassHierarchyDescriptorType(nullptr),
CompleteObjectLocatorType(nullptr), CatchableTypeType(nullptr),
- ThrowInfoType(nullptr) {}
+ ThrowInfoType(nullptr) {
+ assert(!(CGM.getLangOpts().isExplicitDefaultVisibilityExportMapping() ||
+ CGM.getLangOpts().isAllDefaultVisibilityExportMapping()) &&
+ "visibility export mapping option unimplemented in this ABI");
+ }
bool HasThisReturn(GlobalDecl GD) const override;
bool hasMostDerivedReturn(GlobalDecl GD) const override;
@@ -948,7 +952,8 @@ MicrosoftCXXABI::performBaseAdjustment(CodeGenFunction &CGF, Address Value,
Value.getElementType(), Value.getPointer(), Offset);
CharUnits VBaseAlign =
CGF.CGM.getVBaseAlignment(Value.getAlignment(), SrcDecl, PolymorphicBase);
- return std::make_tuple(Address(Ptr, VBaseAlign), Offset, PolymorphicBase);
+ return std::make_tuple(Address(Ptr, CGF.Int8Ty, VBaseAlign), Offset,
+ PolymorphicBase);
}
bool MicrosoftCXXABI::shouldTypeidBeNullChecked(bool IsDeref,
@@ -1470,7 +1475,7 @@ Address MicrosoftCXXABI::adjustThisArgumentForVirtualFunctionCall(
Result.getElementType(), Result.getPointer(), VBaseOffset);
CharUnits VBaseAlign =
CGF.CGM.getVBaseAlignment(Result.getAlignment(), Derived, VBase);
- Result = Address(VBasePtr, VBaseAlign);
+ Result = Address(VBasePtr, CGF.Int8Ty, VBaseAlign);
}
if (!StaticOffset.isZero()) {
assert(StaticOffset.isPositive());
@@ -1946,7 +1951,7 @@ CGCallee MicrosoftCXXABI::getVirtualFunctionPointer(CodeGenFunction &CGF,
llvm::Value *VFunc;
if (CGF.ShouldEmitVTableTypeCheckedLoad(MethodDecl->getParent())) {
VFunc = CGF.EmitVTableTypeCheckedLoad(
- getObjectWithVPtr(), VTable,
+ getObjectWithVPtr(), VTable, Ty,
ML.Index * CGM.getContext().getTargetInfo().getPointerWidth(0) / 8);
} else {
if (CGM.getCodeGenOpts().PrepareForLTO)
@@ -2217,10 +2222,10 @@ llvm::Value *MicrosoftCXXABI::performThisAdjustment(CodeGenFunction &CGF,
assert(TA.Virtual.Microsoft.VBPtrOffset > 0);
assert(TA.Virtual.Microsoft.VBOffsetOffset >= 0);
llvm::Value *VBPtr;
- llvm::Value *VBaseOffset =
- GetVBaseOffsetFromVBPtr(CGF, Address(V, CGF.getPointerAlign()),
- -TA.Virtual.Microsoft.VBPtrOffset,
- TA.Virtual.Microsoft.VBOffsetOffset, &VBPtr);
+ llvm::Value *VBaseOffset = GetVBaseOffsetFromVBPtr(
+ CGF, Address(V, CGF.Int8Ty, CGF.getPointerAlign()),
+ -TA.Virtual.Microsoft.VBPtrOffset,
+ TA.Virtual.Microsoft.VBOffsetOffset, &VBPtr);
V = CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, VBPtr, VBaseOffset);
}
}
@@ -2432,7 +2437,7 @@ static void emitTlsGuardCheck(CodeGenFunction &CGF, llvm::GlobalValue *TlsGuard,
llvm::BasicBlock *DynInitBB,
llvm::BasicBlock *ContinueBB) {
llvm::LoadInst *TlsGuardValue =
- CGF.Builder.CreateLoad(Address(TlsGuard, CharUnits::One()));
+ CGF.Builder.CreateLoad(Address(TlsGuard, CGF.Int8Ty, CharUnits::One()));
llvm::Value *CmpResult =
CGF.Builder.CreateICmpEQ(TlsGuardValue, CGF.Builder.getInt8(0));
CGF.Builder.CreateCondBr(CmpResult, DynInitBB, ContinueBB);
@@ -2483,7 +2488,7 @@ LValue MicrosoftCXXABI::EmitThreadLocalVarDeclLValue(CodeGenFunction &CGF,
V = CGF.Builder.CreateBitCast(V, RealVarTy->getPointerTo(AS));
CharUnits Alignment = CGF.getContext().getDeclAlign(VD);
- Address Addr(V, Alignment);
+ Address Addr(V, RealVarTy, Alignment);
LValue LV = VD->getType()->isReferenceType()
? CGF.EmitLoadOfReferenceLValue(Addr, VD->getType(),
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ModuleBuilder.cpp b/contrib/llvm-project/clang/lib/CodeGen/ModuleBuilder.cpp
index f6642a79e1e4..8e97a298ce7f 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ModuleBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ModuleBuilder.cpp
@@ -134,7 +134,14 @@ namespace {
llvm::LLVMContext &C) {
assert(!M && "Replacing existing Module?");
M.reset(new llvm::Module(ExpandModuleName(ModuleName, CodeGenOpts), C));
+
+ std::unique_ptr<CodeGenModule> OldBuilder = std::move(Builder);
+
Initialize(*Ctx);
+
+ if (OldBuilder)
+ OldBuilder->moveLazyEmissionStates(Builder.get());
+
return M.get();
}
@@ -146,6 +153,11 @@ namespace {
const auto &SDKVersion = Ctx->getTargetInfo().getSDKVersion();
if (!SDKVersion.empty())
M->setSDKVersion(SDKVersion);
+ if (const auto *TVT = Ctx->getTargetInfo().getDarwinTargetVariantTriple())
+ M->setDarwinTargetVariantTriple(TVT->getTriple());
+ if (auto TVSDKVersion =
+ Ctx->getTargetInfo().getDarwinTargetVariantSDKVersion())
+ M->setDarwinTargetVariantSDKVersion(*TVSDKVersion);
Builder.reset(new CodeGen::CodeGenModule(Context, HeaderSearchOpts,
PreprocessorOpts, CodeGenOpts,
*M, Diags, CoverageInfo));
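
The StartModule change matters for incremental compilation (clang-repl and friends): when a new llvm::Module is spun up, declarations the previous CodeGenModule had deferred would otherwise be lost. The hand-off keeps the old builder alive across Initialize and then migrates its pending state:

    // Preserve lazily-deferred declarations across the module swap.
    std::unique_ptr<CodeGenModule> OldBuilder = std::move(Builder);
    Initialize(*Ctx);                    // re-creates Builder for the new M
    if (OldBuilder)
      OldBuilder->moveLazyEmissionStates(Builder.get());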
diff --git a/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp b/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
index 9fe7e5d1f5c3..d03e5bd50873 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/ObjectFilePCHContainerOperations.cpp
@@ -96,6 +96,10 @@ class PCHContainerGenerator : public ASTConsumer {
}
bool VisitFunctionDecl(FunctionDecl *D) {
+ // Skip deduction guides.
+ if (isa<CXXDeductionGuideDecl>(D))
+ return true;
+
if (isa<CXXMethodDecl>(D))
// This is not yet supported. Constructing the `this' argument
// mandates a CodeGenFunction.
diff --git a/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.cpp b/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.cpp
index 009965a36c39..5f4eb9be981f 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.cpp
@@ -22,84 +22,87 @@ using namespace CodeGen;
SanitizerMetadata::SanitizerMetadata(CodeGenModule &CGM) : CGM(CGM) {}
-static bool isAsanHwasanOrMemTag(const SanitizerSet& SS) {
+static bool isAsanHwasanOrMemTag(const SanitizerSet &SS) {
return SS.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress |
- SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress |
- SanitizerKind::MemTag);
+ SanitizerKind::HWAddress | SanitizerKind::MemTag);
}
-void SanitizerMetadata::reportGlobalToASan(llvm::GlobalVariable *GV,
- SourceLocation Loc, StringRef Name,
- QualType Ty, bool IsDynInit,
- bool IsExcluded) {
- if (!isAsanHwasanOrMemTag(CGM.getLangOpts().Sanitize))
+SanitizerMask expandKernelSanitizerMasks(SanitizerMask Mask) {
+ if (Mask & (SanitizerKind::Address | SanitizerKind::KernelAddress))
+ Mask |= SanitizerKind::Address | SanitizerKind::KernelAddress;
+ // Note: KHWASan doesn't support globals.
+ return Mask;
+}
+
+void SanitizerMetadata::reportGlobal(llvm::GlobalVariable *GV,
+ SourceLocation Loc, StringRef Name,
+ QualType Ty,
+ SanitizerMask NoSanitizeAttrMask,
+ bool IsDynInit) {
+ SanitizerSet FsanitizeArgument = CGM.getLangOpts().Sanitize;
+ if (!isAsanHwasanOrMemTag(FsanitizeArgument))
return;
- IsDynInit &= !CGM.isInNoSanitizeList(GV, Loc, Ty, "init");
- IsExcluded |= CGM.isInNoSanitizeList(GV, Loc, Ty);
-
- llvm::Metadata *LocDescr = nullptr;
- llvm::Metadata *GlobalName = nullptr;
- llvm::LLVMContext &VMContext = CGM.getLLVMContext();
- if (!IsExcluded) {
- // Don't generate source location and global name if it is on
- // the NoSanitizeList - it won't be instrumented anyway.
- LocDescr = getLocationMetadata(Loc);
- if (!Name.empty())
- GlobalName = llvm::MDString::get(VMContext, Name);
+
+ FsanitizeArgument.Mask = expandKernelSanitizerMasks(FsanitizeArgument.Mask);
+ NoSanitizeAttrMask = expandKernelSanitizerMasks(NoSanitizeAttrMask);
+ SanitizerSet NoSanitizeAttrSet = {NoSanitizeAttrMask &
+ FsanitizeArgument.Mask};
+
+ llvm::GlobalVariable::SanitizerMetadata Meta;
+ if (GV->hasSanitizerMetadata())
+ Meta = GV->getSanitizerMetadata();
+
+ Meta.NoAddress |= NoSanitizeAttrSet.hasOneOf(SanitizerKind::Address);
+ Meta.NoAddress |= CGM.isInNoSanitizeList(
+ FsanitizeArgument.Mask & SanitizerKind::Address, GV, Loc, Ty);
+
+ Meta.NoHWAddress |= NoSanitizeAttrSet.hasOneOf(SanitizerKind::HWAddress);
+ Meta.NoHWAddress |= CGM.isInNoSanitizeList(
+ FsanitizeArgument.Mask & SanitizerKind::HWAddress, GV, Loc, Ty);
+
+ Meta.NoMemtag |= NoSanitizeAttrSet.hasOneOf(SanitizerKind::MemTag);
+ Meta.NoMemtag |= CGM.isInNoSanitizeList(
+ FsanitizeArgument.Mask & SanitizerKind::MemTag, GV, Loc, Ty);
+
+ if (FsanitizeArgument.has(SanitizerKind::Address)) {
+ // TODO(hctim): Make this conditional when we migrate off llvm.asan.globals.
+ IsDynInit &= !CGM.isInNoSanitizeList(SanitizerKind::Address |
+ SanitizerKind::KernelAddress,
+ GV, Loc, Ty, "init");
+ Meta.IsDynInit = IsDynInit;
}
- llvm::Metadata *GlobalMetadata[] = {
- llvm::ConstantAsMetadata::get(GV), LocDescr, GlobalName,
- llvm::ConstantAsMetadata::get(
- llvm::ConstantInt::get(llvm::Type::getInt1Ty(VMContext), IsDynInit)),
- llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
- llvm::Type::getInt1Ty(VMContext), IsExcluded))};
-
- llvm::MDNode *ThisGlobal = llvm::MDNode::get(VMContext, GlobalMetadata);
- llvm::NamedMDNode *AsanGlobals =
- CGM.getModule().getOrInsertNamedMetadata("llvm.asan.globals");
- AsanGlobals->addOperand(ThisGlobal);
+ GV->setSanitizerMetadata(Meta);
}
-void SanitizerMetadata::reportGlobalToASan(llvm::GlobalVariable *GV,
- const VarDecl &D, bool IsDynInit) {
+void SanitizerMetadata::reportGlobal(llvm::GlobalVariable *GV, const VarDecl &D,
+ bool IsDynInit) {
if (!isAsanHwasanOrMemTag(CGM.getLangOpts().Sanitize))
return;
std::string QualName;
llvm::raw_string_ostream OS(QualName);
D.printQualifiedName(OS);
- bool IsExcluded = false;
- for (auto Attr : D.specific_attrs<NoSanitizeAttr>())
- if (Attr->getMask() & SanitizerKind::Address)
- IsExcluded = true;
- reportGlobalToASan(GV, D.getLocation(), OS.str(), D.getType(), IsDynInit,
- IsExcluded);
+ auto getNoSanitizeMask = [](const VarDecl &D) {
+ if (D.hasAttr<DisableSanitizerInstrumentationAttr>())
+ return SanitizerKind::All;
+
+ SanitizerMask NoSanitizeMask;
+ for (auto *Attr : D.specific_attrs<NoSanitizeAttr>())
+ NoSanitizeMask |= Attr->getMask();
+
+ return NoSanitizeMask;
+ };
+
+ reportGlobal(GV, D.getLocation(), OS.str(), D.getType(), getNoSanitizeMask(D),
+ IsDynInit);
}
void SanitizerMetadata::disableSanitizerForGlobal(llvm::GlobalVariable *GV) {
- // For now, just make sure the global is not modified by the ASan
- // instrumentation.
- if (isAsanHwasanOrMemTag(CGM.getLangOpts().Sanitize))
- reportGlobalToASan(GV, SourceLocation(), "", QualType(), false, true);
+ reportGlobal(GV, SourceLocation(), "", QualType(), SanitizerKind::All);
}
void SanitizerMetadata::disableSanitizerForInstruction(llvm::Instruction *I) {
- I->setMetadata(CGM.getModule().getMDKindID("nosanitize"),
+ I->setMetadata(llvm::LLVMContext::MD_nosanitize,
llvm::MDNode::get(CGM.getLLVMContext(), None));
}
-
-llvm::MDNode *SanitizerMetadata::getLocationMetadata(SourceLocation Loc) {
- PresumedLoc PLoc = CGM.getContext().getSourceManager().getPresumedLoc(Loc);
- if (!PLoc.isValid())
- return nullptr;
- llvm::LLVMContext &VMContext = CGM.getLLVMContext();
- llvm::Metadata *LocMetadata[] = {
- llvm::MDString::get(VMContext, PLoc.getFilename()),
- llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), PLoc.getLine())),
- llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
- llvm::Type::getInt32Ty(VMContext), PLoc.getColumn())),
- };
- return llvm::MDNode::get(VMContext, LocMetadata);
-}
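
The SanitizerMetadata rewrite retires the llvm.asan.globals named-metadata node in favor of llvm::GlobalVariable::SanitizerMetadata carried on the global itself, with one no-instrument bit per tool. Each bit is an intersection of the no-sanitize attribute mask with the enabled sanitizers, plus an ignorelist lookup; for address sanitizer, as in the hunk:

    llvm::GlobalVariable::SanitizerMetadata Meta;
    if (GV->hasSanitizerMetadata())
      Meta = GV->getSanitizerMetadata();
    // Excluded via __attribute__((no_sanitize("address"))) ...
    Meta.NoAddress |= NoSanitizeAttrSet.hasOneOf(SanitizerKind::Address);
    // ... or via an ignorelist entry for the enabled ASan flavor.
    Meta.NoAddress |= CGM.isInNoSanitizeList(
        FsanitizeArgument.Mask & SanitizerKind::Address, GV, Loc, Ty);
    GV->setSanitizerMetadata(Meta);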
diff --git a/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.h b/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.h
index 440a54590acc..bcad32ce31df 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/SanitizerMetadata.h
@@ -14,13 +14,14 @@
#include "clang/AST/Type.h"
#include "clang/Basic/LLVM.h"
+#include "clang/Basic/Sanitizers.h"
#include "clang/Basic/SourceLocation.h"
namespace llvm {
class GlobalVariable;
class Instruction;
class MDNode;
-}
+} // namespace llvm
namespace clang {
class VarDecl;
@@ -34,19 +35,19 @@ class SanitizerMetadata {
void operator=(const SanitizerMetadata &) = delete;
CodeGenModule &CGM;
+
public:
SanitizerMetadata(CodeGenModule &CGM);
- void reportGlobalToASan(llvm::GlobalVariable *GV, const VarDecl &D,
- bool IsDynInit = false);
- void reportGlobalToASan(llvm::GlobalVariable *GV, SourceLocation Loc,
- StringRef Name, QualType Ty, bool IsDynInit = false,
- bool IsExcluded = false);
+ void reportGlobal(llvm::GlobalVariable *GV, const VarDecl &D,
+ bool IsDynInit = false);
+ void reportGlobal(llvm::GlobalVariable *GV, SourceLocation Loc,
+ StringRef Name, QualType Ty = {},
+ SanitizerMask NoSanitizeAttrMask = {},
+ bool IsDynInit = false);
void disableSanitizerForGlobal(llvm::GlobalVariable *GV);
void disableSanitizerForInstruction(llvm::Instruction *I);
-private:
- llvm::MDNode *getLocationMetadata(SourceLocation Loc);
};
-} // end namespace CodeGen
-} // end namespace clang
+} // end namespace CodeGen
+} // end namespace clang
#endif
diff --git a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
index d83bc9e529a6..8eaed1db8e7d 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
+++ b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.cpp
@@ -101,6 +101,11 @@ Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
return Address::invalid();
}
+static llvm::Type *getVAListElementType(CodeGenFunction &CGF) {
+ return CGF.ConvertTypeForMem(
+ CGF.getContext().getBuiltinVaListType()->getPointeeType());
+}
+
bool ABIInfo::isPromotableIntegerTypeForABI(QualType Ty) const {
if (Ty->isPromotableIntegerType())
return true;
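
getVAListElementType is another opaque-pointer helper: the various EmitVAArg implementations below load the va_list cursor and must now state what it points at. Its typical use is the empty-record fast path that recurs across the ARM, AArch64, RISC-V and CSKY emitters later in this file:

    // Empty records consume no argument slot; just re-type the cursor.
    if (isEmptyRecord(getContext(), Ty, true)) {
      Address Addr = Address(CGF.Builder.CreateLoad(VAListAddr),
                             getVAListElementType(CGF), SlotSize);
      return CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
    }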
@@ -235,6 +240,11 @@ bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
return false;
}
+bool ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate() const {
+ // For compatibility with GCC, ignore empty bitfields in C++ mode.
+ return getContext().getLangOpts().CPlusPlus;
+}
+
LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
raw_ostream &OS = llvm::errs();
OS << "(ABIArgInfo Kind=";
@@ -325,9 +335,9 @@ static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
Address Addr = Address::invalid();
if (AllowHigherAlign && DirectAlign > SlotSize) {
Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
- DirectAlign);
+ CGF.Int8Ty, DirectAlign);
} else {
- Addr = Address(Ptr, SlotSize);
+ Addr = Address(Ptr, CGF.Int8Ty, SlotSize);
}
// Advance the pointer past the argument, then store that back.
@@ -376,21 +386,19 @@ static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
}
// Cast the address we've calculated to the right type.
- llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
+ llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy), *ElementTy = DirectTy;
if (IsIndirect)
DirectTy = DirectTy->getPointerTo(0);
- Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy,
- DirectSize, DirectAlign,
- SlotSizeAndAlign,
- AllowHigherAlign);
+ Address Addr =
+ emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy, DirectSize, DirectAlign,
+ SlotSizeAndAlign, AllowHigherAlign);
if (IsIndirect) {
- Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.Align);
+ Addr = Address(CGF.Builder.CreateLoad(Addr), ElementTy, ValueInfo.Align);
}
return Addr;
-
}
static Address complexTempStructure(CodeGenFunction &CGF, Address VAListAddr,
@@ -691,11 +699,11 @@ Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
CharUnits TyAlignForABI = TyInfo.Align;
- llvm::Type *BaseTy =
- llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
+ llvm::Type *ElementTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Type *BaseTy = llvm::PointerType::getUnqual(ElementTy);
llvm::Value *Addr =
CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
- return Address(Addr, TyAlignForABI);
+ return Address(Addr, ElementTy, TyAlignForABI);
} else {
assert((AI.isDirect() || AI.isExtend()) &&
"Unexpected ArgInfo Kind in generic VAArg emitter!");
@@ -710,7 +718,8 @@ Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
"Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");
Address Temp = CGF.CreateMemTemp(Ty, "varet");
- Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty));
+ Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(),
+ CGF.ConvertTypeForMem(Ty));
CGF.Builder.CreateStore(Val, Temp);
return Temp;
}
@@ -1352,8 +1361,8 @@ void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
ResultTruncRegTypes.push_back(CoerceTy);
// Coerce the integer by bitcasting the return slot pointer.
- ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(CGF),
- CoerceTy->getPointerTo()));
+ ReturnSlot.setAddress(
+ CGF.Builder.CreateElementBitCast(ReturnSlot.getAddress(CGF), CoerceTy));
ResultRegDests.push_back(ReturnSlot);
rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
@@ -2293,6 +2302,8 @@ class X86_64ABIInfo : public SwiftABIInfo {
/// \param isNamedArg - Whether the argument in question is a "named"
/// argument, as used in AMD64-ABI 3.5.7.
///
+ /// \param IsRegCall - Whether the calling convention is regcall.
+ ///
/// If a word is unused its result will be NoClass; if a type should
/// be passed in Memory then at least the classification of \arg Lo
/// will be Memory.
@@ -2302,7 +2313,7 @@ class X86_64ABIInfo : public SwiftABIInfo {
/// If the \arg Lo class is ComplexX87, then the \arg Hi class will
/// also be ComplexX87.
void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
- bool isNamedArg) const;
+ bool isNamedArg, bool IsRegCall = false) const;
llvm::Type *GetByteVectorType(QualType Ty) const;
llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
@@ -2327,13 +2338,16 @@ class X86_64ABIInfo : public SwiftABIInfo {
ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
unsigned &neededInt, unsigned &neededSSE,
- bool isNamedArg) const;
+ bool isNamedArg,
+ bool IsRegCall = false) const;
ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
- unsigned &NeededSSE) const;
+ unsigned &NeededSSE,
+ unsigned &MaxVectorWidth) const;
ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
- unsigned &NeededSSE) const;
+ unsigned &NeededSSE,
+ unsigned &MaxVectorWidth) const;
bool IsIllegalVectorType(QualType Ty) const;
@@ -2355,7 +2369,7 @@ class X86_64ABIInfo : public SwiftABIInfo {
return false;
const llvm::Triple &Triple = getTarget().getTriple();
- if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
+ if (Triple.isOSDarwin() || Triple.isPS())
return false;
if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
return false;
@@ -2828,8 +2842,8 @@ X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
return SSE;
}
-void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
- Class &Lo, Class &Hi, bool isNamedArg) const {
+void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase, Class &Lo,
+ Class &Hi, bool isNamedArg, bool IsRegCall) const {
// FIXME: This code can be simplified by introducing a simple value class for
// Class pairs with appropriate constructor methods for the various
// situations.
@@ -3027,7 +3041,9 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
// AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
// than eight eightbytes, ..., it has class MEMORY.
- if (Size > 512)
+ // The regcall ABI places no size limit on an object; the only limit is
+ // the number of free registers, which is checked in computeInfo.
+ if (!IsRegCall && Size > 512)
return;
// AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
@@ -3120,7 +3136,7 @@ void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
unsigned idx = 0;
bool UseClang11Compat = getContext().getLangOpts().getClangABICompat() <=
LangOptions::ClangABI::Ver11 ||
- getContext().getTargetInfo().getTriple().isPS4();
+ getContext().getTargetInfo().getTriple().isPS();
bool IsUnion = RT->isUnionType() && !UseClang11Compat;
for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
@@ -3734,15 +3750,14 @@ classifyReturnType(QualType RetTy) const {
return ABIArgInfo::getDirect(ResType);
}
-ABIArgInfo X86_64ABIInfo::classifyArgumentType(
- QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
- bool isNamedArg)
- const
-{
+ABIArgInfo
+X86_64ABIInfo::classifyArgumentType(QualType Ty, unsigned freeIntRegs,
+ unsigned &neededInt, unsigned &neededSSE,
+ bool isNamedArg, bool IsRegCall) const {
Ty = useFirstFieldIfTransparentUnion(Ty);
X86_64ABIInfo::Class Lo, Hi;
- classify(Ty, 0, Lo, Hi, isNamedArg);
+ classify(Ty, 0, Lo, Hi, isNamedArg, IsRegCall);
// Check some invariants.
// FIXME: Enforce these by construction.
@@ -3865,7 +3880,8 @@ ABIArgInfo X86_64ABIInfo::classifyArgumentType(
ABIArgInfo
X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
- unsigned &NeededSSE) const {
+ unsigned &NeededSSE,
+ unsigned &MaxVectorWidth) const {
auto RT = Ty->getAs<RecordType>();
assert(RT && "classifyRegCallStructType only valid with struct types");
@@ -3880,7 +3896,8 @@ X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
}
for (const auto &I : CXXRD->bases())
- if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE)
+ if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE,
+ MaxVectorWidth)
.isIndirect()) {
NeededInt = NeededSSE = 0;
return getIndirectReturnResult(Ty);
@@ -3889,20 +3906,27 @@ X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
// Sum up members
for (const auto *FD : RT->getDecl()->fields()) {
- if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) {
- if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE)
+ QualType MTy = FD->getType();
+ if (MTy->isRecordType() && !MTy->isUnionType()) {
+ if (classifyRegCallStructTypeImpl(MTy, NeededInt, NeededSSE,
+ MaxVectorWidth)
.isIndirect()) {
NeededInt = NeededSSE = 0;
return getIndirectReturnResult(Ty);
}
} else {
unsigned LocalNeededInt, LocalNeededSSE;
- if (classifyArgumentType(FD->getType(), UINT_MAX, LocalNeededInt,
- LocalNeededSSE, true)
+ if (classifyArgumentType(MTy, UINT_MAX, LocalNeededInt, LocalNeededSSE,
+ true, true)
.isIndirect()) {
NeededInt = NeededSSE = 0;
return getIndirectReturnResult(Ty);
}
+ if (const auto *AT = getContext().getAsConstantArrayType(MTy))
+ MTy = AT->getElementType();
+ if (const auto *VT = MTy->getAs<VectorType>())
+ if (getContext().getTypeSize(VT) > MaxVectorWidth)
+ MaxVectorWidth = getContext().getTypeSize(VT);
NeededInt += LocalNeededInt;
NeededSSE += LocalNeededSSE;
}
@@ -3911,14 +3935,17 @@ X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
return ABIArgInfo::getDirect();
}
-ABIArgInfo X86_64ABIInfo::classifyRegCallStructType(QualType Ty,
- unsigned &NeededInt,
- unsigned &NeededSSE) const {
+ABIArgInfo
+X86_64ABIInfo::classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
+ unsigned &NeededSSE,
+ unsigned &MaxVectorWidth) const {
NeededInt = 0;
NeededSSE = 0;
+ MaxVectorWidth = 0;
- return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE);
+ return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE,
+ MaxVectorWidth);
}
void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
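
The MaxVectorWidth out-parameter added through the regcall classification lets computeInfo record the widest vector member of a register-passed struct via FI.setMaxVectorWidth, which the backend consults when choosing vector register classes. The per-field scan peels constant arrays before testing for a vector type:

    // Track the widest vector field seen while flattening a regcall struct.
    QualType MTy = FD->getType();
    if (const auto *AT = getContext().getAsConstantArrayType(MTy))
      MTy = AT->getElementType();
    if (const auto *VT = MTy->getAs<VectorType>())
      if (getContext().getTypeSize(VT) > MaxVectorWidth)
        MaxVectorWidth = getContext().getTypeSize(VT);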
@@ -3938,13 +3965,13 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
// Keep track of the number of assigned registers.
unsigned FreeIntRegs = IsRegCall ? 11 : 6;
unsigned FreeSSERegs = IsRegCall ? 16 : 8;
- unsigned NeededInt, NeededSSE;
+ unsigned NeededInt = 0, NeededSSE = 0, MaxVectorWidth = 0;
if (!::classifyReturnType(getCXXABI(), FI, *this)) {
if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
!FI.getReturnType()->getTypePtr()->isUnionType()) {
- FI.getReturnInfo() =
- classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE);
+ FI.getReturnInfo() = classifyRegCallStructType(
+ FI.getReturnType(), NeededInt, NeededSSE, MaxVectorWidth);
if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
FreeIntRegs -= NeededInt;
FreeSSERegs -= NeededSSE;
@@ -3967,6 +3994,8 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
// integer register.
if (FI.getReturnInfo().isIndirect())
--FreeIntRegs;
+ else if (NeededSSE && MaxVectorWidth > 0)
+ FI.setMaxVectorWidth(MaxVectorWidth);
// The chain argument effectively gives us another free register.
if (FI.isChainCall())
@@ -3981,7 +4010,8 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
bool IsNamedArg = ArgNo < NumRequiredArgs;
if (IsRegCall && it->type->isStructureOrClassType())
- it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE);
+ it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE,
+ MaxVectorWidth);
else
it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
NeededSSE, IsNamedArg);
@@ -3993,6 +4023,8 @@ void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
FreeIntRegs -= NeededInt;
FreeSSERegs -= NeededSSE;
+ if (MaxVectorWidth > FI.getMaxVectorWidth())
+ FI.setMaxVectorWidth(MaxVectorWidth);
} else {
it->info = getIndirectResult(it->type, FreeIntRegs);
}
@@ -4828,7 +4860,7 @@ Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
- llvm::Type *DirectTy = CGF.ConvertType(Ty);
+ llvm::Type *DirectTy = CGF.ConvertType(Ty), *ElementTy = DirectTy;
if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
// Case 1: consume registers.
@@ -4837,7 +4869,7 @@ Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
CGF.EmitBlock(UsingRegs);
Address RegSaveAreaPtr = Builder.CreateStructGEP(VAList, 4);
- RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),
+ RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr), CGF.Int8Ty,
CharUnits::fromQuantity(8));
assert(RegAddr.getElementType() == CGF.Int8Ty);
@@ -4851,10 +4883,10 @@ Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
// registers we've used by the number of
CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
llvm::Value *RegOffset =
- Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
- RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty,
- RegAddr.getPointer(), RegOffset),
- RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
+ Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
+ RegAddr = Address(
+ Builder.CreateInBoundsGEP(CGF.Int8Ty, RegAddr.getPointer(), RegOffset),
+ CGF.Int8Ty, RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);
// Increase the used-register count.
@@ -4885,14 +4917,15 @@ Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
}
Address OverflowAreaAddr = Builder.CreateStructGEP(VAList, 3);
- Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"),
- OverflowAreaAlign);
+ Address OverflowArea =
+ Address(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"), CGF.Int8Ty,
+ OverflowAreaAlign);
// Round up address of argument to alignment
CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
if (Align > OverflowAreaAlign) {
llvm::Value *Ptr = OverflowArea.getPointer();
OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
- Align);
+ OverflowArea.getElementType(), Align);
}
MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);
@@ -4911,7 +4944,7 @@ Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
// Load the pointer if the argument was passed indirectly.
if (isIndirect) {
- Result = Address(Builder.CreateLoad(Result, "aggr"),
+ Result = Address(Builder.CreateLoad(Result, "aggr"), ElementTy,
getContext().getTypeAlignInChars(Ty));
}
@@ -5185,8 +5218,7 @@ bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
if (isEmptyRecord(getContext(), FT, true))
continue;
- // For compatibility with GCC, ignore empty bitfields in C++ mode.
- if (getContext().getLangOpts().CPlusPlus &&
+ if (isZeroLengthBitfieldPermittedInHomogeneousAggregate() &&
FD->isZeroLengthBitField(getContext()))
continue;
@@ -5483,6 +5515,7 @@ private:
bool isHomogeneousAggregateBaseType(QualType Ty) const override;
bool isHomogeneousAggregateSmallEnough(const Type *Ty,
uint64_t Members) const override;
+ bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;
bool isIllegalVectorType(QualType Ty) const;
@@ -5942,6 +5975,16 @@ bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
return Members <= 4;
}
+bool AArch64ABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate()
+ const {
+ // AAPCS64 says that the rule for whether something is a homogeneous
+ // aggregate is applied to the output of the data layout decision. So
+ // anything that doesn't affect the data layout also does not affect
+ // homogeneity. In particular, zero-length bitfields don't stop a struct
+ // being homogeneous.
+ return true;
+}
+
Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
CodeGenFunction &CGF) const {
ABIArgInfo AI = classifyArgumentType(Ty, /*IsVariadic=*/true,
@@ -6060,9 +6103,9 @@ Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
Address BaseAddr(CGF.Builder.CreateInBoundsGEP(CGF.Int8Ty, reg_top, reg_offs),
- CharUnits::fromQuantity(IsFPR ? 16 : 8));
+ CGF.Int8Ty, CharUnits::fromQuantity(IsFPR ? 16 : 8));
Address RegAddr = Address::invalid();
- llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty), *ElementTy = MemTy;
if (IsIndirect) {
// If it's been passed indirectly (actually a struct), whatever we find from
@@ -6145,8 +6188,8 @@ Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
}
- Address OnStackAddr(OnStackPtr,
- std::max(CharUnits::fromQuantity(8), TyAlign));
+ Address OnStackAddr = Address(OnStackPtr, CGF.Int8Ty,
+ std::max(CharUnits::fromQuantity(8), TyAlign));
// All stack slots are multiples of 8 bytes.
CharUnits StackSlotSize = CharUnits::fromQuantity(8);
@@ -6178,11 +6221,11 @@ Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
//=======================================
CGF.EmitBlock(ContBlock);
- Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
- OnStackAddr, OnStackBlock, "vaargs.addr");
+ Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, OnStackAddr,
+ OnStackBlock, "vaargs.addr");
if (IsIndirect)
- return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"),
+ return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"), ElementTy,
TyAlign);
return ResAddr;
@@ -6201,7 +6244,8 @@ Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
// Empty records are ignored for parameter passing purposes.
if (isEmptyRecord(getContext(), Ty, true)) {
- Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
+ Address Addr = Address(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"),
+ getVAListElementType(CGF), SlotSize);
Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
return Addr;
}
@@ -6310,6 +6354,7 @@ private:
bool isHomogeneousAggregateBaseType(QualType Ty) const override;
bool isHomogeneousAggregateSmallEnough(const Type *Ty,
uint64_t Members) const override;
+ bool isZeroLengthBitfieldPermittedInHomogeneousAggregate() const override;
bool isEffectivelyAAPCS_VFP(unsigned callConvention, bool acceptHalf) const;
@@ -6973,6 +7018,15 @@ bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
return Members <= 4;
}
+bool ARMABIInfo::isZeroLengthBitfieldPermittedInHomogeneousAggregate() const {
+ // AAPCS32 says that the rule for whether something is a homogeneous
+ // aggregate is applied to the output of the data layout decision. So
+ // anything that doesn't affect the data layout also does not affect
+ // homogeneity. In particular, zero-length bitfields don't stop a struct
+ // being homogeneous.
+ return true;
+}
+
bool ARMABIInfo::isEffectivelyAAPCS_VFP(unsigned callConvention,
bool acceptHalf) const {
// Give precedence to user-specified calling conventions.
@@ -6989,7 +7043,8 @@ Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
// Empty records are ignored for parameter passing purposes.
if (isEmptyRecord(getContext(), Ty, true)) {
- Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
+ Address Addr = Address(CGF.Builder.CreateLoad(VAListAddr),
+ getVAListElementType(CGF), SlotSize);
Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
return Addr;
}
@@ -7484,12 +7539,9 @@ QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
// Check the fields.
for (const auto *FD : RD->fields()) {
- // For compatibility with GCC, ignore empty bitfields in C++ mode.
// Unlike isSingleElementStruct(), empty structure and array fields
// do count. So do anonymous bitfields that aren't zero-sized.
- if (getContext().getLangOpts().CPlusPlus &&
- FD->isZeroLengthBitField(getContext()))
- continue;
+
// Like isSingleElementStruct(), ignore C++20 empty data members.
if (FD->hasAttr<NoUniqueAddressAttr>() &&
isEmptyRecord(getContext(), FD->getType(), true))
@@ -7563,16 +7615,15 @@ Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
Address OverflowArgAreaPtr =
CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
Address OverflowArgArea =
- Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
- TyInfo.Align);
+ Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
+ CGF.Int8Ty, TyInfo.Align);
Address MemAddr =
- CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr");
+ CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr");
// Update overflow_arg_area_ptr pointer
- llvm::Value *NewOverflowArgArea =
- CGF.Builder.CreateGEP(OverflowArgArea.getElementType(),
- OverflowArgArea.getPointer(), PaddedSizeV,
- "overflow_arg_area");
+ llvm::Value *NewOverflowArgArea = CGF.Builder.CreateGEP(
+ OverflowArgArea.getElementType(), OverflowArgArea.getPointer(),
+ PaddedSizeV, "overflow_arg_area");
CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
return MemAddr;
@@ -7620,12 +7671,12 @@ Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
Address RegSaveAreaPtr =
CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
llvm::Value *RegSaveArea =
- CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
- Address RawRegAddr(CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, RegOffset,
- "raw_reg_addr"),
- PaddedSize);
+ CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
+ Address RawRegAddr(
+ CGF.Builder.CreateGEP(CGF.Int8Ty, RegSaveArea, RegOffset, "raw_reg_addr"),
+ CGF.Int8Ty, PaddedSize);
Address RegAddr =
- CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr");
+ CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr");
// Update the register count
llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
@@ -7641,10 +7692,10 @@ Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
Address OverflowArgAreaPtr =
CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
Address OverflowArgArea =
- Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
- PaddedSize);
+ Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
+ CGF.Int8Ty, PaddedSize);
Address RawMemAddr =
- CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
+ CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
Address MemAddr =
CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr");
@@ -7658,11 +7709,11 @@ Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
// Return the appropriate result.
CGF.EmitBlock(ContBlock);
- Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
- MemAddr, InMemBlock, "va_arg.addr");
+ Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
+ "va_arg.addr");
if (IsIndirect)
- ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"),
+ ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"), ArgTy,
TyInfo.Align);
return ResAddr;
@@ -8661,9 +8712,10 @@ Address HexagonABIInfo::EmitVAArgFromMemory(CodeGenFunction &CGF,
// Get the type of the argument from memory and bitcast
// overflow area pointer to the argument type.
llvm::Type *PTy = CGF.ConvertTypeForMem(Ty);
- Address AddrTyped = CGF.Builder.CreateBitCast(
- Address(__overflow_area_pointer, CharUnits::fromQuantity(Align)),
- llvm::PointerType::getUnqual(PTy));
+ Address AddrTyped = CGF.Builder.CreateElementBitCast(
+ Address(__overflow_area_pointer, CGF.Int8Ty,
+ CharUnits::fromQuantity(Align)),
+ PTy);
// Round up to the minimum stack alignment for varargs which is 4 bytes.
uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
@@ -8682,9 +8734,8 @@ Address HexagonABIInfo::EmitVAArgForHexagon(CodeGenFunction &CGF,
QualType Ty) const {
// FIXME: Need to handle alignment
llvm::Type *BP = CGF.Int8PtrTy;
- llvm::Type *BPP = CGF.Int8PtrPtrTy;
CGBuilderTy &Builder = CGF.Builder;
- Address VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
+ Address VAListAddrAsBPP = Builder.CreateElementBitCast(VAListAddr, BP, "ap");
llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
// Handle address alignment for type alignment > 32 bits
uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
@@ -8695,9 +8746,9 @@ Address HexagonABIInfo::EmitVAArgForHexagon(CodeGenFunction &CGF,
AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
}
- llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
- Address AddrTyped = Builder.CreateBitCast(
- Address(Addr, CharUnits::fromQuantity(TyAlign)), PTy);
+ Address AddrTyped = Builder.CreateElementBitCast(
+ Address(Addr, CGF.Int8Ty, CharUnits::fromQuantity(TyAlign)),
+ CGF.ConvertType(Ty));
uint64_t Offset = llvm::alignTo(CGF.getContext().getTypeSize(Ty) / 8, 4);
llvm::Value *NextAddr = Builder.CreateGEP(
@@ -8850,12 +8901,13 @@ Address HexagonABIInfo::EmitVAArgForHexagonLinux(CodeGenFunction &CGF,
// Implement the ContBlock
CGF.EmitBlock(ContBlock);
- llvm::Type *MemPTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
+ llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
+ llvm::Type *MemPTy = llvm::PointerType::getUnqual(MemTy);
llvm::PHINode *ArgAddr = CGF.Builder.CreatePHI(MemPTy, 2, "vaarg.addr");
ArgAddr->addIncoming(__saved_reg_area_p, InRegBlock);
ArgAddr->addIncoming(__overflow_area_p, OnStackBlock);
- return Address(ArgAddr, CharUnits::fromQuantity(ArgAlign));
+ return Address(ArgAddr, MemTy, CharUnits::fromQuantity(ArgAlign));
}
Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
@@ -9275,7 +9327,7 @@ public:
llvm::Function *
createEnqueuedBlockKernel(CodeGenFunction &CGF,
llvm::Function *BlockInvokeFunc,
- llvm::Value *BlockLiteral) const override;
+ llvm::Type *BlockTy) const override;
bool shouldEmitStaticExternCAliases() const override;
void setCUDAKernelCallingConvention(const FunctionType *&FT) const override;
};
@@ -9443,7 +9495,7 @@ AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
if (CGM.isTypeConstant(D->getType(), false) &&
D->hasConstantInitialization()) {
if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
- return ConstAS.getValue();
+ return *ConstAS;
}
return DefaultGlobalAS;
}
@@ -9768,7 +9820,8 @@ Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
CharUnits SlotSize = CharUnits::fromQuantity(8);
CGBuilderTy &Builder = CGF.Builder;
- Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
+ Address Addr = Address(Builder.CreateLoad(VAListAddr, "ap.cur"),
+ getVAListElementType(CGF), SlotSize);
llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
auto TypeInfo = getContext().getTypeInfoInChars(Ty);
@@ -9799,19 +9852,19 @@ Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
case ABIArgInfo::IndirectAliased:
Stride = SlotSize;
ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect");
- ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"),
+ ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"), ArgTy,
TypeInfo.Align);
break;
case ABIArgInfo::Ignore:
- return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.Align);
+ return Address(llvm::UndefValue::get(ArgPtrTy), ArgTy, TypeInfo.Align);
}
// Update VAList.
Address NextPtr = Builder.CreateConstInBoundsByteGEP(Addr, Stride, "ap.next");
Builder.CreateStore(NextPtr.getPointer(), VAListAddr);
- return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr");
+ return Builder.CreateElementBitCast(ArgAddr, ArgTy, "arg.addr");
}
void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
@@ -10145,7 +10198,8 @@ Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
// Get the VAList.
CharUnits SlotSize = CharUnits::fromQuantity(4);
- Address AP(Builder.CreateLoad(VAListAddr), SlotSize);
+ Address AP = Address(Builder.CreateLoad(VAListAddr),
+ getVAListElementType(CGF), SlotSize);
// Handle the argument.
ABIArgInfo AI = classifyArgumentType(Ty);
@@ -10163,20 +10217,20 @@ Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
case ABIArgInfo::InAlloca:
llvm_unreachable("Unsupported ABI kind for va_arg");
case ABIArgInfo::Ignore:
- Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
+ Val = Address(llvm::UndefValue::get(ArgPtrTy), ArgTy, TypeAlign);
ArgSize = CharUnits::Zero();
break;
case ABIArgInfo::Extend:
case ABIArgInfo::Direct:
- Val = Builder.CreateBitCast(AP, ArgPtrTy);
+ Val = Builder.CreateElementBitCast(AP, ArgTy);
ArgSize = CharUnits::fromQuantity(
- getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
+ getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
ArgSize = ArgSize.alignTo(SlotSize);
break;
case ABIArgInfo::Indirect:
case ABIArgInfo::IndirectAliased:
Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
- Val = Address(Builder.CreateLoad(Val), TypeAlign);
+ Val = Address(Builder.CreateLoad(Val), ArgTy, TypeAlign);
ArgSize = SlotSize;
break;
}
@@ -10380,10 +10434,10 @@ void CommonSPIRABIInfo::setCCs() {
}
ABIArgInfo SPIRVABIInfo::classifyKernelArgumentType(QualType Ty) const {
- if (getContext().getLangOpts().HIP) {
+ if (getContext().getLangOpts().CUDAIsDevice) {
// Coerce pointer arguments with default address space to CrossWorkGroup
- // pointers for HIPSPV. When the language mode is HIP, the SPIRTargetInfo
- // maps cuda_device to SPIR-V's CrossWorkGroup address space.
+ // pointers for HIPSPV/CUDASPV. When the language mode is HIP/CUDA, the
+ // SPIRTargetInfo maps cuda_device to SPIR-V's CrossWorkGroup address space.
llvm::Type *LTy = CGT.ConvertType(Ty);
auto DefaultAS = getContext().getTargetAddressSpace(LangAS::Default);
auto GlobalAS = getContext().getTargetAddressSpace(LangAS::cuda_device);
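
Broadening the guard from HIP to CUDAIsDevice extends the coercion to CUDA-on-SPIR-V as well: in both modes cuda_device maps to SPIR-V's CrossWorkGroup address space, so default-AS kernel pointer parameters are retyped accordingly. A sketch of the coercion body, consistent with the shown context (the function's remainder is not part of this excerpt):

    // Re-type default-AS pointer arguments as CrossWorkGroup pointers.
    if (LTy->isPointerTy() && LTy->getPointerAddressSpace() == DefaultAS) {
      LTy = llvm::PointerType::getWithSamePointeeType(
          llvm::cast<llvm::PointerType>(LTy), GlobalAS);
      return ABIArgInfo::getDirect(LTy, 0, nullptr, false);
    }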
@@ -11190,7 +11244,8 @@ Address RISCVABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
// Empty records are ignored for parameter passing purposes.
if (isEmptyRecord(getContext(), Ty, true)) {
- Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
+ Address Addr = Address(CGF.Builder.CreateLoad(VAListAddr),
+ getVAListElementType(CGF), SlotSize);
Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
return Addr;
}
@@ -11296,6 +11351,165 @@ public:
} // end anonymous namespace
//===----------------------------------------------------------------------===//
+// CSKY ABI Implementation
+//===----------------------------------------------------------------------===//
+namespace {
+class CSKYABIInfo : public DefaultABIInfo {
+ static const int NumArgGPRs = 4;
+ static const int NumArgFPRs = 4;
+
+ static const unsigned XLen = 32;
+ unsigned FLen;
+
+public:
+ CSKYABIInfo(CodeGen::CodeGenTypes &CGT, unsigned FLen)
+ : DefaultABIInfo(CGT), FLen(FLen) {}
+
+ void computeInfo(CGFunctionInfo &FI) const override;
+ ABIArgInfo classifyArgumentType(QualType Ty, int &ArgGPRsLeft,
+ int &ArgFPRsLeft,
+ bool isReturnType = false) const;
+ ABIArgInfo classifyReturnType(QualType RetTy) const;
+
+ Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const override;
+};
+
+} // end anonymous namespace
+
+void CSKYABIInfo::computeInfo(CGFunctionInfo &FI) const {
+ QualType RetTy = FI.getReturnType();
+ if (!getCXXABI().classifyReturnType(FI))
+ FI.getReturnInfo() = classifyReturnType(RetTy);
+
+ bool IsRetIndirect = FI.getReturnInfo().getKind() == ABIArgInfo::Indirect;
+
+ // We must track the number of GPRs used in order to conform to the CSKY
+ // ABI, as integer scalars passed in registers should have signext/zeroext
+ // when promoted.
+ int ArgGPRsLeft = IsRetIndirect ? NumArgGPRs - 1 : NumArgGPRs;
+ int ArgFPRsLeft = FLen ? NumArgFPRs : 0;
+
+ for (auto &ArgInfo : FI.arguments()) {
+ ArgInfo.info = classifyArgumentType(ArgInfo.type, ArgGPRsLeft, ArgFPRsLeft);
+ }
+}
+
+Address CSKYABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
+ QualType Ty) const {
+ CharUnits SlotSize = CharUnits::fromQuantity(XLen / 8);
+
+ // Empty records are ignored for parameter passing purposes.
+ if (isEmptyRecord(getContext(), Ty, true)) {
+ Address Addr = Address(CGF.Builder.CreateLoad(VAListAddr),
+ getVAListElementType(CGF), SlotSize);
+ Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
+ return Addr;
+ }
+
+ auto TInfo = getContext().getTypeInfoInChars(Ty);
+
+ return emitVoidPtrVAArg(CGF, VAListAddr, Ty, false, TInfo, SlotSize,
+ /*AllowHigherAlign=*/true);
+}
+
+ABIArgInfo CSKYABIInfo::classifyArgumentType(QualType Ty, int &ArgGPRsLeft,
+ int &ArgFPRsLeft,
+ bool isReturnType) const {
+ assert(ArgGPRsLeft <= NumArgGPRs && "Arg GPR tracking underflow");
+ Ty = useFirstFieldIfTransparentUnion(Ty);
+
+ // Structures with either a non-trivial destructor or a non-trivial
+ // copy constructor are always passed indirectly.
+ if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
+ if (ArgGPRsLeft)
+ ArgGPRsLeft -= 1;
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
+ CGCXXABI::RAA_DirectInMemory);
+ }
+
+ // Ignore empty structs/unions.
+ if (isEmptyRecord(getContext(), Ty, true))
+ return ABIArgInfo::getIgnore();
+
+ if (!Ty->getAsUnionType())
+ if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
+ return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
+
+ uint64_t Size = getContext().getTypeSize(Ty);
+ // Pass floating point values via FPRs if possible.
+ if (Ty->isFloatingType() && !Ty->isComplexType() && FLen >= Size &&
+ ArgFPRsLeft) {
+ ArgFPRsLeft--;
+ return ABIArgInfo::getDirect();
+ }
+
+ // Complex types for the hard float ABI must be passed direct rather than
+ // using CoerceAndExpand.
+ if (Ty->isComplexType() && FLen && !isReturnType) {
+ QualType EltTy = Ty->castAs<ComplexType>()->getElementType();
+ if (getContext().getTypeSize(EltTy) <= FLen) {
+ ArgFPRsLeft -= 2;
+ return ABIArgInfo::getDirect();
+ }
+ }
+
+ if (!isAggregateTypeForABI(Ty)) {
+ // Treat an enum type as its underlying type.
+ if (const EnumType *EnumTy = Ty->getAs<EnumType>())
+ Ty = EnumTy->getDecl()->getIntegerType();
+
+ // All integral types are promoted to XLen width, unless passed on the
+ // stack.
+ if (Size < XLen && Ty->isIntegralOrEnumerationType())
+ return ABIArgInfo::getExtend(Ty);
+
+ if (const auto *EIT = Ty->getAs<BitIntType>()) {
+ if (EIT->getNumBits() < XLen)
+ return ABIArgInfo::getExtend(Ty);
+ }
+
+ return ABIArgInfo::getDirect();
+ }
+
+  // For argument types, the first 4*XLen bits of an aggregate are passed in
+  // registers and the rest is passed on the stack, so we can coerce to
+  // integers directly and let the backend handle it correctly.
+  // For return types, aggregates of <= 2*XLen bits are returned in registers;
+  // anything larger is returned indirectly.
+ if (!isReturnType || (isReturnType && Size <= 2 * XLen)) {
+ if (Size <= XLen) {
+ return ABIArgInfo::getDirect(
+ llvm::IntegerType::get(getVMContext(), XLen));
+ } else {
+ return ABIArgInfo::getDirect(llvm::ArrayType::get(
+ llvm::IntegerType::get(getVMContext(), XLen), (Size + 31) / XLen));
+ }
+ }
+ return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
+}
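An illustrative trace of the aggregate rule above for XLen == 32 (the struct
layouts are hypothetical, not part of the patch; single-element structs and
hard-float candidates are handled by the earlier checks instead):

  struct S2 { char a; char b; };  // Size = 16 <= XLen        -> direct i32
  struct S4 { int a; int b; };    // Size = 64, (64+31)/32 = 2 -> direct [2 x i32]
  struct S5 { int a[3]; };        // Size = 96, (96+31)/32 = 3 -> direct [3 x i32]
  // As a return type, S5 (96 > 2*XLen) is returned indirectly instead.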
+
+ABIArgInfo CSKYABIInfo::classifyReturnType(QualType RetTy) const {
+ if (RetTy->isVoidType())
+ return ABIArgInfo::getIgnore();
+
+ int ArgGPRsLeft = 2;
+ int ArgFPRsLeft = FLen ? 1 : 0;
+
+ // The rules for return and argument types are the same, so defer to
+ // classifyArgumentType.
+ return classifyArgumentType(RetTy, ArgGPRsLeft, ArgFPRsLeft, true);
+}
+
+namespace {
+class CSKYTargetCodeGenInfo : public TargetCodeGenInfo {
+public:
+ CSKYTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, unsigned FLen)
+ : TargetCodeGenInfo(std::make_unique<CSKYABIInfo>(CGT, FLen)) {}
+};
+} // end anonymous namespace
+
+//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//
@@ -11515,6 +11729,14 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
return SetCGInfo(new SPIRVTargetCodeGenInfo(Types));
case llvm::Triple::ve:
return SetCGInfo(new VETargetCodeGenInfo(Types));
+ case llvm::Triple::csky: {
+ bool IsSoftFloat = !getTarget().hasFeature("hard-float-abi");
+ bool hasFP64 = getTarget().hasFeature("fpuv2_df") ||
+ getTarget().hasFeature("fpuv3_df");
+ return SetCGInfo(new CSKYTargetCodeGenInfo(Types, IsSoftFloat ? 0
+ : hasFP64 ? 64
+ : 32));
+ }
}
}
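How the csky case above chooses FLen (a summary of the code, using the
feature names from the hunk):

  no "hard-float-abi" feature              -> FLen = 0  (soft-float ABI)
  hard-float + "fpuv2_df" or "fpuv3_df"    -> FLen = 64 (double-precision FPU)
  hard-float, single-precision FPU only    -> FLen = 32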
@@ -11526,23 +11748,19 @@ const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
llvm::Function *
TargetCodeGenInfo::createEnqueuedBlockKernel(CodeGenFunction &CGF,
llvm::Function *Invoke,
- llvm::Value *BlockLiteral) const {
+ llvm::Type *BlockTy) const {
auto *InvokeFT = Invoke->getFunctionType();
- llvm::SmallVector<llvm::Type *, 2> ArgTys;
- for (auto &P : InvokeFT->params())
- ArgTys.push_back(P);
auto &C = CGF.getLLVMContext();
std::string Name = Invoke->getName().str() + "_kernel";
- auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
+ auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C),
+ InvokeFT->params(), false);
auto *F = llvm::Function::Create(FT, llvm::GlobalValue::ExternalLinkage, Name,
&CGF.CGM.getModule());
auto IP = CGF.Builder.saveIP();
auto *BB = llvm::BasicBlock::Create(C, "entry", F);
auto &Builder = CGF.Builder;
Builder.SetInsertPoint(BB);
- llvm::SmallVector<llvm::Value *, 2> Args;
- for (auto &A : F->args())
- Args.push_back(&A);
+ llvm::SmallVector<llvm::Value *, 2> Args(llvm::make_pointer_range(F->args()));
llvm::CallInst *call = Builder.CreateCall(Invoke, Args);
call->setCallingConv(Invoke->getCallingConv());
Builder.CreateRetVoid();
@@ -11560,11 +11778,10 @@ TargetCodeGenInfo::createEnqueuedBlockKernel(CodeGenFunction &CGF,
/// has "enqueued-block" function attribute and kernel argument metadata.
llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
CodeGenFunction &CGF, llvm::Function *Invoke,
- llvm::Value *BlockLiteral) const {
+ llvm::Type *BlockTy) const {
auto &Builder = CGF.Builder;
auto &C = CGF.getLLVMContext();
- auto *BlockTy = BlockLiteral->getType()->getPointerElementType();
auto *InvokeFT = Invoke->getFunctionType();
llvm::SmallVector<llvm::Type *, 2> ArgTys;
llvm::SmallVector<llvm::Metadata *, 8> AddressQuals;
diff --git a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h
index dfdb2f5f55bb..bdd64977b475 100644
--- a/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h
+++ b/contrib/llvm-project/clang/lib/CodeGen/TargetInfo.h
@@ -329,7 +329,7 @@ public:
virtual llvm::Function *
createEnqueuedBlockKernel(CodeGenFunction &CGF,
llvm::Function *BlockInvokeFunc,
- llvm::Value *BlockLiteral) const;
+ llvm::Type *BlockTy) const;
/// \return true if the target supports alias from the unmangled name to the
/// mangled name of functions declared within an extern "C" region and marked
diff --git a/contrib/llvm-project/clang/lib/CrossTU/CrossTranslationUnit.cpp b/contrib/llvm-project/clang/lib/CrossTU/CrossTranslationUnit.cpp
index 0aecad491ecc..8210e8d7c21b 100644
--- a/contrib/llvm-project/clang/lib/CrossTU/CrossTranslationUnit.cpp
+++ b/contrib/llvm-project/clang/lib/CrossTU/CrossTranslationUnit.cpp
@@ -149,6 +149,35 @@ std::error_code IndexError::convertToErrorCode() const {
return std::error_code(static_cast<int>(Code), *Category);
}
+/// Parse one line of the input CTU index file.
+///
+/// @param[in] LineRef The input CTU index item in format
+/// "<USR-Length>:<USR> <File-Path>".
+/// @param[out] LookupName The lookup name in format "<USR-Length>:<USR>".
+/// @param[out] FilePath The file path "<File-Path>".
+static bool parseCrossTUIndexItem(StringRef LineRef, StringRef &LookupName,
+ StringRef &FilePath) {
+ // `LineRef` is "<USR-Length>:<USR> <File-Path>" now.
+
+ size_t USRLength = 0;
+ if (LineRef.consumeInteger(10, USRLength))
+ return false;
+ assert(USRLength && "USRLength should be greater than zero.");
+
+ if (!LineRef.consume_front(":"))
+ return false;
+
+ // `LineRef` is now just "<USR> <File-Path>".
+
+  // Check for an out-of-bounds LookupName length and an incorrect delimiter.
+ if (USRLength >= LineRef.size() || ' ' != LineRef[USRLength])
+ return false;
+
+ LookupName = LineRef.substr(0, USRLength);
+ FilePath = LineRef.substr(USRLength + 1);
+ return true;
+}
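A minimal usage sketch for the parser above (the USR and path are made up;
the function is file-local, so this is illustrative only):

  llvm::StringRef Lookup, Path;
  if (parseCrossTUIndexItem("9:c:@F@main /tmp/main.cpp", Lookup, Path)) {
    // Lookup == "c:@F@main"     (the 9 characters after "9:")
    // Path   == "/tmp/main.cpp" (everything after the delimiting space)
  }

A missing ':' or a USR length that reaches past the delimiting space makes
the function return false.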
+
llvm::Expected<llvm::StringMap<std::string>>
parseCrossTUIndex(StringRef IndexPath) {
std::ifstream ExternalMapFile{std::string(IndexPath)};
@@ -160,24 +189,23 @@ parseCrossTUIndex(StringRef IndexPath) {
std::string Line;
unsigned LineNo = 1;
while (std::getline(ExternalMapFile, Line)) {
- StringRef LineRef{Line};
- const size_t Delimiter = LineRef.find(' ');
- if (Delimiter > 0 && Delimiter != std::string::npos) {
- StringRef LookupName = LineRef.substr(0, Delimiter);
-
- // Store paths with posix-style directory separator.
- SmallString<32> FilePath(LineRef.substr(Delimiter + 1));
- llvm::sys::path::native(FilePath, llvm::sys::path::Style::posix);
-
- bool InsertionOccured;
- std::tie(std::ignore, InsertionOccured) =
- Result.try_emplace(LookupName, FilePath.begin(), FilePath.end());
- if (!InsertionOccured)
- return llvm::make_error<IndexError>(
- index_error_code::multiple_definitions, IndexPath.str(), LineNo);
- } else
+    // Split the lookup name and the file path.
+ StringRef LookupName, FilePathInIndex;
+ if (!parseCrossTUIndexItem(Line, LookupName, FilePathInIndex))
return llvm::make_error<IndexError>(
index_error_code::invalid_index_format, IndexPath.str(), LineNo);
+
+ // Store paths with posix-style directory separator.
+ SmallString<32> FilePath(FilePathInIndex);
+ llvm::sys::path::native(FilePath, llvm::sys::path::Style::posix);
+
+ bool InsertionOccured;
+ std::tie(std::ignore, InsertionOccured) =
+ Result.try_emplace(LookupName, FilePath.begin(), FilePath.end());
+ if (!InsertionOccured)
+ return llvm::make_error<IndexError>(
+ index_error_code::multiple_definitions, IndexPath.str(), LineNo);
+
++LineNo;
}
return Result;
@@ -187,18 +215,14 @@ std::string
createCrossTUIndexString(const llvm::StringMap<std::string> &Index) {
std::ostringstream Result;
for (const auto &E : Index)
- Result << E.getKey().str() << " " << E.getValue() << '\n';
+ Result << E.getKey().size() << ':' << E.getKey().str() << ' '
+ << E.getValue() << '\n';
return Result.str();
}
-bool containsConst(const VarDecl *VD, const ASTContext &ACtx) {
+bool shouldImport(const VarDecl *VD, const ASTContext &ACtx) {
CanQualType CT = ACtx.getCanonicalType(VD->getType());
- if (!CT.isConstQualified()) {
- const RecordType *RTy = CT->getAs<RecordType>();
- if (!RTy || !RTy->hasConstFields())
- return false;
- }
- return true;
+ return CT.isConstQualified() && VD->getType().isTrivialType(ACtx);
}
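Illustrative effect of the tightened predicate above (the declarations are
hypothetical):

  const int N = 42;           // true:  const-qualified and trivial
  int M = 1;                  // false: not const-qualified
  const std::string S = "x";  // false: const, but not a trivial type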
static bool hasBodyOrInit(const FunctionDecl *D, const FunctionDecl *&DefD) {
@@ -428,7 +452,7 @@ CrossTranslationUnitContext::ASTUnitStorage::getASTUnitForFunction(
ensureCTUIndexLoaded(CrossTUDir, IndexName))
return std::move(IndexLoadError);
- // Check if there is and entry in the index for the function.
+ // Check if there is an entry in the index for the function.
if (!NameFileMap.count(FunctionName)) {
++NumNotInOtherTU;
return llvm::make_error<IndexError>(index_error_code::missing_definition);
@@ -711,20 +735,19 @@ CrossTranslationUnitContext::importDefinitionImpl(const T *D, ASTUnit *Unit) {
auto ToDeclOrError = Importer.Import(D);
if (!ToDeclOrError) {
- handleAllErrors(ToDeclOrError.takeError(),
- [&](const ImportError &IE) {
- switch (IE.Error) {
- case ImportError::NameConflict:
- ++NumNameConflicts;
- break;
- case ImportError::UnsupportedConstruct:
- ++NumUnsupportedNodeFound;
- break;
- case ImportError::Unknown:
- llvm_unreachable("Unknown import error happened.");
- break;
- }
- });
+ handleAllErrors(ToDeclOrError.takeError(), [&](const ASTImportError &IE) {
+ switch (IE.Error) {
+ case ASTImportError::NameConflict:
+ ++NumNameConflicts;
+ break;
+ case ASTImportError::UnsupportedConstruct:
+ ++NumUnsupportedNodeFound;
+ break;
+ case ASTImportError::Unknown:
+ llvm_unreachable("Unknown import error happened.");
+ break;
+ }
+ });
return llvm::make_error<IndexError>(index_error_code::failed_import);
}
auto *ToDecl = cast<T>(*ToDeclOrError);
@@ -777,5 +800,18 @@ CrossTranslationUnitContext::getMacroExpansionContextForSourceLocation(
return llvm::None;
}
+bool CrossTranslationUnitContext::isImportedAsNew(const Decl *ToDecl) const {
+ if (!ImporterSharedSt)
+ return false;
+ return ImporterSharedSt->isNewDecl(const_cast<Decl *>(ToDecl));
+}
+
+bool CrossTranslationUnitContext::hasError(const Decl *ToDecl) const {
+ if (!ImporterSharedSt)
+ return false;
+ return static_cast<bool>(
+ ImporterSharedSt->getImportDeclErrorIfAny(const_cast<Decl *>(ToDecl)));
+}
+
} // namespace cross_tu
} // namespace clang
diff --git a/contrib/llvm-project/clang/lib/DirectoryWatcher/DirectoryScanner.cpp b/contrib/llvm-project/clang/lib/DirectoryWatcher/DirectoryScanner.cpp
index 1bc286236a0e..fc4b493e7b4c 100644
--- a/contrib/llvm-project/clang/lib/DirectoryWatcher/DirectoryScanner.cpp
+++ b/contrib/llvm-project/clang/lib/DirectoryWatcher/DirectoryScanner.cpp
@@ -31,7 +31,7 @@ std::vector<std::string> scanDirectory(StringRef Path) {
End = fs::directory_iterator();
!EC && It != End; It.increment(EC)) {
auto status = getFileStatus(It->path());
- if (!status.hasValue())
+ if (!status)
continue;
Result.emplace_back(sys::path::filename(It->path()));
}
diff --git a/contrib/llvm-project/clang/lib/Driver/Action.cpp b/contrib/llvm-project/clang/lib/Driver/Action.cpp
index eb08bfe9cde5..ead3a23da418 100644
--- a/contrib/llvm-project/clang/lib/Driver/Action.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Action.cpp
@@ -26,6 +26,8 @@ const char *Action::getClassName(ActionClass AC) {
case PreprocessJobClass: return "preprocessor";
case PrecompileJobClass: return "precompiler";
case HeaderModulePrecompileJobClass: return "header-module-precompiler";
+ case ExtractAPIJobClass:
+ return "api-extractor";
case AnalyzeJobClass: return "analyzer";
case MigrateJobClass: return "migrator";
case CompileJobClass: return "compiler";
@@ -43,6 +45,8 @@ const char *Action::getClassName(ActionClass AC) {
return "clang-offload-unbundler";
case OffloadWrapperJobClass:
return "clang-offload-wrapper";
+ case OffloadPackagerJobClass:
+ return "clang-offload-packager";
case LinkerWrapperJobClass:
return "clang-linker-wrapper";
case StaticLibJobClass:
@@ -52,7 +56,8 @@ const char *Action::getClassName(ActionClass AC) {
llvm_unreachable("invalid class");
}
-void Action::propagateDeviceOffloadInfo(OffloadKind OKind, const char *OArch) {
+void Action::propagateDeviceOffloadInfo(OffloadKind OKind, const char *OArch,
+ const ToolChain *OToolChain) {
  // Offload actions set their own kinds on their dependences.
if (Kind == OffloadClass)
return;
@@ -65,9 +70,10 @@ void Action::propagateDeviceOffloadInfo(OffloadKind OKind, const char *OArch) {
assert(!ActiveOffloadKindMask && "Setting a device kind in a host action??");
OffloadingDeviceKind = OKind;
OffloadingArch = OArch;
+ OffloadingToolChain = OToolChain;
for (auto *A : Inputs)
- A->propagateDeviceOffloadInfo(OffloadingDeviceKind, OArch);
+ A->propagateDeviceOffloadInfo(OffloadingDeviceKind, OArch, OToolChain);
}
void Action::propagateHostOffloadInfo(unsigned OKinds, const char *OArch) {
@@ -89,7 +95,8 @@ void Action::propagateOffloadInfo(const Action *A) {
propagateHostOffloadInfo(HK, A->getOffloadingArch());
else
propagateDeviceOffloadInfo(A->getOffloadingDeviceKind(),
- A->getOffloadingArch());
+ A->getOffloadingArch(),
+ A->getOffloadingToolChain());
}
std::string Action::getOffloadingKindPrefix() const {
@@ -190,6 +197,7 @@ OffloadAction::OffloadAction(const DeviceDependences &DDeps, types::ID Ty)
DevToolChains(DDeps.getToolChains()) {
auto &OKinds = DDeps.getOffloadKinds();
auto &BArchs = DDeps.getBoundArchs();
+ auto &OTCs = DDeps.getToolChains();
// If all inputs agree on the same kind, use it also for this action.
if (llvm::all_of(OKinds, [&](OffloadKind K) { return K == OKinds.front(); }))
@@ -201,7 +209,7 @@ OffloadAction::OffloadAction(const DeviceDependences &DDeps, types::ID Ty)
// Propagate info to the dependencies.
for (unsigned i = 0, e = getInputs().size(); i != e; ++i)
- getInputs()[i]->propagateDeviceOffloadInfo(OKinds[i], BArchs[i]);
+ getInputs()[i]->propagateDeviceOffloadInfo(OKinds[i], BArchs[i], OTCs[i]);
}
OffloadAction::OffloadAction(const HostDependence &HDep,
@@ -220,7 +228,8 @@ OffloadAction::OffloadAction(const HostDependence &HDep,
if (auto *A = DDeps.getActions()[i]) {
getInputs().push_back(A);
A->propagateDeviceOffloadInfo(DDeps.getOffloadKinds()[i],
- DDeps.getBoundArchs()[i]);
+ DDeps.getBoundArchs()[i],
+ DDeps.getToolChains()[i]);
}
}
@@ -339,6 +348,11 @@ HeaderModulePrecompileJobAction::HeaderModulePrecompileJobAction(
: PrecompileJobAction(HeaderModulePrecompileJobClass, Input, OutputType),
ModuleName(ModuleName) {}
+void ExtractAPIJobAction::anchor() {}
+
+ExtractAPIJobAction::ExtractAPIJobAction(Action *Inputs, types::ID OutputType)
+ : JobAction(ExtractAPIJobClass, Inputs, OutputType) {}
+
void AnalyzeJobAction::anchor() {}
AnalyzeJobAction::AnalyzeJobAction(Action *Input, types::ID OutputType)
@@ -420,6 +434,12 @@ OffloadWrapperJobAction::OffloadWrapperJobAction(ActionList &Inputs,
types::ID Type)
: JobAction(OffloadWrapperJobClass, Inputs, Type) {}
+void OffloadPackagerJobAction::anchor() {}
+
+OffloadPackagerJobAction::OffloadPackagerJobAction(ActionList &Inputs,
+ types::ID Type)
+ : JobAction(OffloadPackagerJobClass, Inputs, Type) {}
+
void LinkerWrapperJobAction::anchor() {}
LinkerWrapperJobAction::LinkerWrapperJobAction(ActionList &Inputs,
diff --git a/contrib/llvm-project/clang/lib/Driver/Compilation.cpp b/contrib/llvm-project/clang/lib/Driver/Compilation.cpp
index 67d941c6c2ab..b4367a0af81e 100644
--- a/contrib/llvm-project/clang/lib/Driver/Compilation.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Compilation.cpp
@@ -162,7 +162,8 @@ bool Compilation::CleanupFileMap(const ArgStringMap &Files,
}
int Compilation::ExecuteCommand(const Command &C,
- const Command *&FailingCommand) const {
+ const Command *&FailingCommand,
+ bool LogOnly) const {
if ((getDriver().CCPrintOptions ||
getArgs().hasArg(options::OPT_v)) && !getDriver().CCGenDiagnostics) {
raw_ostream *OS = &llvm::errs();
@@ -191,6 +192,9 @@ int Compilation::ExecuteCommand(const Command &C,
C.Print(*OS, "\n", /*Quote=*/getDriver().CCPrintOptions);
}
+ if (LogOnly)
+ return 0;
+
std::string Error;
bool ExecutionFailed;
int Res = C.Execute(Redirects, &Error, &ExecutionFailed);
@@ -237,7 +241,8 @@ static bool InputsOk(const Command &C,
}
void Compilation::ExecuteJobs(const JobList &Jobs,
- FailingCommandList &FailingCommands) const {
+ FailingCommandList &FailingCommands,
+ bool LogOnly) const {
  // According to the UNIX standard, the driver needs to continue compiling
  // all the inputs on the command line even if one of them fails.
// In all but CLMode, execute all the jobs unless the necessary inputs for the
@@ -246,7 +251,7 @@ void Compilation::ExecuteJobs(const JobList &Jobs,
if (!InputsOk(Job, FailingCommands))
continue;
const Command *FailingCommand = nullptr;
- if (int Res = ExecuteCommand(Job, FailingCommand)) {
+ if (int Res = ExecuteCommand(Job, FailingCommand, LogOnly)) {
FailingCommands.push_back(std::make_pair(Res, FailingCommand));
// Bail as soon as one command fails in cl driver mode.
if (TheDriver.IsCLMode())
diff --git a/contrib/llvm-project/clang/lib/Driver/Distro.cpp b/contrib/llvm-project/clang/lib/Driver/Distro.cpp
index 5ac38c34d112..1898667279cc 100644
--- a/contrib/llvm-project/clang/lib/Driver/Distro.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Distro.cpp
@@ -91,6 +91,7 @@ static Distro::DistroType DetectLsbRelease(llvm::vfs::FileSystem &VFS) {
.Case("hirsute", Distro::UbuntuHirsute)
.Case("impish", Distro::UbuntuImpish)
.Case("jammy", Distro::UbuntuJammy)
+ .Case("kinetic", Distro::UbuntuKinetic)
.Default(Distro::UnknownDistro);
return Version;
}
@@ -153,6 +154,8 @@ static Distro::DistroType DetectDistro(llvm::vfs::FileSystem &VFS) {
return Distro::DebianBullseye;
case 12:
return Distro::DebianBookworm;
+ case 13:
+ return Distro::DebianTrixie;
default:
return Distro::UnknownDistro;
}
diff --git a/contrib/llvm-project/clang/lib/Driver/Driver.cpp b/contrib/llvm-project/clang/lib/Driver/Driver.cpp
index 3bfddeefc7b2..0da32dae2ef6 100644
--- a/contrib/llvm-project/clang/lib/Driver/Driver.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Driver.cpp
@@ -13,6 +13,7 @@
#include "ToolChains/AVR.h"
#include "ToolChains/Ananas.h"
#include "ToolChains/BareMetal.h"
+#include "ToolChains/CSKYToolChain.h"
#include "ToolChains/Clang.h"
#include "ToolChains/CloudABI.h"
#include "ToolChains/Contiki.h"
@@ -25,6 +26,7 @@
#include "ToolChains/Gnu.h"
#include "ToolChains/HIPAMD.h"
#include "ToolChains/HIPSPV.h"
+#include "ToolChains/HLSL.h"
#include "ToolChains/Haiku.h"
#include "ToolChains/Hexagon.h"
#include "ToolChains/Hurd.h"
@@ -59,6 +61,7 @@
#include "clang/Driver/InputInfo.h"
#include "clang/Driver/Job.h"
#include "clang/Driver/Options.h"
+#include "clang/Driver/Phases.h"
#include "clang/Driver/SanitizerArgs.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
@@ -188,12 +191,14 @@ Driver::Driver(StringRef ClangExecutable, StringRef TargetTriple,
DiagnosticsEngine &Diags, std::string Title,
IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS)
: Diags(Diags), VFS(std::move(VFS)), Mode(GCCMode),
- SaveTemps(SaveTempsNone), BitcodeEmbed(EmbedNone), LTOMode(LTOK_None),
+ SaveTemps(SaveTempsNone), BitcodeEmbed(EmbedNone),
+ Offload(OffloadHostDevice), CXX20HeaderType(HeaderMode_None),
+ ModulesModeCXX20(false), LTOMode(LTOK_None),
ClangExecutable(ClangExecutable), SysRoot(DEFAULT_SYSROOT),
DriverTitle(Title), CCCPrintBindings(false), CCPrintOptions(false),
CCPrintHeaders(false), CCLogDiagnostics(false), CCGenDiagnostics(false),
CCPrintProcessStats(false), TargetTriple(TargetTriple), Saver(Alloc),
- CheckInputsExist(true), GenReproducer(false),
+ CheckInputsExist(true), ProbePrecompiled(true),
SuppressMissingInputWarning(false) {
// Provide a sane fallback if no VFS is specified.
if (!this->VFS)
@@ -230,6 +235,7 @@ void Driver::setDriverMode(StringRef Value) {
.Case("cpp", CPPMode)
.Case("cl", CLMode)
.Case("flang", FlangMode)
+ .Case("dxc", DXCMode)
.Default(None))
Mode = *M;
else
@@ -333,11 +339,15 @@ phases::ID Driver::getFinalPhase(const DerivedArgList &DAL,
CCGenDiagnostics) {
FinalPhase = phases::Preprocess;
- // --precompile only runs up to precompilation.
- } else if ((PhaseArg = DAL.getLastArg(options::OPT__precompile))) {
+ // --precompile only runs up to precompilation.
+ // Options that cause the output of C++20 compiled module interfaces or
+ // header units have the same effect.
+ } else if ((PhaseArg = DAL.getLastArg(options::OPT__precompile)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT_extract_api)) ||
+ (PhaseArg = DAL.getLastArg(options::OPT_fmodule_header,
+ options::OPT_fmodule_header_EQ))) {
FinalPhase = phases::Precompile;
-
- // -{fsyntax-only,-analyze,emit-ast} only run up to the compiler.
+ // -{fsyntax-only,-analyze,emit-ast} only run up to the compiler.
} else if ((PhaseArg = DAL.getLastArg(options::OPT_fsyntax_only)) ||
(PhaseArg = DAL.getLastArg(options::OPT_print_supported_cpus)) ||
(PhaseArg = DAL.getLastArg(options::OPT_module_file_info)) ||
@@ -346,8 +356,7 @@ phases::ID Driver::getFinalPhase(const DerivedArgList &DAL,
(PhaseArg = DAL.getLastArg(options::OPT_rewrite_legacy_objc)) ||
(PhaseArg = DAL.getLastArg(options::OPT__migrate)) ||
(PhaseArg = DAL.getLastArg(options::OPT__analyze)) ||
- (PhaseArg = DAL.getLastArg(options::OPT_emit_ast)) ||
- (PhaseArg = DAL.getLastArg(options::OPT_extract_api))) {
+ (PhaseArg = DAL.getLastArg(options::OPT_emit_ast))) {
FinalPhase = phases::Compile;
// -S only runs up to the backend.
@@ -773,76 +782,117 @@ void Driver::CreateOffloadingDeviceToolChains(Compilation &C,
// OpenMP
//
// We need to generate an OpenMP toolchain if the user specified targets with
- // the -fopenmp-targets option.
- if (Arg *OpenMPTargets =
- C.getInputArgs().getLastArg(options::OPT_fopenmp_targets_EQ)) {
- if (OpenMPTargets->getNumValues()) {
- // We expect that -fopenmp-targets is always used in conjunction with the
- // option -fopenmp specifying a valid runtime with offloading support,
- // i.e. libomp or libiomp.
- bool HasValidOpenMPRuntime = C.getInputArgs().hasFlag(
- options::OPT_fopenmp, options::OPT_fopenmp_EQ,
- options::OPT_fno_openmp, false);
- if (HasValidOpenMPRuntime) {
- OpenMPRuntimeKind OpenMPKind = getOpenMPRuntime(C.getInputArgs());
- HasValidOpenMPRuntime =
- OpenMPKind == OMPRT_OMP || OpenMPKind == OMPRT_IOMP5;
+ // the -fopenmp-targets option or used --offload-arch with OpenMP enabled.
+ bool IsOpenMPOffloading =
+ C.getInputArgs().hasFlag(options::OPT_fopenmp, options::OPT_fopenmp_EQ,
+ options::OPT_fno_openmp, false) &&
+ (C.getInputArgs().hasArg(options::OPT_fopenmp_targets_EQ) ||
+ C.getInputArgs().hasArg(options::OPT_offload_arch_EQ));
+ if (IsOpenMPOffloading) {
+ // We expect that -fopenmp-targets is always used in conjunction with the
+ // option -fopenmp specifying a valid runtime with offloading support, i.e.
+ // libomp or libiomp.
+ OpenMPRuntimeKind RuntimeKind = getOpenMPRuntime(C.getInputArgs());
+ if (RuntimeKind != OMPRT_OMP && RuntimeKind != OMPRT_IOMP5) {
+ Diag(clang::diag::err_drv_expecting_fopenmp_with_fopenmp_targets);
+ return;
+ }
+
+ llvm::StringMap<llvm::DenseSet<StringRef>> DerivedArchs;
+ llvm::StringMap<StringRef> FoundNormalizedTriples;
+ llvm::SmallVector<StringRef, 4> OpenMPTriples;
+
+ // If the user specified -fopenmp-targets= we create a toolchain for each
+ // valid triple. Otherwise, if only --offload-arch= was specified we instead
+ // attempt to derive the appropriate toolchains from the arguments.
+ if (Arg *OpenMPTargets =
+ C.getInputArgs().getLastArg(options::OPT_fopenmp_targets_EQ)) {
+ if (OpenMPTargets && !OpenMPTargets->getNumValues()) {
+ Diag(clang::diag::warn_drv_empty_joined_argument)
+ << OpenMPTargets->getAsString(C.getInputArgs());
+ return;
+ }
+ llvm::copy(OpenMPTargets->getValues(), std::back_inserter(OpenMPTriples));
+ } else if (C.getInputArgs().hasArg(options::OPT_offload_arch_EQ) &&
+ !IsHIP && !IsCuda) {
+ const ToolChain *HostTC = C.getSingleOffloadToolChain<Action::OFK_Host>();
+ auto AMDTriple = getHIPOffloadTargetTriple(*this, C.getInputArgs());
+ auto NVPTXTriple = getNVIDIAOffloadTargetTriple(*this, C.getInputArgs(),
+ HostTC->getTriple());
+
+ // Attempt to deduce the offloading triple from the set of architectures.
+ // We can only correctly deduce NVPTX / AMDGPU triples currently.
+ llvm::DenseSet<StringRef> Archs =
+ getOffloadArchs(C, C.getArgs(), Action::OFK_OpenMP, nullptr);
+ for (StringRef Arch : Archs) {
+ if (NVPTXTriple && IsNVIDIAGpuArch(StringToCudaArch(
+ getProcessorFromTargetID(*NVPTXTriple, Arch)))) {
+ DerivedArchs[NVPTXTriple->getTriple()].insert(Arch);
+ } else if (AMDTriple &&
+ IsAMDGpuArch(StringToCudaArch(
+ getProcessorFromTargetID(*AMDTriple, Arch)))) {
+ DerivedArchs[AMDTriple->getTriple()].insert(Arch);
+ } else {
+ Diag(clang::diag::err_drv_failed_to_deduce_target_from_arch) << Arch;
+ return;
+ }
}
- if (HasValidOpenMPRuntime) {
- llvm::StringMap<const char *> FoundNormalizedTriples;
- for (const char *Val : OpenMPTargets->getValues()) {
- llvm::Triple TT(ToolChain::getOpenMPTriple(Val));
- std::string NormalizedName = TT.normalize();
-
- // Make sure we don't have a duplicate triple.
- auto Duplicate = FoundNormalizedTriples.find(NormalizedName);
- if (Duplicate != FoundNormalizedTriples.end()) {
- Diag(clang::diag::warn_drv_omp_offload_target_duplicate)
- << Val << Duplicate->second;
- continue;
- }
+ for (const auto &TripleAndArchs : DerivedArchs)
+ OpenMPTriples.push_back(TripleAndArchs.first());
+ }
- // Store the current triple so that we can check for duplicates in the
- // following iterations.
- FoundNormalizedTriples[NormalizedName] = Val;
-
- // If the specified target is invalid, emit a diagnostic.
- if (TT.getArch() == llvm::Triple::UnknownArch)
- Diag(clang::diag::err_drv_invalid_omp_target) << Val;
- else {
- const ToolChain *TC;
- // Device toolchains have to be selected differently. They pair host
- // and device in their implementation.
- if (TT.isNVPTX() || TT.isAMDGCN()) {
- const ToolChain *HostTC =
- C.getSingleOffloadToolChain<Action::OFK_Host>();
- assert(HostTC && "Host toolchain should be always defined.");
- auto &DeviceTC =
- ToolChains[TT.str() + "/" + HostTC->getTriple().normalize()];
- if (!DeviceTC) {
- if (TT.isNVPTX())
- DeviceTC = std::make_unique<toolchains::CudaToolChain>(
- *this, TT, *HostTC, C.getInputArgs(), Action::OFK_OpenMP);
- else if (TT.isAMDGCN())
- DeviceTC =
- std::make_unique<toolchains::AMDGPUOpenMPToolChain>(
- *this, TT, *HostTC, C.getInputArgs());
- else
- assert(DeviceTC && "Device toolchain not defined.");
- }
-
- TC = DeviceTC.get();
- } else
- TC = &getToolChain(C.getInputArgs(), TT);
- C.addOffloadDeviceToolChain(TC, Action::OFK_OpenMP);
+ for (StringRef Val : OpenMPTriples) {
+ llvm::Triple TT(ToolChain::getOpenMPTriple(Val));
+ std::string NormalizedName = TT.normalize();
+
+ // Make sure we don't have a duplicate triple.
+ auto Duplicate = FoundNormalizedTriples.find(NormalizedName);
+ if (Duplicate != FoundNormalizedTriples.end()) {
+ Diag(clang::diag::warn_drv_omp_offload_target_duplicate)
+ << Val << Duplicate->second;
+ continue;
+ }
+
+ // Store the current triple so that we can check for duplicates in the
+ // following iterations.
+ FoundNormalizedTriples[NormalizedName] = Val;
+
+ // If the specified target is invalid, emit a diagnostic.
+ if (TT.getArch() == llvm::Triple::UnknownArch)
+ Diag(clang::diag::err_drv_invalid_omp_target) << Val;
+ else {
+ const ToolChain *TC;
+ // Device toolchains have to be selected differently. They pair host
+ // and device in their implementation.
+ if (TT.isNVPTX() || TT.isAMDGCN()) {
+ const ToolChain *HostTC =
+ C.getSingleOffloadToolChain<Action::OFK_Host>();
+ assert(HostTC && "Host toolchain should be always defined.");
+ auto &DeviceTC =
+ ToolChains[TT.str() + "/" + HostTC->getTriple().normalize()];
+ if (!DeviceTC) {
+ if (TT.isNVPTX())
+ DeviceTC = std::make_unique<toolchains::CudaToolChain>(
+ *this, TT, *HostTC, C.getInputArgs(), Action::OFK_OpenMP);
+ else if (TT.isAMDGCN())
+ DeviceTC = std::make_unique<toolchains::AMDGPUOpenMPToolChain>(
+ *this, TT, *HostTC, C.getInputArgs());
+ else
+ assert(DeviceTC && "Device toolchain not defined.");
}
- }
- } else
- Diag(clang::diag::err_drv_expecting_fopenmp_with_fopenmp_targets);
- } else
- Diag(clang::diag::warn_drv_empty_joined_argument)
- << OpenMPTargets->getAsString(C.getInputArgs());
+
+ TC = DeviceTC.get();
+ } else
+ TC = &getToolChain(C.getInputArgs(), TT);
+ C.addOffloadDeviceToolChain(TC, Action::OFK_OpenMP);
+ if (DerivedArchs.find(TT.getTriple()) != DerivedArchs.end())
+ KnownArchs[TC] = DerivedArchs[TT.getTriple()];
+ }
+ }
+ } else if (C.getInputArgs().hasArg(options::OPT_fopenmp_targets_EQ)) {
+ Diag(clang::diag::err_drv_expecting_fopenmp_with_fopenmp_targets);
+ return;
}
//
@@ -924,7 +974,7 @@ bool Driver::loadConfigFile() {
if (llvm::sys::fs::make_absolute(CfgDir).value() != 0)
SystemConfigDir.clear();
else
- SystemConfigDir = std::string(CfgDir.begin(), CfgDir.end());
+ SystemConfigDir = static_cast<std::string>(CfgDir);
}
}
if (CLOptions->hasArg(options::OPT_config_user_dir_EQ)) {
@@ -935,7 +985,7 @@ bool Driver::loadConfigFile() {
if (llvm::sys::fs::make_absolute(CfgDir).value() != 0)
UserConfigDir.clear();
else
- UserConfigDir = std::string(CfgDir.begin(), CfgDir.end());
+ UserConfigDir = static_cast<std::string>(CfgDir);
}
}
}
@@ -1167,9 +1217,6 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
CCCPrintBindings = Args.hasArg(options::OPT_ccc_print_bindings);
if (const Arg *A = Args.getLastArg(options::OPT_ccc_gcc_name))
CCCGenericGCCName = A->getValue();
- GenReproducer = Args.hasFlag(options::OPT_gen_reproducer,
- options::OPT_fno_crash_diagnostics,
- !!::getenv("FORCE_CLANG_DIAGNOSTICS_CRASH"));
// Process -fproc-stat-report options.
if (const Arg *A = Args.getLastArg(options::OPT_fproc_stat_report_EQ)) {
@@ -1189,7 +1236,22 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
T.setEnvironment(llvm::Triple::MSVC);
T.setObjectFormat(llvm::Triple::COFF);
TargetTriple = T.str();
+ } else if (IsDXCMode()) {
+ // Build TargetTriple from target_profile option for clang-dxc.
+ if (const Arg *A = Args.getLastArg(options::OPT_target_profile)) {
+ StringRef TargetProfile = A->getValue();
+ if (auto Triple =
+ toolchains::HLSLToolChain::parseTargetProfile(TargetProfile))
+ TargetTriple = *Triple;
+ else
+ Diag(diag::err_drv_invalid_directx_shader_module) << TargetProfile;
+
+ A->claim();
+ } else {
+ Diag(diag::err_drv_dxc_missing_target_profile);
+ }
}
+
if (const Arg *A = Args.getLastArg(options::OPT_target))
TargetTriple = A->getValue();
if (const Arg *A = Args.getLastArg(options::OPT_ccc_install_dir))
@@ -1223,6 +1285,17 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
.Default(SaveTempsCwd);
}
+ if (const Arg *A = Args.getLastArg(options::OPT_offload_host_only,
+ options::OPT_offload_device_only,
+ options::OPT_offload_host_device)) {
+ if (A->getOption().matches(options::OPT_offload_host_only))
+ Offload = OffloadHost;
+ else if (A->getOption().matches(options::OPT_offload_device_only))
+ Offload = OffloadDevice;
+ else
+ Offload = OffloadHostDevice;
+ }
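The tri-state maps onto the new driver-wide flags roughly as follows (the
invocations are illustrative):

  clang -x hip --offload-host-only a.hip    # Offload = OffloadHost
  clang -x hip --offload-device-only a.hip  # Offload = OffloadDevice
  clang -x hip a.hip                        # default: OffloadHostDevice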
+
setLTOMode(Args);
// Process -fembed-bitcode= flags.
@@ -1241,6 +1314,41 @@ Compilation *Driver::BuildCompilation(ArrayRef<const char *> ArgList) {
BitcodeEmbed = static_cast<BitcodeEmbedMode>(Model);
}
+ // Remove existing compilation database so that each job can append to it.
+ if (Arg *A = Args.getLastArg(options::OPT_MJ))
+ llvm::sys::fs::remove(A->getValue());
+
+ // Setting up the jobs for some precompile cases depends on whether we are
+ // treating them as PCH, implicit modules or C++20 ones.
+ // TODO: inferring the mode like this seems fragile (it meets the objective
+ // of not requiring anything new for operation, however).
+ const Arg *Std = Args.getLastArg(options::OPT_std_EQ);
+ ModulesModeCXX20 =
+ !Args.hasArg(options::OPT_fmodules) && Std &&
+ (Std->containsValue("c++20") || Std->containsValue("c++2b") ||
+ Std->containsValue("c++2a") || Std->containsValue("c++latest"));
+
+ // Process -fmodule-header{=} flags.
+ if (Arg *A = Args.getLastArg(options::OPT_fmodule_header_EQ,
+ options::OPT_fmodule_header)) {
+ // These flags force C++20 handling of headers.
+ ModulesModeCXX20 = true;
+ if (A->getOption().matches(options::OPT_fmodule_header))
+ CXX20HeaderType = HeaderMode_Default;
+ else {
+ StringRef ArgName = A->getValue();
+ unsigned Kind = llvm::StringSwitch<unsigned>(ArgName)
+ .Case("user", HeaderMode_User)
+ .Case("system", HeaderMode_System)
+ .Default(~0U);
+ if (Kind == ~0U) {
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << ArgName;
+ } else
+ CXX20HeaderType = static_cast<ModuleHeaderMode>(Kind);
+ }
+ }
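Example invocations for the header-mode handling above (the file names are
illustrative):

  clang++ -std=c++20 -fmodule-header a.h            # HeaderMode_Default
  clang++ -std=c++20 -fmodule-header=user a.h       # HeaderMode_User
  clang++ -std=c++20 -fmodule-header=system vector  # HeaderMode_System
  clang++ -std=c++20 -fmodule-header=other a.h      # err_drv_invalid_value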
+
std::unique_ptr<llvm::opt::InputArgList> UArgs =
std::make_unique<InputArgList>(std::move(Args));
@@ -1597,6 +1705,19 @@ void Driver::setUpResponseFiles(Compilation &C, Command &Cmd) {
int Driver::ExecuteCompilation(
Compilation &C,
SmallVectorImpl<std::pair<int, const Command *>> &FailingCommands) {
+ if (C.getArgs().hasArg(options::OPT_fdriver_only)) {
+ if (C.getArgs().hasArg(options::OPT_v))
+ C.getJobs().Print(llvm::errs(), "\n", true);
+
+ C.ExecuteJobs(C.getJobs(), FailingCommands, /*LogOnly=*/true);
+
+ // If there were errors building the compilation, quit now.
+ if (!FailingCommands.empty() || Diags.hasErrorOccurred())
+ return 1;
+
+ return 0;
+ }
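A sketch of the new -fdriver-only behavior: the driver still builds all jobs
(and prints them under -v), but ExecuteJobs runs with LogOnly=true, so
nothing is executed:

  clang -fdriver-only -v a.c   # prints the job lines, executes none of them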
+
// Just print if -### was present.
if (C.getArgs().hasArg(options::OPT__HASH_HASH_HASH)) {
C.getJobs().Print(llvm::errs(), "\n", true);
@@ -1918,6 +2039,13 @@ bool Driver::HandleImmediateArgs(const Compilation &C) {
return false;
}
+ if (C.getArgs().hasArg(options::OPT_print_diagnostic_options)) {
+ std::vector<std::string> Flags = DiagnosticIDs::getDiagnosticFlags();
+ for (std::size_t I = 0; I != Flags.size(); I += 2)
+ llvm::outs() << " " << Flags[I] << "\n " << Flags[I + 1] << "\n\n";
+ return false;
+ }
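Assuming DiagnosticIDs::getDiagnosticFlags() returns the flags in positive /
negative pairs, the loop above prints output shaped like:

    -Wabsolute-value
    -Wno-absolute-value

    -Wabstract-vbase-init
    -Wno-abstract-vbase-init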
+
// FIXME: The following handlers should use a callback mechanism, we don't
// know what the client would like to do.
if (Arg *A = C.getArgs().getLastArg(options::OPT_print_file_name_EQ)) {
@@ -2209,6 +2337,14 @@ bool Driver::DiagnoseInputExistence(const DerivedArgList &Args, StringRef Value,
if (Value == "-")
return true;
+ // If it's a header to be found in the system or user search path, then defer
+ // complaints about its absence until those searches can be done. When we
+ // are definitely processing headers for C++20 header units, extend this to
+ // allow the user to put "-fmodule-header -xc++-header vector" for example.
+ if (Ty == types::TY_CXXSHeader || Ty == types::TY_CXXUHeader ||
+ (ModulesModeCXX20 && Ty == types::TY_CXXHeader))
+ return true;
+
if (getVFS().exists(Value))
return true;
@@ -2272,6 +2408,21 @@ bool Driver::DiagnoseInputExistence(const DerivedArgList &Args, StringRef Value,
return false;
}
+// Get the C++20 Header Unit type corresponding to the input type.
+static types::ID CXXHeaderUnitType(ModuleHeaderMode HM) {
+ switch (HM) {
+ case HeaderMode_User:
+ return types::TY_CXXUHeader;
+ case HeaderMode_System:
+ return types::TY_CXXSHeader;
+ case HeaderMode_Default:
+ break;
+ case HeaderMode_None:
+ llvm_unreachable("should not be called in this case");
+ }
+ return types::TY_CXXHUHeader;
+}
+
// Construct the list of inputs and their types.
void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
InputList &Inputs) const {
@@ -2308,6 +2459,14 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
assert(!Args.hasArg(options::OPT_x) && "-x and /TC or /TP is not allowed");
}
+  // Warn that -x after the last input file has no effect.
+ {
+ Arg *LastXArg = Args.getLastArgNoClaim(options::OPT_x);
+ Arg *LastInputArg = Args.getLastArgNoClaim(options::OPT_INPUT);
+ if (LastXArg && LastInputArg && LastInputArg->getIndex() < LastXArg->getIndex())
+ Diag(clang::diag::warn_drv_unused_x) << LastXArg->getValue();
+ }
+
for (Arg *A : Args) {
if (A->getOption().getKind() == Option::InputClass) {
const char *Value = A->getValue();
@@ -2360,7 +2519,9 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
types::ID OldTy = Ty;
Ty = types::lookupCXXTypeForCType(Ty);
- if (Ty != OldTy)
+ // Do not complain about foo.h, when we are known to be processing
+ // it as a C++20 header unit.
+ if (Ty != OldTy && !(OldTy == types::TY_CHeader && hasHeaderMode()))
Diag(clang::diag::warn_drv_treating_input_as_cxx)
<< getTypeName(OldTy) << getTypeName(Ty);
}
@@ -2383,6 +2544,14 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
else if (Args.hasArg(options::OPT_ObjCXX))
Ty = types::TY_ObjCXX;
}
+
+ // Disambiguate headers that are meant to be header units from those
+ // intended to be PCH. Avoid missing '.h' cases that are counted as
+ // C headers by default - we know we are in C++ mode and we do not
+ // want to issue a complaint about compiling things in the wrong mode.
+ if ((Ty == types::TY_CXXHeader || Ty == types::TY_CHeader) &&
+ hasHeaderMode())
+ Ty = CXXHeaderUnitType(CXX20HeaderType);
} else {
assert(InputTypeArg && "InputType set w/o InputTypeArg");
if (!InputTypeArg->getOption().matches(options::OPT_x)) {
@@ -2434,6 +2603,11 @@ void Driver::BuildInputs(const ToolChain &TC, DerivedArgList &Args,
Diag(clang::diag::err_drv_unknown_language) << A->getValue();
InputType = types::TY_Object;
}
+
+ // If the user has put -fmodule-header{,=} then we treat C++ headers as
+ // header unit inputs. So we 'promote' -xc++-header appropriately.
+ if (InputType == types::TY_CXXHeader && hasHeaderMode())
+ InputType = CXXHeaderUnitType(CXX20HeaderType);
} else if (A->getOption().getID() == options::OPT_U) {
assert(A->getNumValues() == 1 && "The /U option has one value.");
StringRef Val = A->getValue(0);
@@ -2465,6 +2639,9 @@ class OffloadingActionBuilder final {
/// Map between an input argument and the offload kinds used to process it.
std::map<const Arg *, unsigned> InputArgToOffloadKindMap;
+ /// Map between a host action and its originating input argument.
+ std::map<Action *, const Arg *> HostActionToInputArgMap;
+
/// Builder interface. It doesn't build anything or keep any state.
class DeviceActionBuilder {
public:
@@ -2692,6 +2869,7 @@ class OffloadingActionBuilder final {
UA->registerDependentActionInfo(ToolChains[0], Arch,
AssociatedOffloadKind);
}
+ IsActive = true;
return ABRT_Success;
}
@@ -2771,15 +2949,8 @@ class OffloadingActionBuilder final {
? C.getSingleOffloadToolChain<Action::OFK_Cuda>()
: C.getSingleOffloadToolChain<Action::OFK_HIP>());
- Arg *PartialCompilationArg = Args.getLastArg(
- options::OPT_cuda_host_only, options::OPT_cuda_device_only,
- options::OPT_cuda_compile_host_device);
- CompileHostOnly = PartialCompilationArg &&
- PartialCompilationArg->getOption().matches(
- options::OPT_cuda_host_only);
- CompileDeviceOnly = PartialCompilationArg &&
- PartialCompilationArg->getOption().matches(
- options::OPT_cuda_device_only);
+ CompileHostOnly = C.getDriver().offloadHostOnly();
+ CompileDeviceOnly = C.getDriver().offloadDeviceOnly();
EmitLLVM = Args.getLastArg(options::OPT_emit_llvm);
EmitAsm = Args.getLastArg(options::OPT_S);
FixedCUID = Args.getLastArgValue(options::OPT_cuid_EQ);
@@ -2806,7 +2977,7 @@ class OffloadingActionBuilder final {
<< "--offload";
}
- // Collect all cuda_gpu_arch parameters, removing duplicates.
+ // Collect all offload arch parameters, removing duplicates.
std::set<StringRef> GpuArchs;
bool Error = false;
for (Arg *A : Args) {
@@ -2815,28 +2986,28 @@ class OffloadingActionBuilder final {
continue;
A->claim();
- StringRef ArchStr = A->getValue();
- if (A->getOption().matches(options::OPT_no_offload_arch_EQ) &&
- ArchStr == "all") {
- GpuArchs.clear();
- continue;
+ for (StringRef ArchStr : llvm::split(A->getValue(), ",")) {
+ if (A->getOption().matches(options::OPT_no_offload_arch_EQ) &&
+ ArchStr == "all") {
+ GpuArchs.clear();
+ } else {
+ ArchStr = getCanonicalOffloadArch(ArchStr);
+ if (ArchStr.empty()) {
+ Error = true;
+ } else if (A->getOption().matches(options::OPT_offload_arch_EQ))
+ GpuArchs.insert(ArchStr);
+ else if (A->getOption().matches(options::OPT_no_offload_arch_EQ))
+ GpuArchs.erase(ArchStr);
+ else
+ llvm_unreachable("Unexpected option.");
+ }
}
- ArchStr = getCanonicalOffloadArch(ArchStr);
- if (ArchStr.empty()) {
- Error = true;
- } else if (A->getOption().matches(options::OPT_offload_arch_EQ))
- GpuArchs.insert(ArchStr);
- else if (A->getOption().matches(options::OPT_no_offload_arch_EQ))
- GpuArchs.erase(ArchStr);
- else
- llvm_unreachable("Unexpected option.");
}
auto &&ConflictingArchs = getConflictOffloadArchCombination(GpuArchs);
if (ConflictingArchs) {
C.getDriver().Diag(clang::diag::err_drv_bad_offload_arch_combo)
- << ConflictingArchs.getValue().first
- << ConflictingArchs.getValue().second;
+ << ConflictingArchs->first << ConflictingArchs->second;
C.setContainsError();
return true;
}
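Illustrative effect of the comma-splitting above (arch names are made up for
the example; later arguments override earlier ones):

  --offload-arch=gfx906,gfx908 --no-offload-arch=gfx906   # -> {gfx908}
  --offload-arch=gfx906,gfx908 --no-offload-arch=all      # -> {}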
@@ -3007,7 +3178,7 @@ class OffloadingActionBuilder final {
if (Args.hasArg(options::OPT_gpu_bundle_output,
options::OPT_no_gpu_bundle_output))
BundleOutput = Args.hasFlag(options::OPT_gpu_bundle_output,
- options::OPT_no_gpu_bundle_output);
+ options::OPT_no_gpu_bundle_output, true);
}
bool canUseBundlerUnbundler() const override { return true; }
@@ -3024,7 +3195,7 @@ class OffloadingActionBuilder final {
C.setContainsError();
return StringRef();
}
- auto CanId = getCanonicalTargetID(ArchStr.getValue(), Features);
+ auto CanId = getCanonicalTargetID(*ArchStr, Features);
return Args.MakeArgStringRef(CanId);
};
@@ -3038,9 +3209,12 @@ class OffloadingActionBuilder final {
getDeviceDependences(OffloadAction::DeviceDependences &DA,
phases::ID CurPhase, phases::ID FinalPhase,
PhasesTy &Phases) override {
+ if (!IsActive)
+ return ABRT_Inactive;
+
// amdgcn does not support linking of object files, therefore we skip
// backend and assemble phases to output LLVM IR. Except for generating
- // non-relocatable device coee, where we generate fat binary for device
+ // non-relocatable device code, where we generate fat binary for device
// code and pass to host in Backend phase.
if (CudaDeviceActions.empty())
return ABRT_Success;
@@ -3049,7 +3223,7 @@ class OffloadingActionBuilder final {
CudaDeviceActions.size() == GpuArchList.size()) &&
"Expecting one action per GPU architecture.");
assert(!CompileHostOnly &&
- "Not expecting CUDA actions in host-only compilation.");
+ "Not expecting HIP actions in host-only compilation.");
if (!Relocatable && CurPhase == phases::Backend && !EmitLLVM &&
!EmitAsm) {
@@ -3108,8 +3282,7 @@ class OffloadingActionBuilder final {
DDep, CudaDeviceActions[I]->getType());
}
- if (!CompileDeviceOnly || !BundleOutput.hasValue() ||
- BundleOutput.getValue()) {
+ if (!CompileDeviceOnly || !BundleOutput || *BundleOutput) {
// Create HIP fat binary with a special "link" action.
CudaFatBinary = C.MakeAction<LinkJobAction>(CudaDeviceActions,
types::TY_HIP_FATBIN);
@@ -3152,8 +3325,8 @@ class OffloadingActionBuilder final {
A = C.getDriver().ConstructPhaseAction(C, Args, CurPhase, A,
AssociatedOffloadKind);
- if (CompileDeviceOnly && CurPhase == FinalPhase &&
- BundleOutput.hasValue() && BundleOutput.getValue()) {
+ if (CompileDeviceOnly && CurPhase == FinalPhase && BundleOutput &&
+ BundleOutput.getValue()) {
for (unsigned I = 0, E = GpuArchList.size(); I != E; ++I) {
OffloadAction::DeviceDependences DDep;
DDep.add(*CudaDeviceActions[I], *ToolChains.front(), GpuArchList[I],
@@ -3178,12 +3351,16 @@ class OffloadingActionBuilder final {
"Linker inputs and GPU arch list sizes do not match.");
ActionList Actions;
- // Append a new link action for each device.
unsigned I = 0;
+ // Append a new link action for each device.
+ // Each entry in DeviceLinkerInputs corresponds to a GPU arch.
for (auto &LI : DeviceLinkerInputs) {
- // Each entry in DeviceLinkerInputs corresponds to a GPU arch.
- auto *DeviceLinkAction =
- C.MakeAction<LinkJobAction>(LI, types::TY_Image);
+
+ types::ID Output = Args.hasArg(options::OPT_emit_llvm)
+ ? types::TY_LLVM_BC
+ : types::TY_Image;
+
+ auto *DeviceLinkAction = C.MakeAction<LinkJobAction>(LI, Output);
// Linking all inputs for the current GPU arch.
// LI contains all the inputs for the linker.
OffloadAction::DeviceDependences DeviceLinkDeps;
@@ -3195,12 +3372,17 @@ class OffloadingActionBuilder final {
}
DeviceLinkerInputs.clear();
+ // If emitting LLVM, do not generate final host/device compilation action
+ if (Args.hasArg(options::OPT_emit_llvm)) {
+ AL.append(Actions);
+ return;
+ }
+
// Create a host object from all the device images by embedding them
// in a fat binary for mixed host-device compilation. For device-only
// compilation, creates a fat binary.
OffloadAction::DeviceDependences DDeps;
- if (!CompileDeviceOnly || !BundleOutput.hasValue() ||
- BundleOutput.getValue()) {
+ if (!CompileDeviceOnly || !BundleOutput || *BundleOutput) {
auto *TopDeviceLinkAction = C.MakeAction<LinkJobAction>(
Actions,
CompileDeviceOnly ? types::TY_HIP_FATBIN : types::TY_Object);
@@ -3445,6 +3627,17 @@ public:
delete SB;
}
+ /// Record a host action and its originating input argument.
+ void recordHostAction(Action *HostAction, const Arg *InputArg) {
+ assert(HostAction && "Invalid host action");
+ assert(InputArg && "Invalid input argument");
+ auto Loc = HostActionToInputArgMap.find(HostAction);
+ if (Loc == HostActionToInputArgMap.end())
+ HostActionToInputArgMap[HostAction] = InputArg;
+ assert(HostActionToInputArgMap[HostAction] == InputArg &&
+ "host action mapped to multiple input arguments");
+ }
+
/// Generate an action that adds device dependences (if any) to a host action.
/// If no device dependence actions exist, just return the host action \a
/// HostAction. If an error is found or if no builder requires the host action
@@ -3460,6 +3653,7 @@ public:
return HostAction;
assert(HostAction && "Invalid host action!");
+ recordHostAction(HostAction, InputArg);
OffloadAction::DeviceDependences DDeps;
// Check if all the programming models agree we should not emit the host
@@ -3513,6 +3707,8 @@ public:
if (!IsValid)
return true;
+ recordHostAction(HostAction, InputArg);
+
// If we are supporting bundling/unbundling and the current action is an
// input action of non-source file, we replace the host action by the
// unbundling action. The bundler tool has the logic to detect if an input
@@ -3529,6 +3725,7 @@ public:
C.getSingleOffloadToolChain<Action::OFK_Host>(),
/*BoundArch=*/StringRef(), Action::OFK_Host);
HostAction = UnbundlingHostAction;
+ recordHostAction(HostAction, InputArg);
}
assert(HostAction && "Invalid host action!");
@@ -3565,6 +3762,9 @@ public:
/// programming models allow it.
bool appendTopLevelActions(ActionList &AL, Action *HostAction,
const Arg *InputArg) {
+ if (HostAction)
+ recordHostAction(HostAction, InputArg);
+
// Get the device actions to be appended.
ActionList OffloadAL;
for (auto *SB : SpecializedBuilders) {
@@ -3586,6 +3786,7 @@ public:
// before this method was called.
assert(HostAction == AL.back() && "Host action not in the list??");
HostAction = C.MakeAction<OffloadBundlingJobAction>(OffloadAL);
+ recordHostAction(HostAction, InputArg);
AL.back() = HostAction;
} else
AL.append(OffloadAL.begin(), OffloadAL.end());
@@ -3619,6 +3820,11 @@ public:
if (!SB->isValid())
continue;
HA = SB->appendLinkHostActions(DeviceAL);
+    // This newly created host action has no originating input argument, so
+    // its offloading kind needs to be set directly.
+ if (HA)
+ HA->propagateHostOffloadInfo(SB->getAssociatedOffloadKind(),
+ /*BoundArch=*/nullptr);
}
return HA;
}
@@ -3645,10 +3851,22 @@ public:
// If we don't have device dependencies, we don't have to create an offload
// action.
if (DDeps.getActions().empty()) {
- // Propagate all the active kinds to host action. Given that it is a link
- // action it is assumed to depend on all actions generated so far.
- HostAction->propagateHostOffloadInfo(ActiveOffloadKinds,
- /*BoundArch=*/nullptr);
+ // Set all the active offloading kinds to the link action. Given that it
+ // is a link action it is assumed to depend on all actions generated so
+ // far.
+ HostAction->setHostOffloadInfo(ActiveOffloadKinds,
+ /*BoundArch=*/nullptr);
+ // Propagate active offloading kinds for each input to the link action.
+ // Each input may have different active offloading kind.
+ for (auto A : HostAction->inputs()) {
+ auto ArgLoc = HostActionToInputArgMap.find(A);
+ if (ArgLoc == HostActionToInputArgMap.end())
+ continue;
+ auto OFKLoc = InputArgToOffloadKindMap.find(ArgLoc->second);
+ if (OFKLoc == InputArgToOffloadKindMap.end())
+ continue;
+ A->propagateHostOffloadInfo(OFKLoc->second, /*BoundArch=*/nullptr);
+ }
return HostAction;
}
@@ -3686,7 +3904,8 @@ void Driver::handleArguments(Compilation &C, DerivedArgList &Args,
phases::ID FinalPhase = getFinalPhase(Args, &FinalPhaseArg);
if (FinalPhase == phases::Link) {
- if (Args.hasArg(options::OPT_emit_llvm))
+    // Emitting LLVM while linking is disabled, except for the HIPAMD toolchain.
+ if (Args.hasArg(options::OPT_emit_llvm) && !Args.hasArg(options::OPT_hip_link))
Diag(clang::diag::err_drv_emit_llvm_link);
if (IsCLMode() && LTOMode != LTOK_None &&
!Args.getLastArgValue(options::OPT_fuse_ld_EQ)
@@ -3830,13 +4049,16 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
// Builder to be used to build offloading actions.
OffloadingActionBuilder OffloadBuilder(C, Args, Inputs);
- // Offload kinds active for this compilation.
- unsigned OffloadKinds = Action::OFK_None;
- if (C.hasOffloadToolChain<Action::OFK_OpenMP>())
- OffloadKinds |= Action::OFK_OpenMP;
+ bool UseNewOffloadingDriver =
+ (C.isOffloadingHostKind(Action::OFK_OpenMP) &&
+ Args.hasFlag(options::OPT_fopenmp_new_driver,
+ options::OPT_no_offload_new_driver, true)) ||
+ Args.hasFlag(options::OPT_offload_new_driver,
+ options::OPT_no_offload_new_driver, false);
// Construct the actions to perform.
HeaderModulePrecompileJobAction *HeaderModuleAction = nullptr;
+ ExtractAPIJobAction *ExtractAPIAction = nullptr;
ActionList LinkerInputs;
ActionList MergerInputs;
@@ -3855,14 +4077,14 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
// Use the current host action in any of the offloading actions, if
// required.
- if (!Args.hasArg(options::OPT_fopenmp_new_driver))
+ if (!UseNewOffloadingDriver)
if (OffloadBuilder.addHostDependenceToDeviceActions(Current, InputArg))
break;
for (phases::ID Phase : PL) {
// Add any offload action the host action depends on.
- if (!Args.hasArg(options::OPT_fopenmp_new_driver))
+ if (!UseNewOffloadingDriver)
Current = OffloadBuilder.addDeviceDependencesToHostAction(
Current, InputArg, Phase, PL.back(), FullPL);
if (!Current)
@@ -3871,7 +4093,10 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
// Queue linker inputs.
if (Phase == phases::Link) {
assert(Phase == PL.back() && "linking must be final compilation step.");
- LinkerInputs.push_back(Current);
+      // We don't need to generate additional link commands when emitting AMD
+      // bitcode.
+ if (!(C.getInputArgs().hasArg(options::OPT_hip_link) &&
+ (C.getInputArgs().hasArg(options::OPT_emit_llvm))))
+ LinkerInputs.push_back(Current);
Current = nullptr;
break;
}
@@ -3897,10 +4122,11 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
break;
}
- // Try to build the offloading actions and add the result as a dependency
- // to the host.
- if (Args.hasArg(options::OPT_fopenmp_new_driver))
- Current = BuildOffloadingActions(C, Args, I, Current);
+ if (Phase == phases::Precompile && ExtractAPIAction) {
+ ExtractAPIAction->addHeaderInput(Current);
+ Current = nullptr;
+ break;
+ }
// FIXME: Should we include any prior module file outputs as inputs of
// later actions in the same command line?
@@ -3914,15 +4140,22 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
if (auto *HMA = dyn_cast<HeaderModulePrecompileJobAction>(NewCurrent))
HeaderModuleAction = HMA;
+ else if (auto *EAA = dyn_cast<ExtractAPIJobAction>(NewCurrent))
+ ExtractAPIAction = EAA;
Current = NewCurrent;
// Use the current host action in any of the offloading actions, if
// required.
- if (!Args.hasArg(options::OPT_fopenmp_new_driver))
+ if (!UseNewOffloadingDriver)
if (OffloadBuilder.addHostDependenceToDeviceActions(Current, InputArg))
break;
+ // Try to build the offloading actions and add the result as a dependency
+ // to the host.
+ if (UseNewOffloadingDriver)
+ Current = BuildOffloadingActions(C, Args, I, Current);
+
if (Current->getType() == types::TY_Nothing)
break;
}
@@ -3932,10 +4165,10 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
Actions.push_back(Current);
// Add any top level actions generated for offloading.
- if (!Args.hasArg(options::OPT_fopenmp_new_driver))
+ if (!UseNewOffloadingDriver)
OffloadBuilder.appendTopLevelActions(Actions, Current, InputArg);
else if (Current)
- Current->propagateHostOffloadInfo(OffloadKinds,
+ Current->propagateHostOffloadInfo(C.getActiveOffloadKinds(),
/*BoundArch=*/nullptr);
}
@@ -3944,26 +4177,27 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
if (LinkerInputs.empty()) {
Arg *FinalPhaseArg;
if (getFinalPhase(Args, &FinalPhaseArg) == phases::Link)
- OffloadBuilder.appendDeviceLinkActions(Actions);
+ if (!UseNewOffloadingDriver)
+ OffloadBuilder.appendDeviceLinkActions(Actions);
}
if (!LinkerInputs.empty()) {
- if (!Args.hasArg(options::OPT_fopenmp_new_driver))
+ if (!UseNewOffloadingDriver)
if (Action *Wrapper = OffloadBuilder.makeHostLinkAction())
LinkerInputs.push_back(Wrapper);
Action *LA;
// Check if this Linker Job should emit a static library.
if (ShouldEmitStaticLibrary(Args)) {
LA = C.MakeAction<StaticLibJobAction>(LinkerInputs, types::TY_Image);
- } else if (Args.hasArg(options::OPT_fopenmp_new_driver) &&
- OffloadKinds != Action::OFK_None) {
+ } else if (UseNewOffloadingDriver ||
+ Args.hasArg(options::OPT_offload_link)) {
LA = C.MakeAction<LinkerWrapperJobAction>(LinkerInputs, types::TY_Image);
- LA->propagateHostOffloadInfo(OffloadKinds,
+ LA->propagateHostOffloadInfo(C.getActiveOffloadKinds(),
/*BoundArch=*/nullptr);
} else {
LA = C.MakeAction<LinkJobAction>(LinkerInputs, types::TY_Image);
}
- if (!Args.hasArg(options::OPT_fopenmp_new_driver))
+ if (!UseNewOffloadingDriver)
LA = OffloadBuilder.processHostLinkAction(LA);
Actions.push_back(LA);
}
@@ -4043,73 +4277,216 @@ void Driver::BuildActions(Compilation &C, DerivedArgList &Args,
// Claim ignored clang-cl options.
Args.ClaimAllArgs(options::OPT_cl_ignored_Group);
+}
+
+/// Returns the canonical name for the offloading architecture when using HIP
+/// or CUDA.
+static StringRef getCanonicalArchString(Compilation &C,
+ const llvm::opt::DerivedArgList &Args,
+ StringRef ArchStr,
+ const llvm::Triple &Triple) {
+ // Look up the CUDA / HIP architecture string. Only report an error if the
+ // triple was expected to be exclusively NVPTX / AMDGPU.
+ CudaArch Arch = StringToCudaArch(getProcessorFromTargetID(Triple, ArchStr));
+ if (Triple.isNVPTX() &&
+ (Arch == CudaArch::UNKNOWN || !IsNVIDIAGpuArch(Arch))) {
+ C.getDriver().Diag(clang::diag::err_drv_offload_bad_gpu_arch)
+ << "CUDA" << ArchStr;
+ return StringRef();
+ } else if (Triple.isAMDGPU() &&
+ (Arch == CudaArch::UNKNOWN || !IsAMDGpuArch(Arch))) {
+ C.getDriver().Diag(clang::diag::err_drv_offload_bad_gpu_arch)
+ << "HIP" << ArchStr;
+ return StringRef();
+ }
+
+ if (IsNVIDIAGpuArch(Arch))
+ return Args.MakeArgStringRef(CudaArchToString(Arch));
+
+ if (IsAMDGpuArch(Arch)) {
+ llvm::StringMap<bool> Features;
+ auto HIPTriple = getHIPOffloadTargetTriple(C.getDriver(), C.getInputArgs());
+ if (!HIPTriple)
+ return StringRef();
+ auto Arch = parseTargetID(*HIPTriple, ArchStr, &Features);
+ if (!Arch) {
+ C.getDriver().Diag(clang::diag::err_drv_bad_target_id) << ArchStr;
+ C.setContainsError();
+ return StringRef();
+ }
+ return Args.MakeArgStringRef(getCanonicalTargetID(*Arch, Features));
+ }
+
+ // If the input isn't CUDA or HIP just return the architecture.
+ return ArchStr;
+}
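A minimal standalone sketch of the dispatch above (plain C++, no LLVM dependencies; all names are illustrative): NVPTX arch strings normalize through the CUDA arch lookup, AMDGPU strings through target-ID parsing, and anything else passes through unchanged. The prefix checks below merely stand in for the real StringToCudaArch / parseTargetID lookups.

#include <iostream>
#include <string>

enum class TripleKind { NVPTX, AMDGPU, Other };

// Illustrative stand-in for the canonicalization dispatch; an empty result
// models the "emit a diagnostic and return StringRef()" path.
std::string canonicalizeArch(TripleKind K, const std::string &Arch) {
  if (K == TripleKind::NVPTX)
    return Arch.rfind("sm_", 0) == 0 ? Arch : "";  // CUDA arch lookup stand-in
  if (K == TripleKind::AMDGPU)
    return Arch.rfind("gfx", 0) == 0 ? Arch : "";  // target-ID parse stand-in
  return Arch;  // neither CUDA nor HIP: pass through unchanged
}

int main() {
  std::cout << canonicalizeArch(TripleKind::NVPTX, "sm_70") << "\n";
  std::cout << canonicalizeArch(TripleKind::AMDGPU, "gfx90a:xnack+") << "\n";
  std::cout << canonicalizeArch(TripleKind::Other, "x86_64") << "\n";
}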
+
+/// Checks that the set of offloading architectures does not conflict. Returns
+/// the incompatible pair if a conflict occurs.
+static llvm::Optional<std::pair<llvm::StringRef, llvm::StringRef>>
+getConflictOffloadArchCombination(const llvm::DenseSet<StringRef> &Archs,
+ Action::OffloadKind Kind) {
+ if (Kind != Action::OFK_HIP)
+ return None;
+
+ std::set<StringRef> ArchSet;
+ llvm::copy(Archs, std::inserter(ArchSet, ArchSet.begin()));
+ return getConflictTargetIDCombination(ArchSet);
+}
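For HIP, two target IDs conflict when they name the same processor with different feature settings (for example gfx908:xnack+ versus gfx908:xnack-). A simplified stand-in for getConflictTargetIDCombination, under that assumption about the target-ID syntax:

#include <map>
#include <optional>
#include <set>
#include <string>
#include <utility>

// Two target IDs conflict when the part before ':' matches but the full
// feature-qualified strings differ.
std::optional<std::pair<std::string, std::string>>
findConflict(const std::set<std::string> &Archs) {
  std::map<std::string, std::string> SeenByProcessor;  // processor -> full ID
  for (const std::string &A : Archs) {
    std::string Proc = A.substr(0, A.find(':'));
    auto [It, Inserted] = SeenByProcessor.emplace(Proc, A);
    if (!Inserted && It->second != A)
      return std::make_pair(It->second, A);  // e.g. gfx908:xnack+ vs gfx908:xnack-
  }
  return std::nullopt;
}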
+
+llvm::DenseSet<StringRef>
+Driver::getOffloadArchs(Compilation &C, const llvm::opt::DerivedArgList &Args,
+ Action::OffloadKind Kind, const ToolChain *TC) const {
+ if (!TC)
+ TC = &C.getDefaultToolChain();
+
+ // --offload and --offload-arch options are mutually exclusive.
+ if (Args.hasArgNoClaim(options::OPT_offload_EQ) &&
+ Args.hasArgNoClaim(options::OPT_offload_arch_EQ,
+ options::OPT_no_offload_arch_EQ)) {
+ C.getDriver().Diag(diag::err_opt_not_valid_with_opt)
+ << "--offload"
+ << (Args.hasArgNoClaim(options::OPT_offload_arch_EQ)
+ ? "--offload-arch"
+ : "--no-offload-arch");
+ }
+
+ if (KnownArchs.find(TC) != KnownArchs.end())
+ return KnownArchs.lookup(TC);
+
+ llvm::DenseSet<StringRef> Archs;
+ for (auto &Arg : Args) {
+ if (Arg->getOption().matches(options::OPT_offload_arch_EQ)) {
+ for (StringRef Arch : llvm::split(Arg->getValue(), ","))
+ Archs.insert(getCanonicalArchString(C, Args, Arch, TC->getTriple()));
+ } else if (Arg->getOption().matches(options::OPT_no_offload_arch_EQ)) {
+ for (StringRef Arch : llvm::split(Arg->getValue(), ",")) {
+ if (Arch == StringRef("all"))
+ Archs.clear();
+ else
+ Archs.erase(getCanonicalArchString(C, Args, Arch, TC->getTriple()));
+ }
+ }
+ }
+
+ if (auto ConflictingArchs = getConflictOffloadArchCombination(Archs, Kind)) {
+ C.getDriver().Diag(clang::diag::err_drv_bad_offload_arch_combo)
+ << ConflictingArchs->first << ConflictingArchs->second;
+ C.setContainsError();
+ }
+
+ if (Archs.empty()) {
+ if (Kind == Action::OFK_Cuda)
+ Archs.insert(CudaArchToString(CudaArch::CudaDefault));
+ else if (Kind == Action::OFK_HIP)
+ Archs.insert(CudaArchToString(CudaArch::HIPDefault));
+ else if (Kind == Action::OFK_OpenMP)
+ Archs.insert(StringRef());
+ } else {
+ Args.ClaimAllArgs(options::OPT_offload_arch_EQ);
+ Args.ClaimAllArgs(options::OPT_no_offload_arch_EQ);
+ }
- // Claim --cuda-host-only and --cuda-compile-host-device, which may be passed
- // to non-CUDA compilations and should not trigger warnings there.
- Args.ClaimAllArgs(options::OPT_cuda_host_only);
- Args.ClaimAllArgs(options::OPT_cuda_compile_host_device);
+ return Archs;
}
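The arch set above is resolved left to right: --offload-arch= adds entries, --no-offload-arch= removes them, and the special value "all" clears the set; an empty result falls back to a per-kind default. A hedged sketch of just that resolution (canonicalization and diagnostics omitted; the real default comes from CudaArchToString per offload kind):

#include <set>
#include <string>
#include <utility>
#include <vector>

// Pairs are (IsAdd, Value), in command-line order.
std::set<std::string>
resolveArchs(const std::vector<std::pair<bool, std::string>> &Args,
             const std::string &DefaultArch) {
  std::set<std::string> Archs;
  for (const auto &[IsAdd, Value] : Args) {
    if (IsAdd)
      Archs.insert(Value);
    else if (Value == "all")
      Archs.clear();  // --no-offload-arch=all wipes everything seen so far
    else
      Archs.erase(Value);
  }
  if (Archs.empty())
    Archs.insert(DefaultArch);
  return Archs;
}

// resolveArchs({{true,"gfx908"},{true,"gfx90a"},{false,"gfx908"}}, "default")
// yields {"gfx90a"}.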
Action *Driver::BuildOffloadingActions(Compilation &C,
llvm::opt::DerivedArgList &Args,
const InputTy &Input,
Action *HostAction) const {
- if (!isa<CompileJobAction>(HostAction))
+ // Don't build offloading actions if explicitly disabled or we do not have a
+ // valid source input and compile action to embed it in. When only
+ // preprocessing, the embedding requirement is waived.
+ if (offloadHostOnly() || !types::isSrcFile(Input.first) ||
+ !(isa<CompileJobAction>(HostAction) ||
+ getFinalPhase(Args) == phases::Preprocess))
return HostAction;
- SmallVector<const ToolChain *, 2> ToolChains;
- ActionList DeviceActions;
+ ActionList OffloadActions;
+ OffloadAction::DeviceDependences DDeps;
- types::ID InputType = Input.first;
- const Arg *InputArg = Input.second;
+ const Action::OffloadKind OffloadKinds[] = {
+ Action::OFK_OpenMP, Action::OFK_Cuda, Action::OFK_HIP};
- auto OpenMPTCRange = C.getOffloadToolChains<Action::OFK_OpenMP>();
- for (auto TI = OpenMPTCRange.first, TE = OpenMPTCRange.second; TI != TE; ++TI)
- ToolChains.push_back(TI->second);
+ for (Action::OffloadKind Kind : OffloadKinds) {
+ SmallVector<const ToolChain *, 2> ToolChains;
+ ActionList DeviceActions;
- for (unsigned I = 0; I < ToolChains.size(); ++I)
- DeviceActions.push_back(C.MakeAction<InputAction>(*InputArg, InputType));
+ auto TCRange = C.getOffloadToolChains(Kind);
+ for (auto TI = TCRange.first, TE = TCRange.second; TI != TE; ++TI)
+ ToolChains.push_back(TI->second);
- if (DeviceActions.empty())
- return HostAction;
+ if (ToolChains.empty())
+ continue;
- auto PL = types::getCompilationPhases(*this, Args, InputType);
+ types::ID InputType = Input.first;
+ const Arg *InputArg = Input.second;
- for (phases::ID Phase : PL) {
- if (Phase == phases::Link) {
- assert(Phase == PL.back() && "linking must be final compilation step.");
- break;
- }
+ // Get the product of all bound architectures and toolchains.
+ SmallVector<std::pair<const ToolChain *, StringRef>> TCAndArchs;
+ for (const ToolChain *TC : ToolChains)
+ for (StringRef Arch : getOffloadArchs(
+ C, C.getArgsForToolChain(TC, "generic", Kind), Kind, TC))
+ TCAndArchs.push_back(std::make_pair(TC, Arch));
- auto TC = ToolChains.begin();
- for (Action *&A : DeviceActions) {
- A = ConstructPhaseAction(C, Args, Phase, A, Action::OFK_OpenMP);
+ for (unsigned I = 0, E = TCAndArchs.size(); I != E; ++I)
+ DeviceActions.push_back(C.MakeAction<InputAction>(*InputArg, InputType));
- if (isa<CompileJobAction>(A)) {
- HostAction->setCannotBeCollapsedWithNextDependentAction();
- OffloadAction::HostDependence HDep(
- *HostAction, *C.getSingleOffloadToolChain<Action::OFK_Host>(),
- /*BourdArch=*/nullptr, Action::OFK_OpenMP);
- OffloadAction::DeviceDependences DDep;
- DDep.add(*A, **TC, /*BoundArch=*/nullptr, Action::OFK_OpenMP);
- A = C.MakeAction<OffloadAction>(HDep, DDep);
+ if (DeviceActions.empty())
+ return HostAction;
+
+ auto PL = types::getCompilationPhases(*this, Args, InputType);
+
+ for (phases::ID Phase : PL) {
+ if (Phase == phases::Link) {
+ assert(Phase == PL.back() && "linking must be final compilation step.");
+ break;
}
- ++TC;
- }
- }
- OffloadAction::DeviceDependences DDeps;
+ auto TCAndArch = TCAndArchs.begin();
+ for (Action *&A : DeviceActions) {
+ A = ConstructPhaseAction(C, Args, Phase, A, Kind);
+
+ if (isa<CompileJobAction>(A) && isa<CompileJobAction>(HostAction) &&
+ Kind == Action::OFK_OpenMP) {
+ // OpenMP offloading has a dependency on the host compile action to
+ // identify which declarations need to be emitted. The host action must
+ // not be collapsed with any other action so the device side can use it.
+ HostAction->setCannotBeCollapsedWithNextDependentAction();
+ OffloadAction::HostDependence HDep(
+ *HostAction, *C.getSingleOffloadToolChain<Action::OFK_Host>(),
+ TCAndArch->second.data(), Kind);
+ OffloadAction::DeviceDependences DDep;
+ DDep.add(*A, *TCAndArch->first, TCAndArch->second.data(), Kind);
+ A = C.MakeAction<OffloadAction>(HDep, DDep);
+ }
+ ++TCAndArch;
+ }
+ }
- auto TC = ToolChains.begin();
- for (Action *A : DeviceActions) {
- DDeps.add(*A, **TC, /*BoundArch=*/nullptr, Action::OFK_OpenMP);
- TC++;
+ auto TCAndArch = TCAndArchs.begin();
+ for (Action *A : DeviceActions) {
+ DDeps.add(*A, *TCAndArch->first, TCAndArch->second.data(), Kind);
+ OffloadAction::DeviceDependences DDep;
+ DDep.add(*A, *TCAndArch->first, TCAndArch->second.data(), Kind);
+ OffloadActions.push_back(C.MakeAction<OffloadAction>(DDep, A->getType()));
+ ++TCAndArch;
+ }
}
+ if (offloadDeviceOnly())
+ return C.MakeAction<OffloadAction>(DDeps, types::TY_Nothing);
+
+ Action *OffloadPackager =
+ C.MakeAction<OffloadPackagerJobAction>(OffloadActions, types::TY_Image);
+ OffloadAction::DeviceDependences DDep;
+ DDep.add(*OffloadPackager, *C.getSingleOffloadToolChain<Action::OFK_Host>(),
+ nullptr, Action::OFK_None);
OffloadAction::HostDependence HDep(
*HostAction, *C.getSingleOffloadToolChain<Action::OFK_Host>(),
- /*BoundArch=*/nullptr, DDeps);
- return C.MakeAction<OffloadAction>(HDep, DDeps);
+ /*BoundArch=*/nullptr, isa<CompileJobAction>(HostAction) ? DDep : DDeps);
+ return C.MakeAction<OffloadAction>(
+ HDep, isa<CompileJobAction>(HostAction) ? DDep : DDeps);
}
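Putting the pieces together: the function builds one device compile chain per (toolchain, arch) pair, wraps each in an offload action, feeds them all to the packager, and attaches the packaged image as a dependence of the host action. A toy model of that graph shape (illustrative structs, not the real Action classes):

#include <memory>
#include <string>
#include <vector>

struct Node {
  std::string Label;
  std::vector<std::shared_ptr<Node>> Deps;
};

// One device compile per bound (toolchain, arch) string; the packager
// aggregates them, and the top-level offload node joins host and device.
std::shared_ptr<Node> buildGraph(const std::vector<std::string> &TCAndArchs) {
  auto Host = std::make_shared<Node>(Node{"host compile", {}});
  auto Packager = std::make_shared<Node>(Node{"offload packager", {}});
  for (const std::string &Arch : TCAndArchs)
    Packager->Deps.push_back(
        std::make_shared<Node>(Node{"device compile (" + Arch + ")", {}}));
  return std::make_shared<Node>(Node{"offload", {Host, Packager}});
}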
Action *Driver::ConstructPhaseAction(
@@ -4138,10 +4515,14 @@ Action *Driver::ConstructPhaseAction(
OutputTy = types::TY_Dependencies;
} else {
OutputTy = Input->getType();
+ // For these cases the preprocessor only translates forms; the output
+ // still needs preprocessing.
if (!Args.hasFlag(options::OPT_frewrite_includes,
options::OPT_fno_rewrite_includes, false) &&
!Args.hasFlag(options::OPT_frewrite_imports,
options::OPT_fno_rewrite_imports, false) &&
+ !Args.hasFlag(options::OPT_fdirectives_only,
+ options::OPT_fno_directives_only, false) &&
!CCGenDiagnostics)
OutputTy = types::getPreprocessedType(OutputTy);
assert(OutputTy != types::TY_INVALID &&
@@ -4150,6 +4531,10 @@ Action *Driver::ConstructPhaseAction(
return C.MakeAction<PreprocessJobAction>(Input, OutputTy);
}
case phases::Precompile: {
+ // API extraction should not generate an actual precompilation action.
+ if (Args.hasArg(options::OPT_extract_api))
+ return C.MakeAction<ExtractAPIJobAction>(Input, types::TY_API_INFO);
+
types::ID OutputTy = getPrecompiledType(Input->getType());
assert(OutputTy != types::TY_INVALID &&
"Cannot precompile this input type!");
@@ -4164,8 +4549,7 @@ Action *Driver::ConstructPhaseAction(
OutputTy = types::TY_ModuleFile;
}
- if (Args.hasArg(options::OPT_fsyntax_only) ||
- Args.hasArg(options::OPT_extract_api)) {
+ if (Args.hasArg(options::OPT_fsyntax_only)) {
// Syntax checks should not emit a PCH file
OutputTy = types::TY_Nothing;
}
@@ -4194,7 +4578,7 @@ Action *Driver::ConstructPhaseAction(
if (Args.hasArg(options::OPT_verify_pch))
return C.MakeAction<VerifyPCHJobAction>(Input, types::TY_Nothing);
if (Args.hasArg(options::OPT_extract_api))
- return C.MakeAction<CompileJobAction>(Input, types::TY_API_INFO);
+ return C.MakeAction<ExtractAPIJobAction>(Input, types::TY_API_INFO);
return C.MakeAction<CompileJobAction>(Input, types::TY_LLVM_BC);
}
case phases::Backend: {
@@ -4204,7 +4588,7 @@ Action *Driver::ConstructPhaseAction(
return C.MakeAction<BackendJobAction>(Input, Output);
}
if (isUsingLTO(/* IsOffload */ true) &&
- TargetDeviceOffloadKind == Action::OFK_OpenMP) {
+ TargetDeviceOffloadKind != Action::OFK_None) {
types::ID Output =
Args.hasArg(options::OPT_S) ? types::TY_LTO_IR : types::TY_LTO_BC;
return C.MakeAction<BackendJobAction>(Input, Output);
@@ -4372,6 +4756,8 @@ void Driver::BuildJobs(Compilation &C) const {
C.getArgs().hasArg(options::OPT_Qunused_arguments))
return;
+ // Claim -fdriver-only here.
+ (void)C.getArgs().hasArg(options::OPT_fdriver_only);
// Claim -### here.
(void)C.getArgs().hasArg(options::OPT__HASH_HASH_HASH);
@@ -5020,8 +5406,9 @@ InputInfoList Driver::BuildJobsForActionNoCache(
// action.
std::string OffloadingPrefix = Action::GetOffloadingFileNamePrefix(
A->getOffloadingDeviceKind(), TC->getTriple().normalize(),
- /*CreatePrefixForHost=*/!!A->getOffloadingHostActiveKinds() &&
- !AtTopLevel);
+ /*CreatePrefixForHost=*/isa<OffloadPackagerJobAction>(A) ||
+ !(A->getOffloadingHostActiveKinds() == Action::OFK_None ||
+ AtTopLevel));
if (isa<OffloadWrapperJobAction>(JA)) {
if (Arg *FinalOutput = C.getArgs().getLastArg(options::OPT_o))
BaseInput = FinalOutput->getValue();
@@ -5258,7 +5645,8 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
bool IsHIPNoRDC = JA.getOffloadingDeviceKind() == Action::OFK_HIP &&
!C.getArgs().hasFlag(options::OPT_fgpu_rdc,
options::OPT_fno_gpu_rdc, false);
- if (IsHIPNoRDC) {
+ bool UseOutExtension = IsHIPNoRDC || isa<OffloadPackagerJobAction>(JA);
+ if (UseOutExtension) {
Output = BaseName;
llvm::sys::path::replace_extension(Output, "");
}
@@ -5267,12 +5655,20 @@ const char *Driver::GetNamedOutputPath(Compilation &C, const JobAction &JA,
Output += "-";
Output.append(BoundArch);
}
- if (IsHIPNoRDC)
+ if (UseOutExtension)
Output += ".out";
NamedOutput = C.getArgs().MakeArgString(Output.c_str());
}
} else if (JA.getType() == types::TY_PCH && IsCLMode()) {
NamedOutput = C.getArgs().MakeArgString(GetClPchPath(C, BaseName));
+ } else if ((JA.getType() == types::TY_Plist || JA.getType() == types::TY_AST) &&
+ C.getArgs().hasArg(options::OPT__SLASH_o)) {
+ StringRef Val =
+ C.getArgs()
+ .getLastArg(options::OPT__SLASH_o)
+ ->getValue();
+ NamedOutput =
+ MakeCLOutputFilename(C.getArgs(), Val, BaseName, types::TY_Object);
} else {
const char *Suffix = types::getTypeTempSuffix(JA.getType(), IsCLMode());
assert(Suffix && "All types used for output should have a suffix.");
@@ -5519,6 +5915,7 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
case llvm::Triple::IOS:
case llvm::Triple::TvOS:
case llvm::Triple::WatchOS:
+ case llvm::Triple::DriverKit:
TC = std::make_unique<toolchains::DarwinClang>(*this, Target, Args);
break;
case llvm::Triple::DragonFly:
@@ -5606,6 +6003,9 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
case llvm::Triple::PS4:
TC = std::make_unique<toolchains::PS4CPU>(*this, Target, Args);
break;
+ case llvm::Triple::PS5:
+ TC = std::make_unique<toolchains::PS5CPU>(*this, Target, Args);
+ break;
case llvm::Triple::Contiki:
TC = std::make_unique<toolchains::Contiki>(*this, Target, Args);
break;
@@ -5615,6 +6015,9 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
case llvm::Triple::ZOS:
TC = std::make_unique<toolchains::ZOS>(*this, Target, Args);
break;
+ case llvm::Triple::ShaderModel:
+ TC = std::make_unique<toolchains::HLSLToolChain>(*this, Target, Args);
+ break;
default:
// Of these targets, Hexagon is the only one that might have
// an OS of Linux, in which case it got handled above already.
@@ -5661,6 +6064,9 @@ const ToolChain &Driver::getToolChain(const ArgList &Args,
case llvm::Triple::spirv64:
TC = std::make_unique<toolchains::SPIRVToolChain>(*this, Target, Args);
break;
+ case llvm::Triple::csky:
+ TC = std::make_unique<toolchains::CSKYToolChain>(*this, Target, Args);
+ break;
default:
if (Target.getVendor() == llvm::Triple::Myriad)
TC = std::make_unique<toolchains::MyriadToolChain>(*this, Target,
@@ -5725,7 +6131,8 @@ bool Driver::ShouldUseClangCompiler(const JobAction &JA) const {
// And say "no" if this is not a kind of action clang understands.
if (!isa<PreprocessJobAction>(JA) && !isa<PrecompileJobAction>(JA) &&
- !isa<CompileJobAction>(JA) && !isa<BackendJobAction>(JA))
+ !isa<CompileJobAction>(JA) && !isa<BackendJobAction>(JA) &&
+ !isa<ExtractAPIJobAction>(JA))
return false;
return true;
@@ -5734,11 +6141,12 @@ bool Driver::ShouldUseClangCompiler(const JobAction &JA) const {
bool Driver::ShouldUseFlangCompiler(const JobAction &JA) const {
// Say "no" if there is not exactly one input of a type flang understands.
if (JA.size() != 1 ||
- !types::isFortran((*JA.input_begin())->getType()))
+ !types::isAcceptedByFlang((*JA.input_begin())->getType()))
return false;
// And say "no" if this is not a kind of action flang understands.
- if (!isa<PreprocessJobAction>(JA) && !isa<CompileJobAction>(JA) && !isa<BackendJobAction>(JA))
+ if (!isa<PreprocessJobAction>(JA) && !isa<CompileJobAction>(JA) &&
+ !isa<BackendJobAction>(JA))
return false;
return true;
@@ -5829,7 +6237,13 @@ Driver::getIncludeExcludeOptionFlagMasks(bool IsClCompatMode) const {
} else {
ExcludedFlagsBitmask |= options::CLOption;
}
-
+ if (IsDXCMode()) {
+ // Include DXC and Core options.
+ IncludedFlagsBitmask |= options::DXCOption;
+ IncludedFlagsBitmask |= options::CoreOption;
+ } else {
+ ExcludedFlagsBitmask |= options::DXCOption;
+ }
return std::make_pair(IncludedFlagsBitmask, ExcludedFlagsBitmask);
}
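The include/exclude masks work as plain bit tests: an option is visible for the current driver mode only if it carries at least one included flag and none of the excluded flags. A small sketch of that filtering (the real visibility test lives in the option table; the flag values here are invented):

#include <cstdio>

enum OptionFlags : unsigned {
  CoreOption = 1u << 0,
  CLOption = 1u << 1,
  DXCOption = 1u << 2,
};

// Mirrors how the (Included, Excluded) pair returned above is consumed.
bool isOptionVisible(unsigned OptFlags, unsigned Included, unsigned Excluded) {
  return (OptFlags & Included) != 0 && (OptFlags & Excluded) == 0;
}

int main() {
  unsigned Included = DXCOption | CoreOption;  // DXC mode per the change above
  unsigned Excluded = CLOption;
  std::printf("%d\n", isOptionVisible(DXCOption, Included, Excluded));  // 1
  std::printf("%d\n", isOptionVisible(CLOption, Included, Excluded));   // 0
}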
diff --git a/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp b/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp
index 403fac76f060..68fe90c7a69d 100644
--- a/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/SanitizerArgs.cpp
@@ -14,9 +14,9 @@
#include "clang/Driver/ToolChain.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Support/AArch64TargetParser.h"
#include "llvm/Support/Path.h"
#include "llvm/Support/SpecialCaseList.h"
-#include "llvm/Support/AArch64TargetParser.h"
#include "llvm/Support/TargetParser.h"
#include "llvm/Support/VirtualFileSystem.h"
#include "llvm/Transforms/Instrumentation/AddressSanitizerOptions.h"
@@ -44,7 +44,8 @@ static const SanitizerMask NeedsUnwindTables =
static const SanitizerMask SupportsCoverage =
SanitizerKind::Address | SanitizerKind::HWAddress |
SanitizerKind::KernelAddress | SanitizerKind::KernelHWAddress |
- SanitizerKind::MemTag | SanitizerKind::Memory |
+ SanitizerKind::MemtagStack | SanitizerKind::MemtagHeap |
+ SanitizerKind::MemtagGlobals | SanitizerKind::Memory |
SanitizerKind::KernelMemory | SanitizerKind::Leak |
SanitizerKind::Undefined | SanitizerKind::Integer | SanitizerKind::Bounds |
SanitizerKind::ImplicitConversion | SanitizerKind::Nullability |
@@ -73,7 +74,8 @@ static const SanitizerMask CFIClasses =
SanitizerKind::CFIUnrelatedCast;
static const SanitizerMask CompatibleWithMinimalRuntime =
TrappingSupported | SanitizerKind::Scudo | SanitizerKind::ShadowCallStack |
- SanitizerKind::MemTag;
+ SanitizerKind::MemtagStack | SanitizerKind::MemtagHeap |
+ SanitizerKind::MemtagGlobals;
enum CoverageFeature {
CoverageFunc = 1 << 0,
@@ -168,14 +170,14 @@ static void addDefaultIgnorelists(const Driver &D, SanitizerMask Kinds,
else if (BL.Mask == SanitizerKind::CFI && DiagnoseErrors)
// If cfi_ignorelist.txt cannot be found in the resource dir, driver
// should fail.
- D.Diag(clang::diag::err_drv_no_such_file) << Path;
+ D.Diag(clang::diag::err_drv_missing_sanitizer_ignorelist) << Path;
}
validateSpecialCaseListFormat(
D, IgnorelistFiles, clang::diag::err_drv_malformed_sanitizer_ignorelist,
DiagnoseErrors);
}
-/// Parse -f(no-)?sanitize-(coverage-)?(white|ignore)list argument's values,
+/// Parse -f(no-)?sanitize-(coverage-)?(allow|ignore)list argument's values,
/// diagnosing any invalid file paths and validating special case list format.
static void parseSpecialCaseListArg(const Driver &D,
const llvm::opt::ArgList &Args,
@@ -185,7 +187,7 @@ static void parseSpecialCaseListArg(const Driver &D,
unsigned MalformedSCLErrorDiagID,
bool DiagnoseErrors) {
for (const auto *Arg : Args) {
- // Match -fsanitize-(coverage-)?(white|ignore)list.
+ // Match -fsanitize-(coverage-)?(allow|ignore)list.
if (Arg->getOption().matches(SCLOptionID)) {
Arg->claim();
std::string SCLPath = Arg->getValue();
@@ -218,9 +220,9 @@ static SanitizerMask setGroupBits(SanitizerMask Kinds) {
static SanitizerMask parseSanitizeTrapArgs(const Driver &D,
const llvm::opt::ArgList &Args,
bool DiagnoseErrors) {
- SanitizerMask TrapRemove; // During the loop below, the accumulated set of
- // sanitizers disabled by the current sanitizer
- // argument or any argument after it.
+ SanitizerMask TrapRemove; // During the loop below, the accumulated set of
+ // sanitizers disabled by the current sanitizer
+ // argument or any argument after it.
SanitizerMask TrappingKinds;
SanitizerMask TrappingSupportedWithGroups = setGroupBits(TrappingSupported);
@@ -233,8 +235,8 @@ static SanitizerMask parseSanitizeTrapArgs(const Driver &D,
if (InvalidValues && DiagnoseErrors) {
SanitizerSet S;
S.Mask = InvalidValues;
- D.Diag(diag::err_drv_unsupported_option_argument) << "-fsanitize-trap"
- << toString(S);
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << Arg->getOption().getName() << toString(S);
}
TrappingKinds |= expandSanitizerGroups(Add) & ~TrapRemove;
} else if (Arg->getOption().matches(options::OPT_fno_sanitize_trap_EQ)) {
@@ -293,13 +295,13 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
SanitizerMask AllRemove; // During the loop below, the accumulated set of
// sanitizers disabled by the current sanitizer
// argument or any argument after it.
- SanitizerMask AllAddedKinds; // Mask of all sanitizers ever enabled by
- // -fsanitize= flags (directly or via group
- // expansion), some of which may be disabled
- // later. Used to carefully prune
- // unused-argument diagnostics.
- SanitizerMask DiagnosedKinds; // All Kinds we have diagnosed up to now.
- // Used to deduplicate diagnostics.
+ SanitizerMask AllAddedKinds; // Mask of all sanitizers ever enabled by
+ // -fsanitize= flags (directly or via group
+ // expansion), some of which may be disabled
+ // later. Used to carefully prune
+ // unused-argument diagnostics.
+ SanitizerMask DiagnosedKinds; // All Kinds we have diagnosed up to now.
+ // Used to deduplicate diagnostics.
SanitizerMask Kinds;
const SanitizerMask Supported = setGroupBits(TC.getSupportedSanitizers());
@@ -367,6 +369,19 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
Add &= ~NotAllowedWithMinimalRuntime;
}
+ if (llvm::opt::Arg *A = Args.getLastArg(options::OPT_mcmodel_EQ)) {
+ StringRef CM = A->getValue();
+ if (CM != "small" &&
+ (Add & SanitizerKind::Function & ~DiagnosedKinds)) {
+ if (DiagnoseErrors)
+ D.Diag(diag::err_drv_argument_only_allowed_with)
+ << "-fsanitize=function"
+ << "-mcmodel=small";
+ Add &= ~SanitizerKind::Function;
+ DiagnosedKinds |= SanitizerKind::Function;
+ }
+ }
+
// FIXME: Make CFI on member function calls compatible with cross-DSO CFI.
// There are currently two problems:
// - Virtual function call checks need to pass a pointer to the function
@@ -402,7 +417,7 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
if ((Add & SanitizerKind::Vptr) && (RTTIMode == ToolChain::RM_Disabled)) {
if (const llvm::opt::Arg *NoRTTIArg = TC.getRTTIArg()) {
assert(NoRTTIArg->getOption().matches(options::OPT_fno_rtti) &&
- "RTTI disabled without -fno-rtti option?");
+ "RTTI disabled without -fno-rtti option?");
// The user explicitly passed -fno-rtti with -fsanitize=vptr, but
// the vptr sanitizer requires RTTI, so this is a user error.
if (DiagnoseErrors)
@@ -638,20 +653,36 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
}
}
}
- MsanUseAfterDtor =
- Args.hasFlag(options::OPT_fsanitize_memory_use_after_dtor,
- options::OPT_fno_sanitize_memory_use_after_dtor,
- MsanUseAfterDtor);
+ MsanUseAfterDtor = Args.hasFlag(
+ options::OPT_fsanitize_memory_use_after_dtor,
+ options::OPT_fno_sanitize_memory_use_after_dtor, MsanUseAfterDtor);
MsanParamRetval = Args.hasFlag(
options::OPT_fsanitize_memory_param_retval,
options::OPT_fno_sanitize_memory_param_retval, MsanParamRetval);
NeedPIE |= !(TC.getTriple().isOSLinux() &&
TC.getTriple().getArch() == llvm::Triple::x86_64);
+ } else if (AllAddedKinds & SanitizerKind::KernelMemory) {
+ MsanUseAfterDtor = false;
+ MsanParamRetval = Args.hasFlag(
+ options::OPT_fsanitize_memory_param_retval,
+ options::OPT_fno_sanitize_memory_param_retval, MsanParamRetval);
} else {
MsanUseAfterDtor = false;
MsanParamRetval = false;
}
+ if (AllAddedKinds & SanitizerKind::MemTag) {
+ StringRef S =
+ Args.getLastArgValue(options::OPT_fsanitize_memtag_mode_EQ, "sync");
+ if (S == "async" || S == "sync") {
+ MemtagMode = S.str();
+ } else {
+ D.Diag(clang::diag::err_drv_invalid_value_with_suggestion)
+ << "-fsanitize-memtag-mode=" << S << "{async, sync}";
+ MemtagMode = "sync";
+ }
+ }
+
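The -fsanitize-memtag-mode= handling above reduces to a two-value parse with a diagnosed fallback. A sketch, with the diagnostic reduced to a boolean:

#include <string>

// Accepts "sync" or "async"; anything else is diagnosed (the driver emits
// err_drv_invalid_value_with_suggestion) and falls back to "sync".
std::string parseMemtagMode(const std::string &S, bool &Diagnosed) {
  if (S == "sync" || S == "async")
    return S;
  Diagnosed = true;
  return "sync";
}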
if (AllAddedKinds & SanitizerKind::Thread) {
TsanMemoryAccess = Args.hasFlag(
options::OPT_fsanitize_thread_memory_access,
@@ -777,7 +808,7 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
CoverageFeatures |= CoverageFunc;
}
- // Parse -fsanitize-coverage-(ignore|white)list options if coverage enabled.
+ // Parse -fsanitize-coverage-(allow|ignore)list options if coverage enabled.
// This also validates special case lists format.
// Here, OptSpecifier() acts as a never-matching command-line argument.
// So, there is no way to clear coverage lists but you can append to them.
@@ -805,13 +836,13 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
NeedPIE |= TC.getTriple().isOSFuchsia();
if (Arg *A =
Args.getLastArg(options::OPT_fsanitize_address_field_padding)) {
- StringRef S = A->getValue();
- // Legal values are 0 and 1, 2, but in future we may add more levels.
- if ((S.getAsInteger(0, AsanFieldPadding) || AsanFieldPadding < 0 ||
- AsanFieldPadding > 2) &&
- DiagnoseErrors) {
- D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
- }
+ StringRef S = A->getValue();
+ // Legal values are 0, 1, and 2, but more levels may be added in the future.
+ if ((S.getAsInteger(0, AsanFieldPadding) || AsanFieldPadding < 0 ||
+ AsanFieldPadding > 2) &&
+ DiagnoseErrors) {
+ D.Diag(clang::diag::err_drv_invalid_value) << A->getAsString(Args) << S;
+ }
}
if (Arg *WindowsDebugRTArg =
@@ -846,12 +877,13 @@ SanitizerArgs::SanitizerArgs(const ToolChain &TC,
AsanOutlineInstrumentation);
// As a workaround for a bug in gold 2.26 and earlier, dead stripping of
- // globals in ASan is disabled by default on ELF targets.
+ // globals in ASan is disabled by default on most ELF targets.
// See https://sourceware.org/bugzilla/show_bug.cgi?id=19002
- AsanGlobalsDeadStripping =
+ AsanGlobalsDeadStripping = Args.hasFlag(
+ options::OPT_fsanitize_address_globals_dead_stripping,
+ options::OPT_fno_sanitize_address_globals_dead_stripping,
!TC.getTriple().isOSBinFormatELF() || TC.getTriple().isOSFuchsia() ||
- TC.getTriple().isPS4() ||
- Args.hasArg(options::OPT_fsanitize_address_globals_dead_stripping);
+ TC.getTriple().isPS());
AsanUseOdrIndicator =
Args.hasFlag(options::OPT_fsanitize_address_use_odr_indicator,
@@ -994,7 +1026,8 @@ static void addIncludeLinkerOption(const ToolChain &TC,
}
static bool hasTargetFeatureMTE(const llvm::opt::ArgStringList &CmdArgs) {
- for (auto Start = CmdArgs.begin(), End = CmdArgs.end(); Start != End; ++Start) {
+ for (auto Start = CmdArgs.begin(), End = CmdArgs.end(); Start != End;
+ ++Start) {
auto It = std::find(Start, End, StringRef("+mte"));
if (It == End)
break;
@@ -1015,8 +1048,8 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
// AMDGPU sanitizer support is experimental and controlled by -fgpu-sanitize.
if (TC.getTriple().isNVPTX() ||
(TC.getTriple().isAMDGPU() &&
- !Args.hasFlag(options::OPT_fgpu_sanitize,
- options::OPT_fno_gpu_sanitize)))
+ !Args.hasFlag(options::OPT_fgpu_sanitize, options::OPT_fno_gpu_sanitize,
+ true)))
return;
// Translate available CoverageFeatures to corresponding clang-cc1 flags.
@@ -1230,7 +1263,8 @@ void SanitizerArgs::addArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
<< "-fvisibility=";
}
- if (Sanitizers.has(SanitizerKind::MemTag) && !hasTargetFeatureMTE(CmdArgs))
+ if (Sanitizers.has(SanitizerKind::MemtagStack) &&
+ !hasTargetFeatureMTE(CmdArgs))
TC.getDriver().Diag(diag::err_stack_tagging_requires_hardware_feature);
}
@@ -1319,8 +1353,8 @@ std::string lastArgumentForMask(const Driver &D, const llvm::opt::ArgList &Args,
}
std::string describeSanitizeArg(const llvm::opt::Arg *A, SanitizerMask Mask) {
- assert(A->getOption().matches(options::OPT_fsanitize_EQ)
- && "Invalid argument in describeSanitizerArg!");
+ assert(A->getOption().matches(options::OPT_fsanitize_EQ) &&
+ "Invalid argument in describeSanitizerArg!");
std::string Sanitizers;
for (int i = 0, n = A->getNumValues(); i != n; ++i) {
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp
index d657d21bfcdb..5130eb9b72c1 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChain.cpp
@@ -67,8 +67,9 @@ static ToolChain::RTTIMode CalculateRTTIMode(const ArgList &Args,
return ToolChain::RM_Disabled;
}
- // -frtti is default, except for the PS4 CPU.
- return (Triple.isPS4CPU()) ? ToolChain::RM_Disabled : ToolChain::RM_Enabled;
+ // -frtti is default, except for the PS4/PS5 and DriverKit.
+ bool NoRTTI = Triple.isPS() || Triple.isDriverKit();
+ return NoRTTI ? ToolChain::RM_Disabled : ToolChain::RM_Enabled;
}
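Condensed, the RTTI decision is: an explicit -frtti/-fno-rtti wins, otherwise RTTI is on everywhere except PS4/PS5 and DriverKit. A sketch of that decision table (the explicit-flag handling is simplified from the surrounding function):

enum class RTTIMode { Enabled, Disabled };

RTTIMode rttiMode(bool ExplicitOn, bool ExplicitOff, bool IsPS,
                  bool IsDriverKit) {
  if (ExplicitOn)
    return RTTIMode::Enabled;
  if (ExplicitOff)
    return RTTIMode::Disabled;
  // No explicit flag: default off only on PS4/PS5 and DriverKit.
  return (IsPS || IsDriverKit) ? RTTIMode::Disabled : RTTIMode::Enabled;
}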
ToolChain::ToolChain(const Driver &D, const llvm::Triple &T,
@@ -105,6 +106,34 @@ bool ToolChain::useIntegratedAs() const {
IsIntegratedAssemblerDefault());
}
+bool ToolChain::useIntegratedBackend() const {
+ assert(
+ ((IsIntegratedBackendDefault() && IsIntegratedBackendSupported()) ||
+ (!IsIntegratedBackendDefault() || IsNonIntegratedBackendSupported())) &&
+ "(Non-)integrated backend set incorrectly!");
+
+ bool IBackend = Args.hasFlag(options::OPT_fintegrated_objemitter,
+ options::OPT_fno_integrated_objemitter,
+ IsIntegratedBackendDefault());
+
+ // Diagnose when integrated-objemitter options are not supported by this
+ // toolchain.
+ unsigned DiagID;
+ if ((IBackend && !IsIntegratedBackendSupported()) ||
+ (!IBackend && !IsNonIntegratedBackendSupported()))
+ DiagID = clang::diag::err_drv_unsupported_opt_for_target;
+ else
+ DiagID = clang::diag::warn_drv_unsupported_opt_for_target;
+ Arg *A = Args.getLastArg(options::OPT_fno_integrated_objemitter);
+ if (A && !IsNonIntegratedBackendSupported())
+ D.Diag(DiagID) << A->getAsString(Args) << Triple.getTriple();
+ A = Args.getLastArg(options::OPT_fintegrated_objemitter);
+ if (A && !IsIntegratedBackendSupported())
+ D.Diag(DiagID) << A->getAsString(Args) << Triple.getTriple();
+
+ return IBackend;
+}
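The resolution above can be read as: pick the backend from the flag pair, defaulting per toolchain, then diagnose if the chosen or rejected mode is unsupported. A standalone sketch under that reading (capability bits and error reporting simplified to plain booleans):

struct BackendCaps {
  bool IntegratedSupported;
  bool NonIntegratedSupported;
  bool IntegratedDefault;
};

// FlagOn / FlagOff model -fintegrated-objemitter / -fno-integrated-objemitter.
bool useIntegratedBackend(BackendCaps Caps, bool FlagOn, bool FlagOff,
                          bool &EmitError) {
  bool IBackend = FlagOn ? true : FlagOff ? false : Caps.IntegratedDefault;
  // Diagnose when the selected mode is not supported by this toolchain.
  EmitError = (IBackend && !Caps.IntegratedSupported) ||
              (!IBackend && !Caps.NonIntegratedSupported);
  return IBackend;
}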
+
bool ToolChain::useRelaxRelocations() const {
return ENABLE_X86_RELAX_RELOCATIONS;
}
@@ -153,6 +182,7 @@ static const DriverSuffix *FindDriverSuffix(StringRef ProgName, size_t &Pos) {
{"cl", "--driver-mode=cl"},
{"++", "--driver-mode=g++"},
{"flang", "--driver-mode=flang"},
+ {"clang-dxc", "--driver-mode=dxc"},
};
for (size_t i = 0; i < llvm::array_lengthof(DriverSuffixes); ++i) {
@@ -327,6 +357,12 @@ Tool *ToolChain::getOffloadWrapper() const {
return OffloadWrapper.get();
}
+Tool *ToolChain::getOffloadPackager() const {
+ if (!OffloadPackager)
+ OffloadPackager.reset(new tools::OffloadPackager(*this));
+ return OffloadPackager.get();
+}
+
Tool *ToolChain::getLinkerWrapper() const {
if (!LinkerWrapper)
LinkerWrapper.reset(new tools::LinkerWrapper(*this, getLink()));
@@ -359,6 +395,7 @@ Tool *ToolChain::getTool(Action::ActionClass AC) const {
case Action::PrecompileJobClass:
case Action::HeaderModulePrecompileJobClass:
case Action::PreprocessJobClass:
+ case Action::ExtractAPIJobClass:
case Action::AnalyzeJobClass:
case Action::MigrateJobClass:
case Action::VerifyPCHJobClass:
@@ -371,6 +408,8 @@ Tool *ToolChain::getTool(Action::ActionClass AC) const {
case Action::OffloadWrapperJobClass:
return getOffloadWrapper();
+ case Action::OffloadPackagerJobClass:
+ return getOffloadPackager();
case Action::LinkerWrapperJobClass:
return getLinkerWrapper();
}
@@ -903,6 +942,14 @@ void ToolChain::addExternCSystemIncludeIfExists(const ArgList &DriverArgs,
}
}
+/*static*/ std::string ToolChain::concat(StringRef Path, const Twine &A,
+ const Twine &B, const Twine &C,
+ const Twine &D) {
+ SmallString<128> Result(Path);
+ llvm::sys::path::append(Result, llvm::sys::path::Style::posix, A, B, C, D);
+ return std::string(Result);
+}
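concat simply appends components with POSIX separators onto a base path. A plain-std approximation (llvm::sys::path::append handles separators and edge cases this sketch ignores):

#include <initializer_list>
#include <iostream>
#include <string>

std::string concatPath(std::string Path,
                       std::initializer_list<std::string> Parts) {
  for (const std::string &P : Parts)
    if (!P.empty())
      Path += "/" + P;  // always POSIX-style, matching the code above
  return Path;
}

int main() {
  std::cout << concatPath("/usr", {"lib", "clang", "15"}) << "\n";
  // prints /usr/lib/clang/15
}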
+
std::string ToolChain::detectLibcxxVersion(StringRef IncludePath) const {
std::error_code EC;
int MaxVersion = 0;
@@ -1032,7 +1079,7 @@ SanitizerMask ToolChain::getSupportedSanitizers() const {
if (getTriple().getArch() == llvm::Triple::x86 ||
getTriple().getArch() == llvm::Triple::x86_64 ||
getTriple().getArch() == llvm::Triple::arm || getTriple().isWasm() ||
- getTriple().isAArch64())
+ getTriple().isAArch64() || getTriple().isRISCV())
Res |= SanitizerKind::CFIICall;
if (getTriple().getArch() == llvm::Triple::x86_64 ||
getTriple().isAArch64(64) || getTriple().isRISCV())
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.cpp
index e4bbf498b9cd..878b84a77702 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AIX.cpp
@@ -98,9 +98,10 @@ void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-bnoentry");
}
- // Specify PGO linker option without LTO
- if (!D.isUsingLTO() &&
- (Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
+ // PGO instrumentation generates symbols belonging to special sections; the
+ // linker needs to place all symbols of a given section together in memory,
+ // which the AIX linker only does when passed an option.
+ if (Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
false) ||
Args.hasFlag(options::OPT_fprofile_generate,
options::OPT_fno_profile_generate, false) ||
@@ -115,8 +116,8 @@ void aix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.hasFlag(options::OPT_fcs_profile_generate_EQ,
options::OPT_fno_profile_generate, false) ||
Args.hasArg(options::OPT_fcreate_profile) ||
- Args.hasArg(options::OPT_coverage)))
- CmdArgs.push_back("-bdbg:namedsects");
+ Args.hasArg(options::OPT_coverage))
+ CmdArgs.push_back("-bdbg:namedsects:ss");
// Specify linker output file.
assert((Output.isFilename() || Output.isNothing()) && "Invalid output.");
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp
index 9638fa2ecfca..8718ab53ac1a 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.cpp
@@ -91,6 +91,7 @@ void RocmInstallationDetector::scanLibDevicePath(llvm::StringRef Path) {
else if (FileName.endswith(Suffix))
BaseName = FileName.drop_back(Suffix.size());
+ const StringRef ABIVersionPrefix = "oclc_abi_version_";
if (BaseName == "ocml") {
OCML = FilePath;
} else if (BaseName == "ockl") {
@@ -121,6 +122,12 @@ void RocmInstallationDetector::scanLibDevicePath(llvm::StringRef Path) {
WavefrontSize64.On = FilePath;
} else if (BaseName == "oclc_wavefrontsize64_off") {
WavefrontSize64.Off = FilePath;
+ } else if (BaseName.startswith(ABIVersionPrefix)) {
+ unsigned ABIVersionNumber;
+ if (BaseName.drop_front(ABIVersionPrefix.size())
+ .getAsInteger(/*Radix=*/0, ABIVersionNumber))
+ continue;
+ ABIVersionMap[ABIVersionNumber] = FilePath.str();
} else {
// Process all bitcode filenames that look like
// ocl_isa_version_XXX.amdgcn.bc
@@ -545,7 +552,7 @@ void amdgpu::getAMDGPUTargetFeatures(const Driver &D,
llvm::StringMap<bool> FeatureMap;
auto OptionalGpuArch = parseTargetID(Triple, TargetID, &FeatureMap);
if (OptionalGpuArch) {
- StringRef GpuArch = OptionalGpuArch.getValue();
+ StringRef GpuArch = *OptionalGpuArch;
// Iterate through all possible target ID features for the given GPU.
// If it is mapped to true, add +feature.
// If it is mapped to false, add -feature.
@@ -723,7 +730,7 @@ AMDGPUToolChain::getParsedTargetID(const llvm::opt::ArgList &DriverArgs) const {
if (!OptionalGpuArch)
return {TargetID.str(), None, None};
- return {TargetID.str(), OptionalGpuArch.getValue().str(), FeatureMap};
+ return {TargetID.str(), OptionalGpuArch->str(), FeatureMap};
}
void AMDGPUToolChain::checkTargetID(
@@ -731,7 +738,7 @@ void AMDGPUToolChain::checkTargetID(
auto PTID = getParsedTargetID(DriverArgs);
if (PTID.OptionalTargetID && !PTID.OptionalGPUArch) {
getDriver().Diag(clang::diag::err_drv_bad_target_id)
- << PTID.OptionalTargetID.getValue();
+ << *PTID.OptionalTargetID;
}
}
@@ -822,20 +829,16 @@ void ROCMToolChain::addClangTargetOptions(
if (DriverArgs.hasArg(options::OPT_nogpulib))
return;
- if (!RocmInstallation.hasDeviceLibrary()) {
- getDriver().Diag(diag::err_drv_no_rocm_device_lib) << 0;
- return;
- }
-
// Get the device name and canonicalize it
const StringRef GpuArch = getGPUArch(DriverArgs);
auto Kind = llvm::AMDGPU::parseArchAMDGCN(GpuArch);
const StringRef CanonArch = llvm::AMDGPU::getArchNameAMDGCN(Kind);
std::string LibDeviceFile = RocmInstallation.getLibDeviceFile(CanonArch);
- if (LibDeviceFile.empty()) {
- getDriver().Diag(diag::err_drv_no_rocm_device_lib) << 1 << GpuArch;
+ auto ABIVer = DeviceLibABIVersion::fromCodeObjectVersion(
+ getAMDGPUCodeObjectVersion(getDriver(), DriverArgs));
+ if (!RocmInstallation.checkCommonBitcodeLibs(CanonArch, LibDeviceFile,
+ ABIVer))
return;
- }
bool Wave64 = isWave64(DriverArgs, Kind);
@@ -858,20 +861,37 @@ void ROCMToolChain::addClangTargetOptions(
// Add the generic set of libraries.
BCLibs.append(RocmInstallation.getCommonBitcodeLibs(
DriverArgs, LibDeviceFile, Wave64, DAZ, FiniteOnly, UnsafeMathOpt,
- FastRelaxedMath, CorrectSqrt));
+ FastRelaxedMath, CorrectSqrt, ABIVer, false));
- llvm::for_each(BCLibs, [&](StringRef BCFile) {
+ for (StringRef BCFile : BCLibs) {
CC1Args.push_back("-mlink-builtin-bitcode");
CC1Args.push_back(DriverArgs.MakeArgString(BCFile));
- });
+ }
+}
+
+bool RocmInstallationDetector::checkCommonBitcodeLibs(
+ StringRef GPUArch, StringRef LibDeviceFile,
+ DeviceLibABIVersion ABIVer) const {
+ if (!hasDeviceLibrary()) {
+ D.Diag(diag::err_drv_no_rocm_device_lib) << 0;
+ return false;
+ }
+ if (LibDeviceFile.empty()) {
+ D.Diag(diag::err_drv_no_rocm_device_lib) << 1 << GPUArch;
+ return false;
+ }
+ if (ABIVer.requiresLibrary() && getABIVersionPath(ABIVer).empty()) {
+ D.Diag(diag::err_drv_no_rocm_device_lib) << 2 << ABIVer.toString();
+ return false;
+ }
+ return true;
}
llvm::SmallVector<std::string, 12>
RocmInstallationDetector::getCommonBitcodeLibs(
const llvm::opt::ArgList &DriverArgs, StringRef LibDeviceFile, bool Wave64,
bool DAZ, bool FiniteOnly, bool UnsafeMathOpt, bool FastRelaxedMath,
- bool CorrectSqrt) const {
-
+ bool CorrectSqrt, DeviceLibABIVersion ABIVer, bool isOpenMP = false) const {
llvm::SmallVector<std::string, 12> BCLibs;
auto AddBCLib = [&](StringRef BCFile) { BCLibs.push_back(BCFile.str()); };
@@ -884,6 +904,9 @@ RocmInstallationDetector::getCommonBitcodeLibs(
AddBCLib(getCorrectlyRoundedSqrtPath(CorrectSqrt));
AddBCLib(getWavefrontSize64Path(Wave64));
AddBCLib(LibDeviceFile);
+ auto ABIVerPath = getABIVersionPath(ABIVer);
+ if (!ABIVerPath.empty())
+ AddBCLib(ABIVerPath);
return BCLibs;
}
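The resulting library list is order-preserving: the control-variable libraries first, then the per-GPU libdevice file, then the optional ABI-version library added by this change. A sketch of that assembly (the ocml/ockl, wavefrontsize, and oclc_abi_version_* names appear in the hunks above; the DAZ file names are illustrative):

#include <string>
#include <vector>

std::vector<std::string> commonBitcodeLibs(bool Wave64, bool DAZ,
                                           const std::string &LibDeviceFile,
                                           const std::string &ABIVerPath) {
  std::vector<std::string> BCLibs = {
      "ocml.bc", "ockl.bc",
      DAZ ? "oclc_daz_opt_on.bc" : "oclc_daz_opt_off.bc",
      Wave64 ? "oclc_wavefrontsize64_on.bc" : "oclc_wavefrontsize64_off.bc",
      LibDeviceFile};
  // The ABI-version library is only appended when one was detected.
  if (!ABIVerPath.empty())
    BCLibs.push_back(ABIVerPath);  // e.g. an oclc_abi_version_*.bc path
  return BCLibs;
}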
@@ -897,15 +920,17 @@ bool AMDGPUToolChain::shouldSkipArgument(const llvm::opt::Arg *A) const {
llvm::SmallVector<std::string, 12>
ROCMToolChain::getCommonDeviceLibNames(const llvm::opt::ArgList &DriverArgs,
- const std::string &GPUArch) const {
+ const std::string &GPUArch,
+ bool isOpenMP) const {
auto Kind = llvm::AMDGPU::parseArchAMDGCN(GPUArch);
const StringRef CanonArch = llvm::AMDGPU::getArchNameAMDGCN(Kind);
std::string LibDeviceFile = RocmInstallation.getLibDeviceFile(CanonArch);
- if (LibDeviceFile.empty()) {
- getDriver().Diag(diag::err_drv_no_rocm_device_lib) << 1 << GPUArch;
+ auto ABIVer = DeviceLibABIVersion::fromCodeObjectVersion(
+ getAMDGPUCodeObjectVersion(getDriver(), DriverArgs));
+ if (!RocmInstallation.checkCommonBitcodeLibs(CanonArch, LibDeviceFile,
+ ABIVer))
return {};
- }
// If --hip-device-lib is not set, add the default bitcode libraries.
// TODO: There are way too many flags that change this. Do we need to check
@@ -922,10 +947,10 @@ ROCMToolChain::getCommonDeviceLibNames(const llvm::opt::ArgList &DriverArgs,
options::OPT_fno_fast_math, false);
bool CorrectSqrt = DriverArgs.hasFlag(
options::OPT_fhip_fp32_correctly_rounded_divide_sqrt,
- options::OPT_fno_hip_fp32_correctly_rounded_divide_sqrt);
+ options::OPT_fno_hip_fp32_correctly_rounded_divide_sqrt, true);
bool Wave64 = isWave64(DriverArgs, Kind);
return RocmInstallation.getCommonBitcodeLibs(
DriverArgs, LibDeviceFile, Wave64, DAZ, FiniteOnly, UnsafeMathOpt,
- FastRelaxedMath, CorrectSqrt);
+ FastRelaxedMath, CorrectSqrt, ABIVer, isOpenMP);
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.h
index 156bfd1fbdb2..ddcc124b25ba 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPU.h
@@ -142,7 +142,8 @@ public:
// Returns a list of device library names shared by different languages
llvm::SmallVector<std::string, 12>
getCommonDeviceLibNames(const llvm::opt::ArgList &DriverArgs,
- const std::string &GPUArch) const;
+ const std::string &GPUArch,
+ bool isOpenMP = false) const;
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp
index d7cf41e4b660..efcd565b510b 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AMDGPUOpenMP.cpp
@@ -125,9 +125,8 @@ const char *AMDGCN::OpenMPLinker::constructLLVMLinkCommand(
// the device-libs bitcode through that on the way to this llvm-link
SmallVector<std::string, 12> BCLibs =
AMDGPUOpenMPTC.getCommonDeviceLibNames(Args, SubArchName.str());
- llvm::for_each(BCLibs, [&](StringRef BCFile) {
+ for (StringRef BCFile : BCLibs)
CmdArgs.push_back(Args.MakeArgString(BCFile));
- });
}
}
@@ -289,15 +288,7 @@ void AMDGPUOpenMPToolChain::addClangTargetOptions(
if (getDriver().isUsingLTO(/* IsOffload */ true))
return;
- std::string BitcodeSuffix;
- if (DriverArgs.hasFlag(options::OPT_fopenmp_target_new_runtime,
- options::OPT_fno_openmp_target_new_runtime, true))
- BitcodeSuffix = "new-amdgpu-" + GPUArch;
- else
- BitcodeSuffix = "amdgcn-" + GPUArch;
-
- addOpenMPDeviceRTL(getDriver(), DriverArgs, CC1Args, BitcodeSuffix,
- getTriple());
+ addOpenMPDeviceRTL(getDriver(), DriverArgs, CC1Args, GPUArch, getTriple());
}
llvm::opt::DerivedArgList *AMDGPUOpenMPToolChain::TranslateArgs(
@@ -315,9 +306,10 @@ llvm::opt::DerivedArgList *AMDGPUOpenMPToolChain::TranslateArgs(
if (!llvm::is_contained(*DAL, A))
DAL->append(A);
- std::string Arch = DAL->getLastArgValue(options::OPT_march_EQ).str();
- if (Arch.empty()) {
- checkSystemForAMDGPU(Args, *this, Arch);
+ if (!DAL->hasArg(options::OPT_march_EQ)) {
+ std::string Arch = BoundArch.str();
+ if (BoundArch.empty())
+ checkSystemForAMDGPU(Args, *this, Arch);
DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ), Arch);
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.cpp
index 2cf16cf9fdb4..1e866553d826 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.cpp
@@ -28,6 +28,7 @@ using namespace llvm::opt;
namespace {
+// NOTE: This list has been synchronized with gcc-avr 7.3.0 and avr-libc 2.0.0.
constexpr struct {
StringRef Name;
StringRef SubPath;
@@ -62,6 +63,7 @@ constexpr struct {
{"attiny261a", "avr25/tiny-stack", "avr25", 0x800060},
{"at86rf401", "avr25", "avr25", 0x800060},
{"ata5272", "avr25", "avr25", 0x800100},
+ {"ata6616c", "avr25", "avr25", 0x800100},
{"attiny4313", "avr25", "avr25", 0x800060},
{"attiny44", "avr25", "avr25", 0x800060},
{"attiny44a", "avr25", "avr25", 0x800060},
@@ -88,6 +90,8 @@ constexpr struct {
{"at90usb82", "avr35", "avr35", 0x800100},
{"at90usb162", "avr35", "avr35", 0x800100},
{"ata5505", "avr35", "avr35", 0x800100},
+ {"ata6617c", "avr35", "avr35", 0x800100},
+ {"ata664251", "avr35", "avr35", 0x800100},
{"atmega8u2", "avr35", "avr35", 0x800100},
{"atmega16u2", "avr35", "avr35", 0x800100},
{"atmega32u2", "avr35", "avr35", 0x800100},
@@ -97,6 +101,7 @@ constexpr struct {
{"atmega8a", "avr4", "avr4", 0x800060},
{"ata6285", "avr4", "avr4", 0x800100},
{"ata6286", "avr4", "avr4", 0x800100},
+ {"ata6612c", "avr4", "avr4", 0x800100},
{"atmega48", "avr4", "avr4", 0x800100},
{"atmega48a", "avr4", "avr4", 0x800100},
{"atmega48pa", "avr4", "avr4", 0x800100},
@@ -116,8 +121,17 @@ constexpr struct {
{"at90pwm3", "avr4", "avr4", 0x800100},
{"at90pwm3b", "avr4", "avr4", 0x800100},
{"at90pwm81", "avr4", "avr4", 0x800100},
+ {"ata5702m322", "avr5", "avr5", 0x800200},
+ {"ata5782", "avr5", "avr5", 0x800200},
{"ata5790", "avr5", "avr5", 0x800100},
+ {"ata5790n", "avr5", "avr5", 0x800100},
+ {"ata5791", "avr5", "avr5", 0x800100},
{"ata5795", "avr5", "avr5", 0x800100},
+ {"ata5831", "avr5", "avr5", 0x800200},
+ {"ata6613c", "avr5", "avr5", 0x800100},
+ {"ata6614q", "avr5", "avr5", 0x800100},
+ {"ata8210", "avr5", "avr5", 0x800200},
+ {"ata8510", "avr5", "avr5", 0x800200},
{"atmega16", "avr5", "avr5", 0x800060},
{"atmega16a", "avr5", "avr5", 0x800060},
{"atmega161", "avr5", "avr5", 0x800060},
@@ -145,6 +159,7 @@ constexpr struct {
{"atmega324a", "avr5", "avr5", 0x800100},
{"atmega324p", "avr5", "avr5", 0x800100},
{"atmega324pa", "avr5", "avr5", 0x800100},
+ {"atmega324pb", "avr5", "avr5", 0x800100},
{"atmega325", "avr5", "avr5", 0x800100},
{"atmega325a", "avr5", "avr5", 0x800100},
{"atmega325p", "avr5", "avr5", 0x800100},
@@ -155,6 +170,7 @@ constexpr struct {
{"atmega3250pa", "avr5", "avr5", 0x800100},
{"atmega328", "avr5", "avr5", 0x800100},
{"atmega328p", "avr5", "avr5", 0x800100},
+ {"atmega328pb", "avr5", "avr5", 0x800100},
{"atmega329", "avr5", "avr5", 0x800100},
{"atmega329a", "avr5", "avr5", 0x800100},
{"atmega329p", "avr5", "avr5", 0x800100},
@@ -192,6 +208,7 @@ constexpr struct {
{"atmega32hvb", "avr5", "avr5", 0x800100},
{"atmega32hvbrevb", "avr5", "avr5", 0x800100},
{"atmega64hve", "avr5", "avr5", 0x800100},
+ {"atmega64hve2", "avr5", "avr5", 0x800100},
{"at90can32", "avr5", "avr5", 0x800100},
{"at90can64", "avr5", "avr5", 0x800100},
{"at90pwm161", "avr5", "avr5", 0x800100},
@@ -232,17 +249,22 @@ constexpr struct {
{"attiny10", "avrtiny", "avrtiny", 0x800040},
{"attiny20", "avrtiny", "avrtiny", 0x800040},
{"attiny40", "avrtiny", "avrtiny", 0x800040},
+ {"attiny102", "avrtiny", "avrtiny", 0x800040},
+ {"attiny104", "avrtiny", "avrtiny", 0x800040},
{"atxmega16a4", "avrxmega2", "avrxmega2", 0x802000},
{"atxmega16a4u", "avrxmega2", "avrxmega2", 0x802000},
{"atxmega16c4", "avrxmega2", "avrxmega2", 0x802000},
{"atxmega16d4", "avrxmega2", "avrxmega2", 0x802000},
{"atxmega32a4", "avrxmega2", "avrxmega2", 0x802000},
{"atxmega32a4u", "avrxmega2", "avrxmega2", 0x802000},
+ {"atxmega32c3", "avrxmega2", "avrxmega2", 0x802000},
{"atxmega32c4", "avrxmega2", "avrxmega2", 0x802000},
+ {"atxmega32d3", "avrxmega2", "avrxmega2", 0x802000},
{"atxmega32d4", "avrxmega2", "avrxmega2", 0x802000},
{"atxmega32e5", "avrxmega2", "avrxmega2", 0x802000},
{"atxmega16e5", "avrxmega2", "avrxmega2", 0x802000},
{"atxmega8e5", "avrxmega2", "avrxmega2", 0x802000},
+ {"atxmega64a3", "avrxmega4", "avrxmega4", 0x802000},
{"atxmega64a3u", "avrxmega4", "avrxmega4", 0x802000},
{"atxmega64a4u", "avrxmega4", "avrxmega4", 0x802000},
{"atxmega64b1", "avrxmega4", "avrxmega4", 0x802000},
@@ -274,6 +296,42 @@ constexpr struct {
{"atxmega128a1", "avrxmega7", "avrxmega7", 0x802000},
{"atxmega128a1u", "avrxmega7", "avrxmega7", 0x802000},
{"atxmega128a4u", "avrxmega7", "avrxmega7", 0x802000},
+ {"attiny202", "avrxmega3/short-calls", "avrxmega3", 0x803F80},
+ {"attiny204", "avrxmega3/short-calls", "avrxmega3", 0x803F80},
+ {"attiny212", "avrxmega3/short-calls", "avrxmega3", 0x803F80},
+ {"attiny214", "avrxmega3/short-calls", "avrxmega3", 0x803F80},
+ {"attiny402", "avrxmega3/short-calls", "avrxmega3", 0x803F00},
+ {"attiny404", "avrxmega3/short-calls", "avrxmega3", 0x803F00},
+ {"attiny406", "avrxmega3/short-calls", "avrxmega3", 0x803F00},
+ {"attiny412", "avrxmega3/short-calls", "avrxmega3", 0x803F00},
+ {"attiny414", "avrxmega3/short-calls", "avrxmega3", 0x803F00},
+ {"attiny416", "avrxmega3/short-calls", "avrxmega3", 0x803F00},
+ {"attiny417", "avrxmega3/short-calls", "avrxmega3", 0x803F00},
+ {"attiny804", "avrxmega3/short-calls", "avrxmega3", 0x803E00},
+ {"attiny806", "avrxmega3/short-calls", "avrxmega3", 0x803E00},
+ {"attiny807", "avrxmega3/short-calls", "avrxmega3", 0x803E00},
+ {"attiny814", "avrxmega3/short-calls", "avrxmega3", 0x803E00},
+ {"attiny816", "avrxmega3/short-calls", "avrxmega3", 0x803E00},
+ {"attiny817", "avrxmega3/short-calls", "avrxmega3", 0x803E00},
+ {"atmega808", "avrxmega3/short-calls", "avrxmega3", 0x803C00},
+ {"atmega809", "avrxmega3/short-calls", "avrxmega3", 0x803C00},
+ {"atmega1608", "avrxmega3", "avrxmega3", 0x803800},
+ {"atmega1609", "avrxmega3", "avrxmega3", 0x803800},
+ {"atmega3208", "avrxmega3", "avrxmega3", 0x803000},
+ {"atmega3209", "avrxmega3", "avrxmega3", 0x803000},
+ {"atmega4808", "avrxmega3", "avrxmega3", 0x802800},
+ {"atmega4809", "avrxmega3", "avrxmega3", 0x802800},
+ {"attiny1604", "avrxmega3", "avrxmega3", 0x803C00},
+ {"attiny1606", "avrxmega3", "avrxmega3", 0x803C00},
+ {"attiny1607", "avrxmega3", "avrxmega3", 0x803C00},
+ {"attiny1614", "avrxmega3", "avrxmega3", 0x803800},
+ {"attiny1616", "avrxmega3", "avrxmega3", 0x803800},
+ {"attiny1617", "avrxmega3", "avrxmega3", 0x803800},
+ {"attiny1624", "avrxmega3", "avrxmega3", 0x803800},
+ {"attiny1626", "avrxmega3", "avrxmega3", 0x803800},
+ {"attiny1627", "avrxmega3", "avrxmega3", 0x803800},
+ {"attiny3216", "avrxmega3", "avrxmega3", 0x803800},
+ {"attiny3217", "avrxmega3", "avrxmega3", 0x803800},
};
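The table above is consumed by simple name lookups such as GetMCUSubPath below. A minimal sketch of such a lookup, over two entries copied verbatim from the table (the real table is the full list above):

#include <optional>
#include <string_view>

struct MCUInfo {
  std::string_view Name, SubPath, Family;
  unsigned DataAddr;
};

constexpr MCUInfo MCUTable[] = {
    {"atmega328p", "avr5", "avr5", 0x800100},
    {"attiny817", "avrxmega3/short-calls", "avrxmega3", 0x803E00},
};

// Linear scan keyed on the MCU name; an unknown MCU makes the driver warn
// and skip standard-library linking.
std::optional<MCUInfo> lookupMCU(std::string_view Name) {
  for (const MCUInfo &E : MCUTable)
    if (E.Name == Name)
      return E;
  return std::nullopt;
}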
std::string GetMCUSubPath(StringRef MCUName) {
@@ -308,49 +366,19 @@ const StringRef PossibleAVRLibcLocations[] = {
/// AVR Toolchain
AVRToolChain::AVRToolChain(const Driver &D, const llvm::Triple &Triple,
const ArgList &Args)
- : Generic_ELF(D, Triple, Args), LinkStdlib(false) {
+ : Generic_ELF(D, Triple, Args) {
GCCInstallation.init(Triple, Args);
+ std::string CPU = getCPUName(D, Args, Triple);
+ if (CPU.empty())
+ D.Diag(diag::warn_drv_avr_mcu_not_specified);
+
// Only add default libraries if the user hasn't explicitly opted out.
if (!Args.hasArg(options::OPT_nostdlib) &&
- !Args.hasArg(options::OPT_nodefaultlibs) &&
- !Args.hasArg(options::OPT_c /* does not apply when not linking */)) {
- std::string CPU = getCPUName(D, Args, Triple);
-
- if (CPU.empty()) {
- // We cannot link any standard libraries without an MCU specified.
- D.Diag(diag::warn_drv_avr_mcu_not_specified);
- } else {
- Optional<StringRef> FamilyName = GetMCUFamilyName(CPU);
- Optional<std::string> AVRLibcRoot = findAVRLibcInstallation();
-
- if (!FamilyName.hasValue()) {
- // We do not have an entry for this CPU in the family
- // mapping table yet.
- D.Diag(diag::warn_drv_avr_family_linking_stdlibs_not_implemented)
- << CPU;
- } else if (!GCCInstallation.isValid()) {
- // No avr-gcc found and so no runtime linked.
- D.Diag(diag::warn_drv_avr_gcc_not_found);
- } else if (!AVRLibcRoot.hasValue()) {
- // No avr-libc found and so no runtime linked.
- D.Diag(diag::warn_drv_avr_libc_not_found);
- } else { // We have enough information to link stdlibs
- std::string GCCRoot(GCCInstallation.getInstallPath());
- std::string GCCParentPath(GCCInstallation.getParentLibPath());
- std::string LibcRoot = AVRLibcRoot.getValue();
- std::string SubPath = GetMCUSubPath(CPU);
-
- getProgramPaths().push_back(GCCParentPath + "/../bin");
- getFilePaths().push_back(LibcRoot + std::string("/lib/") + SubPath);
- getFilePaths().push_back(GCCRoot + std::string("/") + SubPath);
-
- LinkStdlib = true;
- }
- }
-
- if (!LinkStdlib)
- D.Diag(diag::warn_drv_avr_stdlib_not_linked);
+ !Args.hasArg(options::OPT_nodefaultlibs) && GCCInstallation.isValid()) {
+ GCCInstallPath = GCCInstallation.getInstallPath();
+ std::string GCCParentPath(GCCInstallation.getParentLibPath());
+ getProgramPaths().push_back(GCCParentPath + "/../bin");
}
}
@@ -362,11 +390,11 @@ void AVRToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
// Omit if there is no avr-libc installed.
Optional<std::string> AVRLibcRoot = findAVRLibcInstallation();
- if (!AVRLibcRoot.hasValue())
+ if (!AVRLibcRoot)
return;
// Add 'avr-libc/include' to clang system include paths if applicable.
- std::string AVRInc = AVRLibcRoot.getValue() + "/include";
+ std::string AVRInc = *AVRLibcRoot + "/include";
if (llvm::sys::fs::is_directory(AVRInc))
addSystemInclude(DriverArgs, CC1Args, AVRInc);
}
@@ -387,21 +415,27 @@ void AVRToolChain::addClangTargetOptions(
}
Tool *AVRToolChain::buildLinker() const {
- return new tools::AVR::Linker(getTriple(), *this, LinkStdlib);
+ return new tools::AVR::Linker(getTriple(), *this);
}
void AVR::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs, const ArgList &Args,
const char *LinkingOutput) const {
+ const auto &TC = static_cast<const AVRToolChain &>(getToolChain());
const Driver &D = getToolChain().getDriver();
// Compute information about the target AVR.
std::string CPU = getCPUName(D, Args, getToolChain().getTriple());
llvm::Optional<StringRef> FamilyName = GetMCUFamilyName(CPU);
+ llvm::Optional<std::string> AVRLibcRoot = TC.findAVRLibcInstallation();
llvm::Optional<unsigned> SectionAddressData = GetMCUSectionAddressData(CPU);
- std::string Linker = getToolChain().GetProgramPath(getShortName());
+ // Compute the linker program path, defaulting to GNU "avr-ld".
+ const Arg *A = Args.getLastArg(options::OPT_fuse_ld_EQ);
+ std::string Linker = A ? getToolChain().GetLinkerPath(nullptr)
+ : getToolChain().GetProgramPath(getShortName());
+
ArgStringList CmdArgs;
AddLinkerInputs(getToolChain(), Inputs, Args, CmdArgs, JA);
@@ -415,7 +449,33 @@ void AVR::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddAllArgs(CmdArgs, options::OPT_L);
getToolChain().AddFilePathLibArgs(Args, CmdArgs);
- if (SectionAddressData.hasValue()) {
+ // Only add default libraries if the user hasn't explicitly opted out.
+ bool LinkStdlib = false;
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (!CPU.empty()) {
+ if (!FamilyName) {
+ // We do not have an entry for this CPU in the family
+ // mapping table yet.
+ D.Diag(diag::warn_drv_avr_family_linking_stdlibs_not_implemented)
+ << CPU;
+ } else if (!AVRLibcRoot) {
+ // No avr-libc was found, so no runtime is linked.
+ D.Diag(diag::warn_drv_avr_libc_not_found);
+ } else {
+ std::string SubPath = GetMCUSubPath(CPU);
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-L") + *AVRLibcRoot + "/lib/" + SubPath));
+ CmdArgs.push_back(
+ Args.MakeArgString("-L" + TC.getGCCInstallPath() + "/" + SubPath));
+ LinkStdlib = true;
+ }
+ }
+ if (!LinkStdlib)
+ D.Diag(diag::warn_drv_avr_stdlib_not_linked);
+ }
+
+ if (SectionAddressData) {
std::string DataSectionArg = std::string("-Tdata=0x") +
llvm::utohexstr(SectionAddressData.getValue());
CmdArgs.push_back(Args.MakeArgString(DataSectionArg));
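The -Tdata argument is just the MCU's data-section start rendered in hex; for atmega328p the table earlier gives 0x800100, so the linker receives -Tdata=0x800100. A sketch of that formatting (llvm::utohexstr is approximated with snprintf):

#include <cstdio>
#include <string>

std::string makeDataSectionArg(unsigned Addr) {
  char Buf[32];
  std::snprintf(Buf, sizeof(Buf), "-Tdata=0x%X", Addr);  // uppercase hex
  return Buf;
}
// makeDataSectionArg(0x800100) == "-Tdata=0x800100"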
@@ -445,11 +505,15 @@ void AVR::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("--end-group");
+ // Add user specified linker script.
+ Args.AddAllArgs(CmdArgs, options::OPT_T);
+
// Specify the family name as the emulation mode to use.
// This is almost always required because otherwise avr-ld
// will assume 'avr2' and warn about the program being larger
// than the bare minimum supports.
- CmdArgs.push_back(Args.MakeArgString(std::string("-m") + *FamilyName));
+ if (Linker.find("avr-ld") != std::string::npos)
+ CmdArgs.push_back(Args.MakeArgString(std::string("-m") + *FamilyName));
}
C.addCommand(std::make_unique<Command>(
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.h
index 2d027957ed76..ab147d852ad7 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/AVR.h
@@ -31,17 +31,14 @@ public:
llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadKind) const override;
+ llvm::Optional<std::string> findAVRLibcInstallation() const;
+ StringRef getGCCInstallPath() const { return GCCInstallPath; }
+
protected:
Tool *buildLinker() const override;
private:
- /// Whether libgcc, libct, and friends should be linked.
- ///
- /// This is not done if the user does not specify a
- /// microcontroller on the command line.
- bool LinkStdlib;
-
- llvm::Optional<std::string> findAVRLibcInstallation() const;
+ StringRef GCCInstallPath;
};
} // end namespace toolchains
@@ -50,9 +47,8 @@ namespace tools {
namespace AVR {
class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Linker(const llvm::Triple &Triple, const ToolChain &TC, bool LinkStdlib)
- : Tool("AVR::Linker", "avr-ld", TC), Triple(Triple),
- LinkStdlib(LinkStdlib) {}
+ Linker(const llvm::Triple &Triple, const ToolChain &TC)
+ : Tool("AVR::Linker", "avr-ld", TC), Triple(Triple) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
@@ -63,7 +59,6 @@ public:
protected:
const llvm::Triple &Triple;
- bool LinkStdlib;
};
} // end namespace AVR
} // end namespace tools
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
index 53610f0909a2..cf7e201b4972 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.cpp
@@ -151,6 +151,8 @@ getAArch64ArchFeaturesFromMarch(const Driver &D, StringRef March,
std::pair<StringRef, StringRef> Split = StringRef(MarchLowerCase).split("+");
llvm::AArch64::ArchKind ArchKind = llvm::AArch64::parseArch(Split.first);
+ if (Split.first == "native")
+ ArchKind = llvm::AArch64::getCPUArchKind(llvm::sys::getHostCPUName().str());
if (ArchKind == llvm::AArch64::ArchKind::INVALID ||
!llvm::AArch64::getArchFeatures(ArchKind, Features))
return false;
@@ -222,7 +224,6 @@ getAArch64MicroArchFeaturesFromMcpu(const Driver &D, StringRef Mcpu,
void aarch64::getAArch64TargetFeatures(const Driver &D,
const llvm::Triple &Triple,
const ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs,
std::vector<StringRef> &Features,
bool ForAS) {
Arg *A;
@@ -264,13 +265,13 @@ void aarch64::getAArch64TargetFeatures(const Driver &D,
D, getAArch64TargetCPU(Args, Triple, A), Args, Features);
if (!success) {
- auto Diag = D.Diag(diag::err_drv_clang_unsupported);
+ auto Diag = D.Diag(diag::err_drv_unsupported_option_argument);
// If "-Wa,-march=" is used, 'WaMArch' will contain the argument's value,
// while 'A' is uninitialized. Only dereference 'A' in the other case.
if (!WaMArch.empty())
- Diag << "-march=" + WaMArch.str();
+ Diag << "march=" << WaMArch;
else
- Diag << A->getAsString(Args);
+ Diag << A->getOption().getName() << A->getValue();
}
if (Args.getLastArg(options::OPT_mgeneral_regs_only)) {
@@ -323,8 +324,8 @@ void aarch64::getAArch64TargetFeatures(const Driver &D,
DisableComdat = true;
continue;
}
- D.Diag(diag::err_invalid_sls_hardening)
- << Scope << A->getAsString(Args);
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << Scope;
break;
}
}
@@ -346,28 +347,85 @@ void aarch64::getAArch64TargetFeatures(const Driver &D,
Features.push_back("-crc");
}
+ int V8Version = -1;
+ int V9Version = -1;
+ bool HasNoSM4 = false;
+ bool HasNoSHA3 = false;
+ bool HasNoSHA2 = false;
+ bool HasNoAES = false;
+ bool HasSM4 = false;
+ bool HasSHA3 = false;
+ bool HasSHA2 = false;
+ bool HasAES = false;
+ bool HasCrypto = false;
+ bool HasNoCrypto = false;
+ int FullFP16Pos = -1;
+ int NoFullFP16Pos = -1;
+ int FP16FMLPos = -1;
+ int NoFP16FMLPos = -1;
+ int ArchFeatPos = -1;
+
+ for (auto I = Features.begin(), E = Features.end(); I != E; I++) {
+ if (*I == "+v8a") V8Version = 0;
+ else if (*I == "+v8.1a") V8Version = 1;
+ else if (*I == "+v8.2a") V8Version = 2;
+ else if (*I == "+v8.3a") V8Version = 3;
+ else if (*I == "+v8.4a") V8Version = 4;
+ else if (*I == "+v8.5a") V8Version = 5;
+ else if (*I == "+v8.6a") V8Version = 6;
+ else if (*I == "+v8.7a") V8Version = 7;
+ else if (*I == "+v8.8a") V8Version = 8;
+ else if (*I == "+v8.9a") V8Version = 9;
+ else if (*I == "+v9a") V9Version = 0;
+ else if (*I == "+v9.1a") V9Version = 1;
+ else if (*I == "+v9.2a") V9Version = 2;
+ else if (*I == "+v9.3a") V9Version = 3;
+ else if (*I == "+v9.4a") V9Version = 4;
+ else if (*I == "+sm4") HasSM4 = true;
+ else if (*I == "+sha3") HasSHA3 = true;
+ else if (*I == "+sha2") HasSHA2 = true;
+ else if (*I == "+aes") HasAES = true;
+ else if (*I == "-sm4") HasNoSM4 = true;
+ else if (*I == "-sha3") HasNoSHA3 = true;
+ else if (*I == "-sha2") HasNoSHA2 = true;
+ else if (*I == "-aes") HasNoAES = true;
+ else if (*I == "+fp16fml") FP16FMLPos = I - Features.begin();
+ else if (*I == "-fp16fml") NoFP16FMLPos = I - Features.begin();
+ else if (*I == "-fullfp16") NoFullFP16Pos = I - Features.begin();
+ else if (*I == "+fullfp16") FullFP16Pos = I - Features.begin();
+    // Whichever option comes later (the right-most option) wins.
+ else if (*I == "+crypto") {
+ HasCrypto = true;
+ HasNoCrypto = false;
+ } else if (*I == "-crypto") {
+ HasCrypto = false;
+ HasNoCrypto = true;
+ }
+ // Register the iterator position if this is an architecture feature
+ if (ArchFeatPos == -1 && (V8Version != -1 || V9Version != -1))
+ ArchFeatPos = I - Features.begin();
+ }
+
// Handle (arch-dependent) fp16fml/fullfp16 relationship.
// FIXME: this fp16fml option handling will be reimplemented after the
// TargetParser rewrite.
- const auto ItRNoFullFP16 = std::find(Features.rbegin(), Features.rend(), "-fullfp16");
- const auto ItRFP16FML = std::find(Features.rbegin(), Features.rend(), "+fp16fml");
- if (llvm::is_contained(Features, "+v8.4a")) {
- const auto ItRFullFP16 = std::find(Features.rbegin(), Features.rend(), "+fullfp16");
- if (ItRFullFP16 < ItRNoFullFP16 && ItRFullFP16 < ItRFP16FML) {
+ if (V8Version >= 4) {
+    // "+fullfp16" wins when it is the right-most of the entangled options
+    // ("-fullfp16", "+fp16fml", "-fp16fml"); in that case, imply "+fp16fml".
+    if (FullFP16Pos > NoFullFP16Pos && FullFP16Pos > FP16FMLPos &&
+        FullFP16Pos > NoFP16FMLPos)
      // The only entangled feature that can be to the right of this +fullfp16 is -fp16fml.
// Only append the +fp16fml if there is no -fp16fml after the +fullfp16.
- if (std::find(Features.rbegin(), ItRFullFP16, "-fp16fml") == ItRFullFP16)
- Features.push_back("+fp16fml");
- }
+ Features.push_back("+fp16fml");
else
goto fp16_fml_fallthrough;
} else {
fp16_fml_fallthrough:
// In both of these cases, putting the 'other' feature on the end of the vector will
// result in the same effect as placing it immediately after the current feature.
- if (ItRNoFullFP16 < ItRFP16FML)
+    // A later "-fullfp16" overrides an earlier "+fp16fml": append "-fp16fml".
+ if (NoFullFP16Pos > FP16FMLPos)
Features.push_back("-fp16fml");
- else if (ItRNoFullFP16 > ItRFP16FML)
+    // A later "+fp16fml" overrides an earlier "-fullfp16": append "+fullfp16".
+ else if (NoFullFP16Pos < FP16FMLPos)
Features.push_back("+fullfp16");
}
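A worked example of the resolution above (sketched from the position comparisons, not from verified driver output): with -march=armv8.4-a+nofp16+fp16 the final "+fullfp16" is right-most among the entangled options, so "+fp16fml" is implied; with -march=armv8.2-a+fp16fml+nofp16 the fallthrough path sees "-fullfp16" after "+fp16fml" and appends "-fp16fml".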
@@ -376,53 +434,23 @@ fp16_fml_fallthrough:
// Context sensitive meaning of Crypto:
// 1) For Arch >= ARMv8.4a: crypto = sm4 + sha3 + sha2 + aes
// 2) For Arch <= ARMv8.3a: crypto = sha2 + aes
- const auto ItBegin = Features.begin();
- const auto ItEnd = Features.end();
- const auto ItRBegin = Features.rbegin();
- const auto ItREnd = Features.rend();
- const auto ItRCrypto = std::find(ItRBegin, ItREnd, "+crypto");
- const auto ItRNoCrypto = std::find(ItRBegin, ItREnd, "-crypto");
- const auto HasCrypto = ItRCrypto != ItREnd;
- const auto HasNoCrypto = ItRNoCrypto != ItREnd;
- const ptrdiff_t PosCrypto = ItRCrypto - ItRBegin;
- const ptrdiff_t PosNoCrypto = ItRNoCrypto - ItRBegin;
-
- bool NoCrypto = false;
- if (HasCrypto && HasNoCrypto) {
- if (PosNoCrypto < PosCrypto)
- NoCrypto = true;
- }
-
- if (std::find(ItBegin, ItEnd, "+v8.4a") != ItEnd ||
- std::find(ItBegin, ItEnd, "+v8.8a") != ItEnd ||
- std::find(ItBegin, ItEnd, "+v9a") != ItEnd ||
- std::find(ItBegin, ItEnd, "+v9.1a") != ItEnd ||
- std::find(ItBegin, ItEnd, "+v9.2a") != ItEnd ||
- std::find(ItBegin, ItEnd, "+v9.3a") != ItEnd) {
- if (HasCrypto && !NoCrypto) {
+ if (V8Version >= 4 || V9Version >= 0) {
+ if (HasCrypto && !HasNoCrypto) {
// Check if we have NOT disabled an algorithm with something like:
// +crypto, -algorithm
// And if "-algorithm" does not occur, we enable that crypto algorithm.
- const bool HasSM4 = (std::find(ItBegin, ItEnd, "-sm4") == ItEnd);
- const bool HasSHA3 = (std::find(ItBegin, ItEnd, "-sha3") == ItEnd);
- const bool HasSHA2 = (std::find(ItBegin, ItEnd, "-sha2") == ItEnd);
- const bool HasAES = (std::find(ItBegin, ItEnd, "-aes") == ItEnd);
- if (HasSM4)
+ if (!HasNoSM4)
Features.push_back("+sm4");
- if (HasSHA3)
+ if (!HasNoSHA3)
Features.push_back("+sha3");
- if (HasSHA2)
+ if (!HasNoSHA2)
Features.push_back("+sha2");
- if (HasAES)
+ if (!HasNoAES)
Features.push_back("+aes");
} else if (HasNoCrypto) {
// Check if we have NOT enabled a crypto algorithm with something like:
// -crypto, +algorithm
// And if "+algorithm" does not occur, we disable that crypto algorithm.
- const bool HasSM4 = (std::find(ItBegin, ItEnd, "+sm4") != ItEnd);
- const bool HasSHA3 = (std::find(ItBegin, ItEnd, "+sha3") != ItEnd);
- const bool HasSHA2 = (std::find(ItBegin, ItEnd, "+sha2") != ItEnd);
- const bool HasAES = (std::find(ItBegin, ItEnd, "+aes") != ItEnd);
if (!HasSM4)
Features.push_back("-sm4");
if (!HasSHA3)
@@ -433,49 +461,41 @@ fp16_fml_fallthrough:
Features.push_back("-aes");
}
} else {
- if (HasCrypto && !NoCrypto) {
- const bool HasSHA2 = (std::find(ItBegin, ItEnd, "-sha2") == ItEnd);
- const bool HasAES = (std::find(ItBegin, ItEnd, "-aes") == ItEnd);
- if (HasSHA2)
+ if (HasCrypto && !HasNoCrypto) {
+ if (!HasNoSHA2)
Features.push_back("+sha2");
- if (HasAES)
+ if (!HasNoAES)
Features.push_back("+aes");
} else if (HasNoCrypto) {
- const bool HasSHA2 = (std::find(ItBegin, ItEnd, "+sha2") != ItEnd);
- const bool HasAES = (std::find(ItBegin, ItEnd, "+aes") != ItEnd);
- const bool HasV82a = (std::find(ItBegin, ItEnd, "+v8.2a") != ItEnd);
- const bool HasV83a = (std::find(ItBegin, ItEnd, "+v8.3a") != ItEnd);
- const bool HasV84a = (std::find(ItBegin, ItEnd, "+v8.4a") != ItEnd);
if (!HasSHA2)
Features.push_back("-sha2");
if (!HasAES)
Features.push_back("-aes");
- if (HasV82a || HasV83a || HasV84a) {
+ if (V8Version == 2 || V8Version == 3) {
Features.push_back("-sm4");
Features.push_back("-sha3");
}
}
}
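Sketching the effect of this simplified logic: -march=armv8.4-a+crypto expands crypto to "+sm4 +sha3 +sha2 +aes" (minus any algorithm explicitly negated), -march=armv8.2-a+crypto expands to just "+sha2 +aes", and -march=armv8.2-a+nocrypto pushes "-sha2 -aes" plus, on v8.2/v8.3 cores, the additional "-sm4 -sha3".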
- const char *Archs[] = {"+v8.6a", "+v8.7a", "+v8.8a",
- "+v9.1a", "+v9.2a", "+v9.3a"};
- auto Pos = std::find_first_of(Features.begin(), Features.end(),
- std::begin(Archs), std::end(Archs));
- if (Pos != std::end(Features))
- Pos = Features.insert(std::next(Pos), {"+i8mm", "+bf16"});
+ // FIXME: these insertions should ideally be automated using default
+ // extensions support from the backend target parser.
+ if (V8Version >= 6 || V9Version >= 1)
+ Features.insert(std::next(Features.begin() + ArchFeatPos),
+ {"+i8mm", "+bf16"});
+
+ // For Armv8.8-a/Armv9.3-a or later, FEAT_HBC and FEAT_MOPS are enabled by
+ // default.
+ if (V8Version >= 8 || V9Version >= 3)
+ Features.insert(std::next(Features.begin() + ArchFeatPos),
+ {"+hbc", "+mops"});
if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
options::OPT_munaligned_access)) {
- if (A->getOption().matches(options::OPT_mno_unaligned_access)) {
+ if (A->getOption().matches(options::OPT_mno_unaligned_access))
Features.push_back("+strict-align");
- if (!ForAS)
- CmdArgs.push_back("-Wunaligned-access");
- }
- } else if (Triple.isOSOpenBSD()) {
+ } else if (Triple.isOSOpenBSD())
Features.push_back("+strict-align");
- if (!ForAS)
- CmdArgs.push_back("-Wunaligned-access");
- }
if (Args.hasArg(options::OPT_ffixed_x1))
Features.push_back("+reserve-x1");
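The replacement of the repeated reverse std::find calls with one forward scan that records last positions is the core of this refactor. A minimal standalone sketch of that pattern (not the driver code itself, which uses the V8Version/Has* variables above):

    #include <iostream>
    #include <string>
    #include <vector>

    // Index of the last occurrence of a feature string; -1 if absent.
    static int lastIndexOf(const std::vector<std::string> &Features,
                           const std::string &Name) {
      int Pos = -1;
      for (int I = 0, E = (int)Features.size(); I != E; ++I)
        if (Features[I] == Name)
          Pos = I;
      return Pos;
    }

    int main() {
      std::vector<std::string> Features = {"+crypto", "-sha2", "-crypto"};
      // The right-most of "+crypto"/"-crypto" wins, as in the driver loop.
      bool CryptoEnabled =
          lastIndexOf(Features, "+crypto") > lastIndexOf(Features, "-crypto");
      std::cout << (CryptoEnabled ? "crypto on\n" : "crypto off\n"); // off
    }

One linear pass replaces many O(n) searches, and comparing recorded positions preserves the "last option wins" semantics of the old reverse-iterator comparisons.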
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.h
index 0cdc2ec725e0..d47c402d4a42 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/AArch64.h
@@ -22,7 +22,6 @@ namespace aarch64 {
void getAArch64TargetFeatures(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs,
std::vector<llvm::StringRef> &Features,
bool ForAS);
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp
index 16af9f6d7129..5c49db2f0837 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.cpp
@@ -121,7 +121,8 @@ static void checkARMArchName(const Driver &D, const Arg *A, const ArgList &Args,
if (ArchKind == llvm::ARM::ArchKind::INVALID ||
(Split.second.size() && !DecodeARMFeatures(D, Split.second, CPUName,
ArchKind, Features, ArgFPUID)))
- D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
+ D.Diag(clang::diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << A->getValue();
}
// Check -mcpu=. Needs ArchName to handle -mcpu=generic.
@@ -137,7 +138,8 @@ static void checkARMCPUName(const Driver &D, const Arg *A, const ArgList &Args,
if (ArchKind == llvm::ARM::ArchKind::INVALID ||
(Split.second.size() &&
!DecodeARMFeatures(D, Split.second, CPU, ArchKind, Features, ArgFPUID)))
- D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
+ D.Diag(clang::diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << A->getValue();
}
bool arm::useAAPCSForMachO(const llvm::Triple &T) {
@@ -320,6 +322,7 @@ arm::FloatABI arm::getDefaultFloatABI(const llvm::Triple &Triple) {
case llvm::Triple::MacOSX:
case llvm::Triple::IOS:
case llvm::Triple::TvOS:
+ case llvm::Triple::DriverKit:
// Darwin defaults to "softfp" for v6 and v7.
if (Triple.isWatchABI())
return FloatABI::Hard;
@@ -434,7 +437,7 @@ static bool hasIntegerMVE(const std::vector<StringRef> &F) {
}
void arm::getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
- const ArgList &Args, ArgStringList &CmdArgs,
+ const ArgList &Args,
std::vector<StringRef> &Features, bool ForAS) {
bool KernelOrKext =
Args.hasArg(options::OPT_mkernel, options::OPT_fapple_kext);
@@ -717,6 +720,15 @@ fp16_fml_fallthrough:
}
}
+  // Propagate the frame-chain model selection.
+ if (Arg *A = Args.getLastArg(options::OPT_mframe_chain)) {
+ StringRef FrameChainOption = A->getValue();
+ if (FrameChainOption.startswith("aapcs"))
+ Features.push_back("+aapcs-frame-chain");
+ if (FrameChainOption == "aapcs+leaf")
+ Features.push_back("+aapcs-frame-chain-leaf");
+ }
+
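Under this mapping, -mframe-chain=aapcs enables just "+aapcs-frame-chain", while -mframe-chain=aapcs+leaf enables both "+aapcs-frame-chain" and "+aapcs-frame-chain-leaf", since the startswith("aapcs") check matches the prefix of both values.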
// CMSE: Check for target 8M (for -mcmse to be applicable) is performed later.
if (Args.getLastArg(options::OPT_mcmse))
Features.push_back("+8msecext");
@@ -733,6 +745,16 @@ fp16_fml_fallthrough:
Features.push_back("-fix-cmse-cve-2021-35465");
}
+ // This also handles the -m(no-)fix-cortex-a72-1655431 arguments via aliases.
+ if (Arg *A = Args.getLastArg(options::OPT_mfix_cortex_a57_aes_1742098,
+ options::OPT_mno_fix_cortex_a57_aes_1742098)) {
+ if (A->getOption().matches(options::OPT_mfix_cortex_a57_aes_1742098)) {
+ Features.push_back("+fix-cortex-a57-aes-1742098");
+ } else {
+ Features.push_back("-fix-cortex-a57-aes-1742098");
+ }
+ }
+
// Look for the last occurrence of -mlong-calls or -mno-long-calls. If
// neither options are specified, see if we are compiling for kernel/kext and
// decide whether to pass "+long-calls" based on the OS and its version.
@@ -772,8 +794,6 @@ fp16_fml_fallthrough:
// Kernel code has more strict alignment requirements.
if (KernelOrKext) {
Features.push_back("+strict-align");
- if (!ForAS)
- CmdArgs.push_back("-Wunaligned-access");
} else if (Arg *A = Args.getLastArg(options::OPT_mno_unaligned_access,
options::OPT_munaligned_access)) {
if (A->getOption().matches(options::OPT_munaligned_access)) {
@@ -784,11 +804,8 @@ fp16_fml_fallthrough:
// access either.
else if (Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v8m_baseline)
D.Diag(diag::err_target_unsupported_unaligned) << "v8m.base";
- } else {
+ } else
Features.push_back("+strict-align");
- if (!ForAS)
- CmdArgs.push_back("-Wunaligned-access");
- }
} else {
// Assume pre-ARMv6 doesn't support unaligned accesses.
//
@@ -807,23 +824,14 @@ fp16_fml_fallthrough:
int VersionNum = getARMSubArchVersionNumber(Triple);
if (Triple.isOSDarwin() || Triple.isOSNetBSD()) {
if (VersionNum < 6 ||
- Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v6m) {
+ Triple.getSubArch() == llvm::Triple::SubArchType::ARMSubArch_v6m)
Features.push_back("+strict-align");
- if (!ForAS)
- CmdArgs.push_back("-Wunaligned-access");
- }
} else if (Triple.isOSLinux() || Triple.isOSNaCl() ||
Triple.isOSWindows()) {
- if (VersionNum < 7) {
+ if (VersionNum < 7)
Features.push_back("+strict-align");
- if (!ForAS)
- CmdArgs.push_back("-Wunaligned-access");
- }
- } else {
+ } else
Features.push_back("+strict-align");
- if (!ForAS)
- CmdArgs.push_back("-Wunaligned-access");
- }
}
// llvm does not support reserving registers in general. There is support
@@ -871,8 +879,8 @@ fp16_fml_fallthrough:
DisableComdat = true;
continue;
}
- D.Diag(diag::err_invalid_sls_hardening)
- << Scope << A->getAsString(Args);
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << Scope;
break;
}
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h
index 862a2f2796be..782bdf3d0202 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/ARM.h
@@ -66,7 +66,6 @@ void getARMArchCPUFromArgs(const llvm::opt::ArgList &Args,
bool FromAs = false);
void getARMTargetFeatures(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args,
- llvm::opt::ArgStringList &CmdArgs,
std::vector<llvm::StringRef> &Features, bool ForAS);
int getARMSubArchVersionNumber(const llvm::Triple &Triple);
bool isARMMProfile(const llvm::Triple &Triple);
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/CSKY.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/CSKY.cpp
new file mode 100644
index 000000000000..3a8f92785609
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/CSKY.cpp
@@ -0,0 +1,170 @@
+//===--- CSKY.cpp - CSKY Helpers for Tools --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "CSKY.h"
+#include "ToolChains/CommonArgs.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Driver/Driver.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "clang/Driver/Options.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Support/CSKYTargetParser.h"
+#include "llvm/Support/Host.h"
+#include "llvm/Support/TargetParser.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang::driver;
+using namespace clang::driver::tools;
+using namespace clang;
+using namespace llvm::opt;
+
+llvm::Optional<llvm::StringRef>
+csky::getCSKYArchName(const Driver &D, const ArgList &Args,
+ const llvm::Triple &Triple) {
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
+ llvm::CSKY::ArchKind ArchKind = llvm::CSKY::parseArch(A->getValue());
+
+ if (ArchKind == llvm::CSKY::ArchKind::INVALID) {
+ D.Diag(clang::diag::err_drv_invalid_arch_name) << A->getAsString(Args);
+ return llvm::Optional<llvm::StringRef>();
+ }
+ return llvm::Optional<llvm::StringRef>(A->getValue());
+ }
+
+ if (const Arg *A = Args.getLastArg(clang::driver::options::OPT_mcpu_EQ)) {
+ llvm::CSKY::ArchKind ArchKind = llvm::CSKY::parseCPUArch(A->getValue());
+ if (ArchKind == llvm::CSKY::ArchKind::INVALID) {
+ D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
+ return llvm::Optional<llvm::StringRef>();
+ }
+ return llvm::Optional<llvm::StringRef>(llvm::CSKY::getArchName(ArchKind));
+ }
+
+ return llvm::Optional<llvm::StringRef>("ck810");
+}
+
+csky::FloatABI csky::getCSKYFloatABI(const Driver &D, const ArgList &Args) {
+ csky::FloatABI ABI = FloatABI::Soft;
+ if (Arg *A =
+ Args.getLastArg(options::OPT_msoft_float, options::OPT_mhard_float,
+ options::OPT_mfloat_abi_EQ)) {
+ if (A->getOption().matches(options::OPT_msoft_float)) {
+ ABI = FloatABI::Soft;
+ } else if (A->getOption().matches(options::OPT_mhard_float)) {
+ ABI = FloatABI::Hard;
+ } else {
+ ABI = llvm::StringSwitch<csky::FloatABI>(A->getValue())
+ .Case("soft", FloatABI::Soft)
+ .Case("softfp", FloatABI::SoftFP)
+ .Case("hard", FloatABI::Hard)
+ .Default(FloatABI::Invalid);
+ if (ABI == FloatABI::Invalid) {
+ D.Diag(diag::err_drv_invalid_mfloat_abi) << A->getAsString(Args);
+ ABI = FloatABI::Soft;
+ }
+ }
+ }
+
+ return ABI;
+}
+
+// Handle -mfpu=.
+static llvm::CSKY::CSKYFPUKind
+getCSKYFPUFeatures(const Driver &D, const Arg *A, const ArgList &Args,
+ StringRef FPU, std::vector<StringRef> &Features) {
+
+ llvm::CSKY::CSKYFPUKind FPUID =
+ llvm::StringSwitch<llvm::CSKY::CSKYFPUKind>(FPU)
+ .Case("auto", llvm::CSKY::FK_AUTO)
+ .Case("fpv2", llvm::CSKY::FK_FPV2)
+ .Case("fpv2_divd", llvm::CSKY::FK_FPV2_DIVD)
+ .Case("fpv2_sf", llvm::CSKY::FK_FPV2_SF)
+ .Case("fpv3", llvm::CSKY::FK_FPV3)
+ .Case("fpv3_hf", llvm::CSKY::FK_FPV3_HF)
+ .Case("fpv3_hsf", llvm::CSKY::FK_FPV3_HSF)
+ .Case("fpv3_sdf", llvm::CSKY::FK_FPV3_SDF)
+ .Default(llvm::CSKY::FK_INVALID);
+ if (FPUID == llvm::CSKY::FK_INVALID) {
+ D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
+ return llvm::CSKY::FK_INVALID;
+ }
+
+ auto RemoveTargetFPUFeature =
+ [&Features](ArrayRef<const char *> FPUFeatures) {
+ for (auto FPUFeature : FPUFeatures) {
+ auto it = std::find(Features.begin(), Features.end(), FPUFeature);
+ if (it != Features.end())
+ Features.erase(it);
+ }
+ };
+
+ RemoveTargetFPUFeature({"+fpuv2_sf", "+fpuv2_df", "+fdivdu", "+fpuv3_hi",
+ "+fpuv3_hf", "+fpuv3_sf", "+fpuv3_df"});
+
+ if (!llvm::CSKY::getFPUFeatures(FPUID, Features)) {
+ D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
+ return llvm::CSKY::FK_INVALID;
+ }
+
+ return FPUID;
+}
+
+void csky::getCSKYTargetFeatures(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args, ArgStringList &CmdArgs,
+ std::vector<llvm::StringRef> &Features) {
+ llvm::StringRef archName;
+ llvm::StringRef cpuName;
+ llvm::CSKY::ArchKind ArchKind = llvm::CSKY::ArchKind::INVALID;
+ if (const Arg *A = Args.getLastArg(options::OPT_march_EQ)) {
+ ArchKind = llvm::CSKY::parseArch(A->getValue());
+ if (ArchKind == llvm::CSKY::ArchKind::INVALID) {
+ D.Diag(clang::diag::err_drv_invalid_arch_name) << A->getAsString(Args);
+ return;
+ }
+ archName = A->getValue();
+ }
+
+ if (const Arg *A = Args.getLastArg(clang::driver::options::OPT_mcpu_EQ)) {
+ llvm::CSKY::ArchKind Kind = llvm::CSKY::parseCPUArch(A->getValue());
+ if (Kind == llvm::CSKY::ArchKind::INVALID) {
+ D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
+ return;
+ }
+ if (!archName.empty() && Kind != ArchKind) {
+ D.Diag(clang::diag::err_drv_clang_unsupported) << A->getAsString(Args);
+ return;
+ }
+ cpuName = A->getValue();
+ if (archName.empty())
+ archName = llvm::CSKY::getArchName(Kind);
+ }
+
+ if (archName.empty() && cpuName.empty()) {
+ archName = "ck810";
+ cpuName = "ck810";
+ } else if (!archName.empty() && cpuName.empty()) {
+ cpuName = archName;
+ }
+
+ csky::FloatABI FloatABI = csky::getCSKYFloatABI(D, Args);
+
+ if (FloatABI == csky::FloatABI::Hard) {
+ Features.push_back("+hard-float-abi");
+ Features.push_back("+hard-float");
+ } else if (FloatABI == csky::FloatABI::SoftFP) {
+ Features.push_back("+hard-float");
+ }
+
+ uint64_t Extension = llvm::CSKY::getDefaultExtensions(cpuName);
+ llvm::CSKY::getExtensionFeatures(Extension, Features);
+
+ if (const Arg *FPUArg = Args.getLastArg(options::OPT_mfpu_EQ))
+ getCSKYFPUFeatures(D, FPUArg, Args, FPUArg->getValue(), Features);
+}
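The defaulting above works out as follows: with neither -march= nor -mcpu=, both names fall back to "ck810"; -march= alone also uses the arch name as the CPU name; -mcpu= alone derives the arch from the CPU's ArchKind; and a mismatched -march=/-mcpu= pair is diagnosed. Separately, -mfloat-abi=hard yields "+hard-float-abi +hard-float", while softfp yields only "+hard-float" (hardware FP instructions with the soft-float calling convention).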
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/CSKY.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/CSKY.h
new file mode 100644
index 000000000000..d23da1d66e35
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/CSKY.h
@@ -0,0 +1,47 @@
+//===--- CSKY.h - CSKY-specific Tool Helpers ------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_CSKY_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_CSKY_H
+
+#include "clang/Driver/Driver.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Option/Option.h"
+#include <string>
+#include <vector>
+
+namespace clang {
+namespace driver {
+namespace tools {
+namespace csky {
+
+enum class FloatABI {
+ Invalid,
+ Soft,
+ SoftFP,
+ Hard,
+};
+
+FloatABI getCSKYFloatABI(const Driver &D, const llvm::opt::ArgList &Args);
+
+void getCSKYTargetFeatures(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs,
+ std::vector<llvm::StringRef> &Features);
+
+llvm::Optional<llvm::StringRef> getCSKYArchName(const Driver &D,
+ const llvm::opt::ArgList &Args,
+ const llvm::Triple &Triple);
+
+} // end namespace csky
+} // namespace tools
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_ARCH_CSKY_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
index 94b179636e4f..ca625c822d05 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/RISCV.cpp
@@ -199,7 +199,7 @@ StringRef riscv::getRISCVABI(const ArgList &Args, const llvm::Triple &Triple) {
// Ignore parsing error, just go 3rd step.
consumeError(ParseResult.takeError());
else
- return llvm::RISCV::computeDefaultABIFromArch(**ParseResult);
+ return (*ParseResult)->computeDefaultABI();
// 3. Choose a default based on the triple
//
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/VE.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/VE.h
index 531433534914..c47a41df25bc 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/VE.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/VE.h
@@ -24,7 +24,7 @@ void getVETargetFeatures(const Driver &D, const llvm::opt::ArgList &Args,
std::vector<llvm::StringRef> &Features);
} // end namespace ve
-} // namespace tools
+} // end namespace tools
} // end namespace driver
} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.cpp
index bfa008f964e1..cd7c014faa5e 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Arch/X86.cpp
@@ -84,13 +84,19 @@ std::string x86::getX86TargetCPU(const Driver &D, const ArgList &Args,
// Simulators can still run on 10.11 though, like Xcode.
if (Triple.isMacOSX() && !Triple.isOSVersionLT(10, 12))
return "penryn";
+
+ if (Triple.isDriverKit())
+ return "nehalem";
+
// The oldest x86_64 Macs have core2/Merom; the oldest x86 Macs have Yonah.
return Is64Bit ? "core2" : "yonah";
}
- // Set up default CPU name for PS4 compilers.
- if (Triple.isPS4CPU())
+ // Set up default CPU name for PS4/PS5 compilers.
+ if (Triple.isPS4())
return "btver2";
+ if (Triple.isPS5())
+ return "znver2";
// On Android use targets compatible with gcc
if (Triple.isAndroid())
@@ -240,4 +246,20 @@ void x86::getX86TargetFeatures(const Driver &D, const llvm::Triple &Triple,
Name = Name.substr(3);
Features.push_back(Args.MakeArgString((IsNegative ? "-" : "+") + Name));
}
+
+ // Enable/disable straight line speculation hardening.
+ if (Arg *A = Args.getLastArg(options::OPT_mharden_sls_EQ)) {
+ StringRef Scope = A->getValue();
+ if (Scope == "all") {
+ Features.push_back("+harden-sls-ijmp");
+ Features.push_back("+harden-sls-ret");
+ } else if (Scope == "return") {
+ Features.push_back("+harden-sls-ret");
+ } else if (Scope == "indirect-jmp") {
+ Features.push_back("+harden-sls-ijmp");
+ } else if (Scope != "none") {
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << Scope;
+ }
+ }
}
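The new X86 handling mirrors the structure of the existing AArch64/ARM -mharden-sls support, though the accepted scope names differ: "all" maps to "+harden-sls-ijmp +harden-sls-ret", "return" and "indirect-jmp" select the individual features, "none" adds nothing, and any other value is diagnosed.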
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CSKYToolChain.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/CSKYToolChain.cpp
new file mode 100644
index 000000000000..de286faaca6d
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CSKYToolChain.cpp
@@ -0,0 +1,204 @@
+//===--- CSKYToolchain.cpp - CSKY ToolChain Implementations ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "CSKYToolChain.h"
+#include "CommonArgs.h"
+#include "clang/Driver/Compilation.h"
+#include "clang/Driver/InputInfo.h"
+#include "clang/Driver/Options.h"
+#include "llvm/Option/ArgList.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang::driver;
+using namespace clang::driver::toolchains;
+using namespace clang::driver::tools;
+using namespace clang;
+using namespace llvm::opt;
+
+static void addMultilibsFilePaths(const Driver &D, const MultilibSet &Multilibs,
+ const Multilib &Multilib,
+ StringRef InstallPath,
+ ToolChain::path_list &Paths) {
+ if (const auto &PathsCallback = Multilibs.filePathsCallback())
+ for (const auto &Path : PathsCallback(Multilib))
+ addPathIfExists(D, InstallPath + Path, Paths);
+}
+
+/// CSKY Toolchain
+CSKYToolChain::CSKYToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : Generic_ELF(D, Triple, Args) {
+ GCCInstallation.init(Triple, Args);
+ if (GCCInstallation.isValid()) {
+ Multilibs = GCCInstallation.getMultilibs();
+ SelectedMultilib = GCCInstallation.getMultilib();
+ path_list &Paths = getFilePaths();
+ // Add toolchain/multilib specific file paths.
+ addMultilibsFilePaths(D, Multilibs, SelectedMultilib,
+ GCCInstallation.getInstallPath(), Paths);
+ getFilePaths().push_back(GCCInstallation.getInstallPath().str() +
+ SelectedMultilib.osSuffix());
+ ToolChain::path_list &PPaths = getProgramPaths();
+ // Multilib cross-compiler GCC installations put ld in a triple-prefixed
+ // directory off of the parent of the GCC installation.
+ PPaths.push_back(Twine(GCCInstallation.getParentLibPath() + "/../" +
+ GCCInstallation.getTriple().str() + "/bin")
+ .str());
+ PPaths.push_back((GCCInstallation.getParentLibPath() + "/../bin").str());
+ } else {
+ getProgramPaths().push_back(D.Dir);
+ }
+ getFilePaths().push_back(computeSysRoot() + "/lib" +
+ SelectedMultilib.osSuffix());
+}
+
+Tool *CSKYToolChain::buildLinker() const {
+ return new tools::CSKY::Linker(*this);
+}
+
+ToolChain::RuntimeLibType CSKYToolChain::GetDefaultRuntimeLibType() const {
+ return GCCInstallation.isValid() ? ToolChain::RLT_Libgcc
+ : ToolChain::RLT_CompilerRT;
+}
+
+ToolChain::UnwindLibType
+CSKYToolChain::GetUnwindLibType(const llvm::opt::ArgList &Args) const {
+ return ToolChain::UNW_None;
+}
+
+void CSKYToolChain::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind) const {
+ CC1Args.push_back("-nostdsysteminc");
+}
+
+void CSKYToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
+ ArgStringList &CC1Args) const {
+ if (DriverArgs.hasArg(options::OPT_nostdinc))
+ return;
+
+ if (!DriverArgs.hasArg(options::OPT_nostdlibinc)) {
+ SmallString<128> Dir(computeSysRoot());
+ llvm::sys::path::append(Dir, "include");
+ addSystemInclude(DriverArgs, CC1Args, Dir.str());
+ SmallString<128> Dir2(computeSysRoot());
+ llvm::sys::path::append(Dir2, "sys-include");
+ addSystemInclude(DriverArgs, CC1Args, Dir2.str());
+ }
+}
+
+void CSKYToolChain::addLibStdCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ const GCCVersion &Version = GCCInstallation.getVersion();
+ StringRef TripleStr = GCCInstallation.getTriple().str();
+ const Multilib &Multilib = GCCInstallation.getMultilib();
+ addLibStdCXXIncludePaths(computeSysRoot() + "/include/c++/" + Version.Text,
+ TripleStr, Multilib.includeSuffix(), DriverArgs,
+ CC1Args);
+}
+
+std::string CSKYToolChain::computeSysRoot() const {
+ if (!getDriver().SysRoot.empty())
+ return getDriver().SysRoot;
+
+ SmallString<128> SysRootDir;
+ if (GCCInstallation.isValid()) {
+ StringRef LibDir = GCCInstallation.getParentLibPath();
+ StringRef TripleStr = GCCInstallation.getTriple().str();
+ llvm::sys::path::append(SysRootDir, LibDir, "..", TripleStr);
+ } else {
+ // Use the triple as provided to the driver. Unlike the parsed triple
+ // this has not been normalized to always contain every field.
+ llvm::sys::path::append(SysRootDir, getDriver().Dir, "..",
+ getDriver().getTargetTriple());
+ }
+
+ if (!llvm::sys::fs::exists(SysRootDir))
+ return std::string();
+
+ return std::string(SysRootDir.str());
+}
+
+void CSKY::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ const ToolChain &ToolChain = getToolChain();
+ const Driver &D = ToolChain.getDriver();
+ ArgStringList CmdArgs;
+
+ if (!D.SysRoot.empty())
+ CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+
+ CmdArgs.push_back("-m");
+ CmdArgs.push_back("cskyelf");
+
+ std::string Linker = getToolChain().GetLinkerPath();
+
+ bool WantCRTs =
+ !Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles);
+
+ const char *crtbegin, *crtend;
+ auto RuntimeLib = ToolChain.GetRuntimeLibType(Args);
+ if (RuntimeLib == ToolChain::RLT_Libgcc) {
+ crtbegin = "crtbegin.o";
+ crtend = "crtend.o";
+ } else {
+ assert(RuntimeLib == ToolChain::RLT_CompilerRT);
+ crtbegin = ToolChain.getCompilerRTArgString(Args, "crtbegin",
+ ToolChain::FT_Object);
+ crtend =
+ ToolChain.getCompilerRTArgString(Args, "crtend", ToolChain::FT_Object);
+ }
+
+ if (WantCRTs) {
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crt0.o")));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crti.o")));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtbegin)));
+ }
+
+ Args.AddAllArgs(CmdArgs, options::OPT_L);
+ ToolChain.AddFilePathLibArgs(Args, CmdArgs);
+ Args.AddAllArgs(CmdArgs,
+ {options::OPT_T_Group, options::OPT_e, options::OPT_s,
+ options::OPT_t, options::OPT_Z_Flag, options::OPT_r});
+
+ AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
+
+ // TODO: add C++ includes and libs if compiling C++.
+
+ if (!Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_nodefaultlibs)) {
+ if (ToolChain.ShouldLinkCXXStdlib(Args))
+ ToolChain.AddCXXStdlibLibArgs(Args, CmdArgs);
+ CmdArgs.push_back("--start-group");
+ CmdArgs.push_back("-lc");
+ if (Args.hasArg(options::OPT_msim))
+ CmdArgs.push_back("-lsemi");
+ else
+ CmdArgs.push_back("-lnosys");
+ CmdArgs.push_back("--end-group");
+ AddRunTimeLibs(ToolChain, ToolChain.getDriver(), CmdArgs, Args);
+ }
+
+ if (WantCRTs) {
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath(crtend)));
+ CmdArgs.push_back(Args.MakeArgString(ToolChain.GetFilePath("crtn.o")));
+ }
+
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::AtFileCurCP(), Args.MakeArgString(Linker),
+ CmdArgs, Inputs, Output));
+}
+// CSKY tools end.
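Assuming a GCC-style CSKY sysroot, the job constructed above yields a link line of roughly this shape (a sketch; paths are illustrative, -lgcc stands in for whatever AddRunTimeLibs selects, and with -msim, -lsemi replaces -lnosys):

    ld -m cskyelf --sysroot=<sysroot> crt0.o crti.o crtbegin.o -L<libdir> main.o
        --start-group -lc -lnosys --end-group -lgcc crtend.o crtn.o -o a.out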
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CSKYToolChain.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/CSKYToolChain.h
new file mode 100644
index 000000000000..52b37539cc5f
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CSKYToolChain.h
@@ -0,0 +1,63 @@
+//===--- CSKYToolchain.h - CSKY ToolChain Implementations -----*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_CSKYTOOLCHAIN_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_CSKYTOOLCHAIN_H
+
+#include "Gnu.h"
+#include "clang/Driver/ToolChain.h"
+
+namespace clang {
+namespace driver {
+namespace toolchains {
+
+class LLVM_LIBRARY_VISIBILITY CSKYToolChain : public Generic_ELF {
+public:
+ CSKYToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind) const override;
+ RuntimeLibType GetDefaultRuntimeLibType() const override;
+ UnwindLibType GetUnwindLibType(const llvm::opt::ArgList &Args) const override;
+ void
+ AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+ void
+ addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
+
+protected:
+ Tool *buildLinker() const override;
+
+private:
+ std::string computeSysRoot() const override;
+};
+
+} // end namespace toolchains
+
+namespace tools {
+namespace CSKY {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
+public:
+ Linker(const ToolChain &TC) : Tool("CSKY::Linker", "ld", TC) {}
+ bool hasIntegratedCPP() const override { return false; }
+ bool isLinkJob() const override { return true; }
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+} // end namespace CSKY
+} // end namespace tools
+
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_CSKYTOOLCHAIN_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
index f2f18e901ab0..c9bbdb2ac72e 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.cpp
@@ -10,6 +10,7 @@
#include "AMDGPU.h"
#include "Arch/AArch64.h"
#include "Arch/ARM.h"
+#include "Arch/CSKY.h"
#include "Arch/M68k.h"
#include "Arch/Mips.h"
#include "Arch/PPC.h"
@@ -28,6 +29,8 @@
#include "clang/Basic/LangOptions.h"
#include "clang/Basic/ObjCRuntime.h"
#include "clang/Basic/Version.h"
+#include "clang/Config/config.h"
+#include "clang/Driver/Action.h"
#include "clang/Driver/Distro.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/InputInfo.h"
@@ -35,6 +38,7 @@
#include "clang/Driver/SanitizerArgs.h"
#include "clang/Driver/Types.h"
#include "clang/Driver/XRayArgs.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Option/ArgList.h"
@@ -223,12 +227,16 @@ static void ParseMRecip(const Driver &D, const ArgList &Args,
llvm::StringMap<bool> OptionStrings;
OptionStrings.insert(std::make_pair("divd", false));
OptionStrings.insert(std::make_pair("divf", false));
+ OptionStrings.insert(std::make_pair("divh", false));
OptionStrings.insert(std::make_pair("vec-divd", false));
OptionStrings.insert(std::make_pair("vec-divf", false));
+ OptionStrings.insert(std::make_pair("vec-divh", false));
OptionStrings.insert(std::make_pair("sqrtd", false));
OptionStrings.insert(std::make_pair("sqrtf", false));
+ OptionStrings.insert(std::make_pair("sqrth", false));
OptionStrings.insert(std::make_pair("vec-sqrtd", false));
OptionStrings.insert(std::make_pair("vec-sqrtf", false));
+ OptionStrings.insert(std::make_pair("vec-sqrth", false));
for (unsigned i = 0; i != NumOptions; ++i) {
StringRef Val = A->getValue(i);
@@ -252,10 +260,11 @@ static void ParseMRecip(const Driver &D, const ArgList &Args,
D.Diag(diag::err_drv_unknown_argument) << Val;
return;
}
- // The option was specified without a float or double suffix.
- // Make sure that the double entry was not already specified.
+    // The option was specified without a half, float, or double suffix.
+ // Make sure that the double or half entry was not already specified.
// The float entry will be checked below.
- if (OptionStrings[ValBase.str() + 'd']) {
+ if (OptionStrings[ValBase.str() + 'd'] ||
+ OptionStrings[ValBase.str() + 'h']) {
D.Diag(diag::err_drv_invalid_value) << A->getOption().getName() << Val;
return;
}
@@ -270,9 +279,12 @@ static void ParseMRecip(const Driver &D, const ArgList &Args,
// Mark the matched option as found. Do not allow duplicate specifiers.
OptionIter->second = true;
- // If the precision was not specified, also mark the double entry as found.
- if (ValBase.back() != 'f' && ValBase.back() != 'd')
+    // If the precision was not specified, also mark the double and half
+    // entries as found.
+    if (ValBase.back() != 'f' && ValBase.back() != 'd' &&
+        ValBase.back() != 'h') {
OptionStrings[ValBase.str() + 'd'] = true;
+ OptionStrings[ValBase.str() + 'h'] = true;
+ }
// Build the output string.
StringRef Prefix = IsDisabled ? DisabledPrefixOut : EnabledPrefixOut;
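With the new half-precision entries, an unsuffixed specifier now reserves all three widths: for example, -mrecip=sqrt marks sqrtf, sqrtd, and sqrth as specified, so a later sqrth in the same option list is rejected as a duplicate, while -mrecip=sqrth touches only the half-precision entry.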
@@ -328,7 +340,7 @@ static void getTargetFeatures(const Driver &D, const llvm::Triple &Triple,
case llvm::Triple::armeb:
case llvm::Triple::thumb:
case llvm::Triple::thumbeb:
- arm::getARMTargetFeatures(D, Triple, Args, CmdArgs, Features, ForAS);
+ arm::getARMTargetFeatures(D, Triple, Args, Features, ForAS);
break;
case llvm::Triple::ppc:
@@ -347,8 +359,7 @@ static void getTargetFeatures(const Driver &D, const llvm::Triple &Triple,
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_32:
case llvm::Triple::aarch64_be:
- aarch64::getAArch64TargetFeatures(D, Triple, Args, CmdArgs, Features,
- ForAS);
+ aarch64::getAArch64TargetFeatures(D, Triple, Args, Features, ForAS);
break;
case llvm::Triple::x86:
case llvm::Triple::x86_64:
@@ -370,6 +381,10 @@ static void getTargetFeatures(const Driver &D, const llvm::Triple &Triple,
case llvm::Triple::amdgcn:
amdgpu::getAMDGPUTargetFeatures(D, Triple, Args, Features);
break;
+ case llvm::Triple::nvptx:
+ case llvm::Triple::nvptx64:
+ NVPTX::getNVPTXTargetFeatures(D, Triple, Args, Features);
+ break;
case llvm::Triple::m68k:
m68k::getM68kTargetFeatures(D, Triple, Args, Features);
break;
@@ -379,6 +394,9 @@ static void getTargetFeatures(const Driver &D, const llvm::Triple &Triple,
case llvm::Triple::ve:
ve::getVETargetFeatures(D, Args, Features);
break;
+ case llvm::Triple::csky:
+ csky::getCSKYTargetFeatures(D, Triple, Args, CmdArgs, Features);
+ break;
}
for (auto Feature : unifyTargetFeatures(Features)) {
@@ -450,9 +468,9 @@ static bool addExceptionArgs(const ArgList &Args, types::ID InputType,
}
if (types::isCXX(InputType)) {
- // Disable C++ EH by default on XCore and PS4.
- bool CXXExceptionsEnabled =
- Triple.getArch() != llvm::Triple::xcore && !Triple.isPS4CPU();
+ // Disable C++ EH by default on XCore and PS4/PS5.
+ bool CXXExceptionsEnabled = Triple.getArch() != llvm::Triple::xcore &&
+ !Triple.isPS() && !Triple.isDriverKit();
Arg *ExceptionArg = Args.getLastArg(
options::OPT_fcxx_exceptions, options::OPT_fno_cxx_exceptions,
options::OPT_fexceptions, options::OPT_fno_exceptions);
@@ -543,6 +561,7 @@ static bool useFramePointerForTargetByDefault(const ArgList &Args,
case llvm::Triple::riscv64:
case llvm::Triple::amdgcn:
case llvm::Triple::r600:
+ case llvm::Triple::csky:
return !areOptimizationsEnabled(Args);
default:
break;
@@ -613,10 +632,10 @@ getFramePointerKind(const ArgList &Args, const llvm::Triple &Triple) {
bool OmitFP = A && A->getOption().matches(options::OPT_fomit_frame_pointer);
bool NoOmitFP =
A && A->getOption().matches(options::OPT_fno_omit_frame_pointer);
- bool OmitLeafFP = Args.hasFlag(options::OPT_momit_leaf_frame_pointer,
- options::OPT_mno_omit_leaf_frame_pointer,
- Triple.isAArch64() || Triple.isPS4CPU() ||
- Triple.isVE());
+ bool OmitLeafFP =
+ Args.hasFlag(options::OPT_momit_leaf_frame_pointer,
+ options::OPT_mno_omit_leaf_frame_pointer,
+ Triple.isAArch64() || Triple.isPS() || Triple.isVE());
if (NoOmitFP || mustUseNonLeafFramePointerForTarget(Triple) ||
(!OmitFP && useFramePointerForTargetByDefault(Args, Triple))) {
if (OmitLeafFP)
@@ -669,17 +688,24 @@ static void addDebugObjectName(const ArgList &Args, ArgStringList &CmdArgs,
}
/// Add a CC1 and CC1AS option to specify the debug file path prefix map.
-static void addDebugPrefixMapArg(const Driver &D, const ArgList &Args, ArgStringList &CmdArgs) {
- for (const Arg *A : Args.filtered(options::OPT_ffile_prefix_map_EQ,
- options::OPT_fdebug_prefix_map_EQ)) {
- StringRef Map = A->getValue();
+static void addDebugPrefixMapArg(const Driver &D, const ToolChain &TC,
+ const ArgList &Args, ArgStringList &CmdArgs) {
+ auto AddOneArg = [&](StringRef Map, StringRef Name) {
if (!Map.contains('='))
- D.Diag(diag::err_drv_invalid_argument_to_option)
- << Map << A->getOption().getName();
+ D.Diag(diag::err_drv_invalid_argument_to_option) << Map << Name;
else
CmdArgs.push_back(Args.MakeArgString("-fdebug-prefix-map=" + Map));
+ };
+
+ for (const Arg *A : Args.filtered(options::OPT_ffile_prefix_map_EQ,
+ options::OPT_fdebug_prefix_map_EQ)) {
+ AddOneArg(A->getValue(), A->getOption().getName());
A->claim();
}
+ std::string GlobalRemapEntry = TC.GetGlobalDebugPathRemapping();
+ if (GlobalRemapEntry.empty())
+ return;
+ AddOneArg(GlobalRemapEntry, "environment");
}
/// Add a CC1 and CC1AS option to specify the macro file path prefix map.
@@ -1150,7 +1176,8 @@ static const char *RelocationModelName(llvm::Reloc::Model Model) {
}
static void handleAMDGPUCodeObjectVersionOptions(const Driver &D,
const ArgList &Args,
- ArgStringList &CmdArgs) {
+ ArgStringList &CmdArgs,
+ bool IsCC1As = false) {
// If no version was requested by the user, use the default value from the
// back end. This is consistent with the value returned from
// getAMDGPUCodeObjectVersion. This lets clang emit IR for amdgpu without
@@ -1162,6 +1189,11 @@ static void handleAMDGPUCodeObjectVersionOptions(const Driver &D,
Args.MakeArgString(Twine("--amdhsa-code-object-version=") +
Twine(CodeObjVer)));
CmdArgs.insert(CmdArgs.begin() + 1, "-mllvm");
+    // -cc1as does not accept the -mcode-object-version option.
+ if (!IsCC1As)
+ CmdArgs.insert(CmdArgs.begin() + 1,
+ Args.MakeArgString(Twine("-mcode-object-version=") +
+ Twine(CodeObjVer)));
}
}
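With code object version N, this now inserts "-mcode-object-version=N" followed by "-mllvm --amdhsa-code-object-version=N" right after the first cc1 argument; a cc1as invocation (IsCC1As set) receives only the -mllvm pair, since -cc1as rejects the other option.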
@@ -1277,6 +1309,7 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
// If we are offloading to a target via OpenMP we need to include the
// openmp_wrappers folder which contains alternative system headers.
if (JA.isDeviceOffloading(Action::OFK_OpenMP) &&
+ !Args.hasArg(options::OPT_nostdinc) &&
(getToolChain().getTriple().isNVPTX() ||
getToolChain().getTriple().isAMDGCN())) {
if (!Args.hasArg(options::OPT_nobuiltininc)) {
@@ -1333,7 +1366,8 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
bool RenderedImplicitInclude = false;
for (const Arg *A : Args.filtered(options::OPT_clang_i_Group)) {
- if (A->getOption().matches(options::OPT_include)) {
+ if (A->getOption().matches(options::OPT_include) &&
+ D.getProbePrecompiled()) {
// Handling of gcc-style gch precompiled headers.
bool IsFirstImplicitInclude = !RenderedImplicitInclude;
RenderedImplicitInclude = true;
@@ -1344,12 +1378,12 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
// so that replace_extension does the right thing.
P += ".dummy";
llvm::sys::path::replace_extension(P, "pch");
- if (llvm::sys::fs::exists(P))
+ if (D.getVFS().exists(P))
FoundPCH = true;
if (!FoundPCH) {
llvm::sys::path::replace_extension(P, "gch");
- if (llvm::sys::fs::exists(P)) {
+ if (D.getVFS().exists(P)) {
FoundPCH = true;
}
}
@@ -1453,6 +1487,9 @@ void Clang::AddPreprocessingOptions(Compilation &C, const JobAction &JA,
addMacroPrefixMapArg(D, Args, CmdArgs);
addCoveragePrefixMapArg(D, Args, CmdArgs);
+
+ Args.AddLastArg(CmdArgs, options::OPT_ffile_reproducible,
+ options::OPT_fno_file_reproducible);
}
// FIXME: Move to target hook.
@@ -1625,6 +1662,16 @@ void RenderARMABI(const Driver &D, const llvm::Triple &Triple,
CmdArgs.push_back("-target-abi");
CmdArgs.push_back(ABIName);
}
+
+void AddUnalignedAccessWarning(ArgStringList &CmdArgs) {
+ auto StrictAlignIter =
+ std::find_if(CmdArgs.rbegin(), CmdArgs.rend(), [](StringRef Arg) {
+ return Arg == "+strict-align" || Arg == "-strict-align";
+ });
+ if (StrictAlignIter != CmdArgs.rend() &&
+ StringRef(*StrictAlignIter) == "+strict-align")
+ CmdArgs.push_back("-Wunaligned-access");
+}
}
static void CollectARMPACBTIOptions(const ToolChain &TC, const ArgList &Args,
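AddUnalignedAccessWarning replaces the per-target CmdArgs pushes deleted in the Arch/*.cpp hunks above: it walks CmdArgs backwards for the last "{+,-}strict-align" target feature and appends -Wunaligned-access only when the winning value is "+strict-align". For example, with CmdArgs ending "... -target-feature +strict-align" and no later "-strict-align", the warning flag is added; if "-strict-align" occurs last, nothing is appended.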
@@ -1647,18 +1694,17 @@ static void CollectARMPACBTIOptions(const ToolChain &TC, const ArgList &Args,
if (A->getOption().matches(options::OPT_msign_return_address_EQ)) {
Scope = A->getValue();
- if (!Scope.equals("none") && !Scope.equals("non-leaf") &&
- !Scope.equals("all"))
- D.Diag(diag::err_invalid_branch_protection)
- << Scope << A->getAsString(Args);
+ if (Scope != "none" && Scope != "non-leaf" && Scope != "all")
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << Scope;
Key = "a_key";
IndirectBranches = false;
} else {
StringRef DiagMsg;
llvm::ARM::ParsedBranchProtection PBP;
if (!llvm::ARM::parseBranchProtection(A->getValue(), PBP, DiagMsg))
- D.Diag(diag::err_invalid_branch_protection)
- << DiagMsg << A->getAsString(Args);
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << DiagMsg;
if (!isAArch64 && PBP.Key == "b_key")
D.Diag(diag::warn_unsupported_branch_protection)
<< "b-key" << A->getAsString(Args);
@@ -1720,6 +1766,8 @@ void Clang::AddARMTargetArgs(const llvm::Triple &Triple, const ArgList &Args,
// Enable/disable return address signing and indirect branch targets.
CollectARMPACBTIOptions(getToolChain(), Args, CmdArgs, false /*isAArch64*/);
+
+ AddUnalignedAccessWarning(CmdArgs);
}
void Clang::RenderTargetOptions(const llvm::Triple &EffectiveTriple,
@@ -1893,6 +1941,8 @@ void Clang::AddAArch64TargetArgs(const ArgList &Args,
CmdArgs.push_back(Args.MakeArgString(TuneCPU));
}
}
+
+ AddUnalignedAccessWarning(CmdArgs);
}
void Clang::AddMIPSTargetArgs(const ArgList &Args,
@@ -2259,10 +2309,10 @@ void Clang::AddX86TargetArgs(const ArgList &Args,
// Handle -mtune.
- // Default to "generic" unless -march is present or targetting the PS4.
+  // Default to "generic" unless -march is present or targeting the PS4/PS5.
std::string TuneCPU;
if (!Args.hasArg(clang::driver::options::OPT_march_EQ) &&
- !getToolChain().getTriple().isPS4CPU())
+ !getToolChain().getTriple().isPS())
TuneCPU = "generic";
// Override based on -mtune.
@@ -2356,7 +2406,8 @@ void Clang::DumpCompilationDatabase(Compilation &C, StringRef Filename,
if (!CompilationDatabase) {
std::error_code EC;
auto File = std::make_unique<llvm::raw_fd_ostream>(
- Filename, EC, llvm::sys::fs::OF_TextWithCRLF);
+ Filename, EC,
+ llvm::sys::fs::OF_TextWithCRLF | llvm::sys::fs::OF_Append);
if (EC) {
D.Diag(clang::diag::err_drv_compilationdatabase) << Filename
<< EC.message();
@@ -2382,6 +2433,7 @@ void Clang::DumpCompilationDatabase(Compilation &C, StringRef Filename,
CDB << ", \"" << escape(Buf) << "\"";
}
CDB << ", \"" << escape(Input.getFilename()) << "\"";
+ CDB << ", \"-o\", \"" << escape(Output.getFilename()) << "\"";
for (auto &A: Args) {
auto &O = A->getOption();
// Skip language selection, which is positional.
@@ -2395,6 +2447,9 @@ void Clang::DumpCompilationDatabase(Compilation &C, StringRef Filename,
// Skip inputs.
if (O.getKind() == Option::InputClass)
continue;
+ // Skip output.
+ if (O.getID() == options::OPT_o)
+ continue;
// All other arguments are quoted and appended.
ArgStringList ASL;
A->render(Args, ASL);
@@ -2468,6 +2523,8 @@ static void CollectArgsForIntegratedAssembler(Compilation &C,
DefaultIncrementalLinkerCompatible))
CmdArgs.push_back("-mincremental-linker-compatible");
+ Args.AddLastArg(CmdArgs, options::OPT_femit_dwarf_unwind_EQ);
+
// If you add more args here, also add them to the block below that
// starts with "// If CollectArgsForIntegratedAssembler() isn't called below".
@@ -2700,6 +2757,8 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
StringRef FPModel = "";
// -ffp-exception-behavior options: strict, maytrap, ignore
StringRef FPExceptionBehavior = "";
+ // -ffp-eval-method options: double, extended, source
+ StringRef FPEvalMethod = "";
const llvm::DenormalMode DefaultDenormalFPMath =
TC.getDefaultDenormalModeForType(Args, JA);
const llvm::DenormalMode DefaultDenormalFP32Math =
@@ -2837,6 +2896,7 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
case options::OPT_fdenormal_fp_math_EQ:
DenormalFPMath = llvm::parseDenormalFPAttribute(A->getValue());
+ DenormalFP32Math = DenormalFPMath;
if (!DenormalFPMath.isValid()) {
D.Diag(diag::err_drv_invalid_value)
<< A->getAsString(Args) << A->getValue();
@@ -2895,6 +2955,18 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
break;
}
+ // Validate and pass through -ffp-eval-method option.
+ case options::OPT_ffp_eval_method_EQ: {
+ StringRef Val = A->getValue();
+ if (Val.equals("double") || Val.equals("extended") ||
+ Val.equals("source"))
+ FPEvalMethod = Val;
+ else
+ D.Diag(diag::err_drv_unsupported_option_argument)
+ << A->getOption().getName() << Val;
+ break;
+ }
+
case options::OPT_ffinite_math_only:
HonorINFs = false;
HonorNaNs = false;
@@ -3050,6 +3122,9 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
CmdArgs.push_back(Args.MakeArgString("-ffp-exception-behavior=" +
FPExceptionBehavior));
+ if (!FPEvalMethod.empty())
+ CmdArgs.push_back(Args.MakeArgString("-ffp-eval-method=" + FPEvalMethod));
+
ParseMRecip(D, Args, CmdArgs);
// -ffast-math enables the __FAST_MATH__ preprocessor macro, but check for the
@@ -3090,12 +3165,6 @@ static void RenderFloatingPointOptions(const ToolChain &TC, const Driver &D,
static void RenderAnalyzerOptions(const ArgList &Args, ArgStringList &CmdArgs,
const llvm::Triple &Triple,
const InputInfo &Input) {
- // Enable region store model by default.
- CmdArgs.push_back("-analyzer-store=region");
-
- // Treat blocks as analysis entry points.
- CmdArgs.push_back("-analyzer-opt-analyze-nested-blocks");
-
// Add default argument set.
if (!Args.hasArg(options::OPT__analyzer_no_default_checks)) {
CmdArgs.push_back("-analyzer-checker=core");
@@ -3113,8 +3182,8 @@ static void RenderAnalyzerOptions(const ArgList &Args, ArgStringList &CmdArgs,
CmdArgs.push_back("-analyzer-checker=unix.cstring.NullArg");
}
- // Disable some unix checkers for PS4.
- if (Triple.isPS4CPU()) {
+ // Disable some unix checkers for PS4/PS5.
+ if (Triple.isPS()) {
CmdArgs.push_back("-analyzer-disable-checker=unix.API");
CmdArgs.push_back("-analyzer-disable-checker=unix.Vfork");
}
@@ -3132,7 +3201,7 @@ static void RenderAnalyzerOptions(const ArgList &Args, ArgStringList &CmdArgs,
if (types::isCXX(Input.getType()))
CmdArgs.push_back("-analyzer-checker=cplusplus");
- if (!Triple.isPS4CPU()) {
+ if (!Triple.isPS()) {
CmdArgs.push_back("-analyzer-checker=security.insecureAPI.UncheckedReturn");
CmdArgs.push_back("-analyzer-checker=security.insecureAPI.getpw");
CmdArgs.push_back("-analyzer-checker=security.insecureAPI.gets");
@@ -3306,9 +3375,8 @@ static void RenderSCPOptions(const ToolChain &TC, const ArgList &Args,
!EffectiveTriple.isPPC64())
return;
- if (Args.hasFlag(options::OPT_fstack_clash_protection,
- options::OPT_fno_stack_clash_protection, false))
- CmdArgs.push_back("-fstack-clash-protection");
+ Args.addOptInFlag(CmdArgs, options::OPT_fstack_clash_protection,
+ options::OPT_fno_stack_clash_protection);
}
static void RenderTrivialAutoVarInitOptions(const Driver &D,
@@ -3391,6 +3459,9 @@ static void RenderOpenCLOptions(const ArgList &Args, ArgStringList &CmdArgs,
if (Arg *A = Args.getLastArg(options::OPT_cl_std_EQ)) {
std::string CLStdStr = std::string("-cl-std=") + A->getValue();
CmdArgs.push_back(Args.MakeArgString(CLStdStr));
+ } else if (Arg *A = Args.getLastArg(options::OPT_cl_ext_EQ)) {
+ std::string CLExtStr = std::string("-cl-ext=") + A->getValue();
+ CmdArgs.push_back(Args.MakeArgString(CLExtStr));
}
for (const auto &Arg : ForwardedArguments)
@@ -3406,6 +3477,24 @@ static void RenderOpenCLOptions(const ArgList &Args, ArgStringList &CmdArgs,
}
}
+static void RenderHLSLOptions(const ArgList &Args, ArgStringList &CmdArgs,
+ types::ID InputType) {
+ const unsigned ForwardedArguments[] = {options::OPT_dxil_validator_version,
+ options::OPT_D,
+ options::OPT_S,
+ options::OPT_emit_llvm,
+ options::OPT_disable_llvm_passes,
+ options::OPT_fnative_half_type};
+
+ for (const auto &Arg : ForwardedArguments)
+ if (const auto *A = Args.getLastArg(Arg))
+ A->renderAsInput(Args, CmdArgs);
+ // Add the default headers if dxc_no_stdinc is not set.
+ if (!Args.hasArg(options::OPT_dxc_no_stdinc))
+ CmdArgs.push_back("-finclude-default-header");
+ CmdArgs.push_back("-fallow-half-arguments-and-returns");
+}
+
static void RenderARCMigrateToolOptions(const Driver &D, const ArgList &Args,
ArgStringList &CmdArgs) {
bool ARCMTEnabled = false;
@@ -3492,20 +3581,13 @@ static void RenderBuiltinOptions(const ToolChain &TC, const llvm::Triple &T,
UseBuiltins = false;
// Process the -fno-builtin-* options.
- for (const auto &Arg : Args) {
- const Option &O = Arg->getOption();
- if (!O.matches(options::OPT_fno_builtin_))
- continue;
-
- Arg->claim();
+ for (const Arg *A : Args.filtered(options::OPT_fno_builtin_)) {
+ A->claim();
// If -fno-builtin is specified, then there's no need to pass the option to
// the frontend.
- if (!UseBuiltins)
- continue;
-
- StringRef FuncName = Arg->getValue();
- CmdArgs.push_back(Args.MakeArgString("-fno-builtin-" + FuncName));
+ if (UseBuiltins)
+ A->render(Args, CmdArgs);
}
// le32-specific flags:
@@ -3516,6 +3598,11 @@ static void RenderBuiltinOptions(const ToolChain &TC, const llvm::Triple &T,
}
bool Driver::getDefaultModuleCachePath(SmallVectorImpl<char> &Result) {
+ if (const char *Str = std::getenv("CLANG_MODULE_CACHE_PATH")) {
+ Twine Path{Str};
+ Path.toVector(Result);
+ return Path.getSingleStringRef() != "";
+ }
if (llvm::sys::path::cache_directory(Result)) {
llvm::sys::path::append(Result, "clang");
llvm::sys::path::append(Result, "ModuleCache");
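Per the hunk above, the new CLANG_MODULE_CACHE_PATH environment variable takes priority over the per-user cache directory: a non-empty value becomes the module cache path, while a set-but-empty value makes the function report that no default cache path is available. Illustrative usage (paths invented):

    CLANG_MODULE_CACHE_PATH=/tmp/mcache clang -fmodules t.c   # cache under /tmp/mcache
    CLANG_MODULE_CACHE_PATH=            clang -fmodules t.c   # no default module cache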
@@ -3554,9 +3641,8 @@ static void RenderModulesOptions(Compilation &C, const Driver &D,
CmdArgs.push_back("-fimplicit-module-maps");
// -fmodules-decluse checks that modules used are declared so (off by default)
- if (Args.hasFlag(options::OPT_fmodules_decluse,
- options::OPT_fno_modules_decluse, false))
- CmdArgs.push_back("-fmodules-decluse");
+ Args.addOptInFlag(CmdArgs, options::OPT_fmodules_decluse,
+ options::OPT_fno_modules_decluse);
// -fmodules-strict-decluse is like -fmodule-decluse, but also checks that
// all #included headers are part of modules.
@@ -3855,15 +3941,10 @@ static void RenderDiagnosticsOptions(const Driver &D, const ArgList &Args,
options::OPT_fno_caret_diagnostics, CaretDefault))
CmdArgs.push_back("-fno-caret-diagnostics");
- // -fdiagnostics-fixit-info is default, only pass non-default.
- if (!Args.hasFlag(options::OPT_fdiagnostics_fixit_info,
- options::OPT_fno_diagnostics_fixit_info))
- CmdArgs.push_back("-fno-diagnostics-fixit-info");
-
- // Enable -fdiagnostics-show-option by default.
- if (!Args.hasFlag(options::OPT_fdiagnostics_show_option,
- options::OPT_fno_diagnostics_show_option, true))
- CmdArgs.push_back("-fno-diagnostics-show-option");
+ Args.addOptOutFlag(CmdArgs, options::OPT_fdiagnostics_fixit_info,
+ options::OPT_fno_diagnostics_fixit_info);
+ Args.addOptOutFlag(CmdArgs, options::OPT_fdiagnostics_show_option,
+ options::OPT_fno_diagnostics_show_option);
if (const Arg *A =
Args.getLastArg(options::OPT_fdiagnostics_show_category_EQ)) {
@@ -3871,9 +3952,8 @@ static void RenderDiagnosticsOptions(const Driver &D, const ArgList &Args,
CmdArgs.push_back(A->getValue());
}
- if (Args.hasFlag(options::OPT_fdiagnostics_show_hotness,
- options::OPT_fno_diagnostics_show_hotness, false))
- CmdArgs.push_back("-fdiagnostics-show-hotness");
+ Args.addOptInFlag(CmdArgs, options::OPT_fdiagnostics_show_hotness,
+ options::OPT_fno_diagnostics_show_hotness);
if (const Arg *A =
Args.getLastArg(options::OPT_fdiagnostics_hotness_threshold_EQ)) {
@@ -3901,22 +3981,13 @@ static void RenderDiagnosticsOptions(const Driver &D, const ArgList &Args,
// re-parsed to construct this job; claim any possible color diagnostic here
// to avoid warn_drv_unused_argument and diagnose bad
// OPT_fdiagnostics_color_EQ values.
- for (const Arg *A : Args) {
- const Option &O = A->getOption();
- if (!O.matches(options::OPT_fcolor_diagnostics) &&
- !O.matches(options::OPT_fdiagnostics_color) &&
- !O.matches(options::OPT_fno_color_diagnostics) &&
- !O.matches(options::OPT_fno_diagnostics_color) &&
- !O.matches(options::OPT_fdiagnostics_color_EQ))
- continue;
-
- if (O.matches(options::OPT_fdiagnostics_color_EQ)) {
- StringRef Value(A->getValue());
- if (Value != "always" && Value != "never" && Value != "auto")
- D.Diag(diag::err_drv_clang_unsupported)
- << ("-fdiagnostics-color=" + Value).str();
- }
- A->claim();
+ Args.getLastArg(options::OPT_fcolor_diagnostics,
+ options::OPT_fno_color_diagnostics);
+ if (const Arg *A = Args.getLastArg(options::OPT_fdiagnostics_color_EQ)) {
+ StringRef Value(A->getValue());
+ if (Value != "always" && Value != "never" && Value != "auto")
+ D.Diag(diag::err_drv_invalid_argument_to_option)
+ << Value << A->getOption().getName();
}
if (D.getDiags().getDiagnosticOptions().ShowColors)
@@ -3925,9 +3996,8 @@ static void RenderDiagnosticsOptions(const Driver &D, const ArgList &Args,
if (Args.hasArg(options::OPT_fansi_escape_codes))
CmdArgs.push_back("-fansi-escape-codes");
- if (!Args.hasFlag(options::OPT_fshow_source_location,
- options::OPT_fno_show_source_location))
- CmdArgs.push_back("-fno-show-source-location");
+ Args.addOptOutFlag(CmdArgs, options::OPT_fshow_source_location,
+ options::OPT_fno_show_source_location);
if (Args.hasArg(options::OPT_fdiagnostics_absolute_paths))
CmdArgs.push_back("-fdiagnostics-absolute-paths");
@@ -3936,9 +4006,8 @@ static void RenderDiagnosticsOptions(const Driver &D, const ArgList &Args,
ColumnDefault))
CmdArgs.push_back("-fno-show-column");
- if (!Args.hasFlag(options::OPT_fspell_checking,
- options::OPT_fno_spell_checking))
- CmdArgs.push_back("-fno-spell-checking");
+ Args.addOptOutFlag(CmdArgs, options::OPT_fspell_checking,
+ options::OPT_fno_spell_checking);
}
enum class DwarfFissionKind { None, Split, Single };
@@ -4247,8 +4316,8 @@ static void renderDebugOptions(const ToolChain &TC, const Driver &D,
? "-gpubnames"
: "-ggnu-pubnames");
const auto *SimpleTemplateNamesArg =
- Args.getLastArg(options::OPT_gsimple_template_names, options::OPT_gno_simple_template_names,
- options::OPT_gsimple_template_names_EQ);
+ Args.getLastArg(options::OPT_gsimple_template_names,
+ options::OPT_gno_simple_template_names);
bool ForwardTemplateParams = DebuggerTuning == llvm::DebuggerKind::SCE;
if (SimpleTemplateNamesArg &&
checkDebugInfoOption(SimpleTemplateNamesArg, Args, D, TC)) {
@@ -4256,17 +4325,6 @@ static void renderDebugOptions(const ToolChain &TC, const Driver &D,
if (Opt.matches(options::OPT_gsimple_template_names)) {
ForwardTemplateParams = true;
CmdArgs.push_back("-gsimple-template-names=simple");
- } else if (Opt.matches(options::OPT_gsimple_template_names_EQ)) {
- ForwardTemplateParams = true;
- StringRef Value = SimpleTemplateNamesArg->getValue();
- if (Value == "simple") {
- CmdArgs.push_back("-gsimple-template-names=simple");
- } else if (Value == "mangled") {
- CmdArgs.push_back("-gsimple-template-names=mangled");
- } else {
- D.Diag(diag::err_drv_unsupported_option_argument)
- << Opt.getName() << SimpleTemplateNamesArg->getValue();
- }
}
}
@@ -4344,17 +4402,26 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// CUDA/HIP compilation may have multiple inputs (source file + results of
// device-side compilations). OpenMP device jobs also take the host IR as a
// second input. Module precompilation accepts a list of header files to
- // include as part of the module. All other jobs are expected to have exactly
- // one input.
+ // include as part of the module. API extraction accepts a list of header
+ // files whose API information is emitted in the output. All other jobs are
+ // expected to have exactly one input.
bool IsCuda = JA.isOffloading(Action::OFK_Cuda);
bool IsCudaDevice = JA.isDeviceOffloading(Action::OFK_Cuda);
bool IsHIP = JA.isOffloading(Action::OFK_HIP);
bool IsHIPDevice = JA.isDeviceOffloading(Action::OFK_HIP);
bool IsOpenMPDevice = JA.isDeviceOffloading(Action::OFK_OpenMP);
- bool IsOpenMPHost = JA.isHostOffloading(Action::OFK_OpenMP);
bool IsHeaderModulePrecompile = isa<HeaderModulePrecompileJobAction>(JA);
+ bool IsExtractAPI = isa<ExtractAPIJobAction>(JA);
bool IsDeviceOffloadAction = !(JA.isDeviceOffloading(Action::OFK_None) ||
JA.isDeviceOffloading(Action::OFK_Host));
+ bool IsHostOffloadingAction =
+ (JA.isHostOffloading(Action::OFK_OpenMP) &&
+ Args.hasFlag(options::OPT_fopenmp_new_driver,
+ options::OPT_no_offload_new_driver, true)) ||
+ (JA.isHostOffloading(C.getActiveOffloadKinds()) &&
+ Args.hasFlag(options::OPT_offload_new_driver,
+ options::OPT_no_offload_new_driver, false));
+
bool IsUsingLTO = D.isUsingLTO(IsDeviceOffloadAction);
auto LTOMode = D.getLTOMode(IsDeviceOffloadAction);
@@ -4366,11 +4433,22 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}();
InputInfo HeaderModuleInput(Inputs[0].getType(), ModuleName, ModuleName);
- const InputInfo &Input =
- IsHeaderModulePrecompile ? HeaderModuleInput : Inputs[0];
+ // Extract API doesn't have a main input file, so invent a fake one as a
+ // placeholder.
+ InputInfo ExtractAPIPlaceholderInput(Inputs[0].getType(), "extract-api",
+ "extract-api");
+
+ const InputInfo &Input = [&]() -> const InputInfo & {
+ if (IsHeaderModulePrecompile)
+ return HeaderModuleInput;
+ if (IsExtractAPI)
+ return ExtractAPIPlaceholderInput;
+ return Inputs[0];
+ }();
InputInfoList ModuleHeaderInputs;
- InputInfoList OpenMPHostInputs;
+ InputInfoList ExtractAPIInputs;
+ InputInfoList HostOffloadingInputs;
const InputInfo *CudaDeviceInput = nullptr;
const InputInfo *OpenMPDeviceInput = nullptr;
for (const InputInfo &I : Inputs) {
@@ -4385,12 +4463,20 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
<< types::getTypeName(Expected);
}
ModuleHeaderInputs.push_back(I);
+ } else if (IsExtractAPI) {
+ auto ExpectedInputType = ExtractAPIPlaceholderInput.getType();
+ if (I.getType() != ExpectedInputType) {
+ D.Diag(diag::err_drv_extract_api_wrong_kind)
+ << I.getFilename() << types::getTypeName(I.getType())
+ << types::getTypeName(ExpectedInputType);
+ }
+ ExtractAPIInputs.push_back(I);
+ } else if (IsHostOffloadingAction) {
+ HostOffloadingInputs.push_back(I);
} else if ((IsCuda || IsHIP) && !CudaDeviceInput) {
CudaDeviceInput = &I;
} else if (IsOpenMPDevice && !OpenMPDeviceInput) {
OpenMPDeviceInput = &I;
- } else if (IsOpenMPHost) {
- OpenMPHostInputs.push_back(I);
} else {
llvm_unreachable("unexpectedly given multiple inputs");
}
@@ -4534,6 +4620,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
Args.ClaimAllArgs(options::OPT_Wa_COMMA);
Args.ClaimAllArgs(options::OPT_Xassembler);
+ Args.ClaimAllArgs(options::OPT_femit_dwarf_unwind_EQ);
}
if (isa<AnalyzeJobAction>(JA)) {
@@ -4549,6 +4636,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_rewrite_objc) &&
!Args.hasArg(options::OPT_g_Group))
CmdArgs.push_back("-P");
+ else if (JA.getType() == types::TY_PP_CXXHeaderUnit)
+ CmdArgs.push_back("-fdirectives-only");
}
} else if (isa<AssembleJobAction>(JA)) {
CmdArgs.push_back("-emit-obj");
@@ -4564,10 +4653,18 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(IsHeaderModulePrecompile
? "-emit-header-module"
: "-emit-module-interface");
+ else if (JA.getType() == types::TY_HeaderUnit)
+ CmdArgs.push_back("-emit-header-unit");
else
CmdArgs.push_back("-emit-pch");
} else if (isa<VerifyPCHJobAction>(JA)) {
CmdArgs.push_back("-verify-pch");
+ } else if (isa<ExtractAPIJobAction>(JA)) {
+ assert(JA.getType() == types::TY_API_INFO &&
+ "Extract API actions must generate a API information.");
+ CmdArgs.push_back("-extract-api");
+ if (Arg *ProductNameArg = Args.getLastArg(options::OPT_product_name_EQ))
+ ProductNameArg->render(Args, CmdArgs);
} else {
assert((isa<CompileJobAction>(JA) || isa<BackendJobAction>(JA)) &&
"Invalid action for clang tool.");
@@ -4606,8 +4703,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
} else if (JA.getType() == types::TY_RewrittenLegacyObjC) {
CmdArgs.push_back("-rewrite-objc");
rewriteKind = RK_Fragile;
- } else if (JA.getType() == types::TY_API_INFO) {
- CmdArgs.push_back("-extract-api");
} else {
assert(JA.getType() == types::TY_PP_Asm && "Unexpected output type!");
}
@@ -4619,9 +4714,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (JA.getType() == types::TY_LLVM_BC)
CmdArgs.push_back("-emit-llvm-uselists");
- if (IsUsingLTO && !Args.hasArg(options::OPT_fopenmp_new_driver)) {
+ if (IsUsingLTO) {
// Only AMDGPU supports device-side LTO.
- if (IsDeviceOffloadAction && !Triple.isAMDGPU()) {
+ if (IsDeviceOffloadAction &&
+ !Args.hasFlag(options::OPT_fopenmp_new_driver,
+ options::OPT_no_offload_new_driver, true) &&
+ !Args.hasFlag(options::OPT_offload_new_driver,
+ options::OPT_no_offload_new_driver, false) &&
+ !Triple.isAMDGPU()) {
D.Diag(diag::err_drv_unsupported_opt_for_target)
<< Args.getLastArg(options::OPT_foffload_lto,
options::OPT_foffload_lto_EQ)
@@ -4717,9 +4817,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
D.Diag(diag::err_drv_unsupported_embed_bitcode) << A->getSpelling();
// Render the CodeGen options that need to be passed.
- if (!Args.hasFlag(options::OPT_foptimize_sibling_calls,
- options::OPT_fno_optimize_sibling_calls))
- CmdArgs.push_back("-mdisable-tail-calls");
+ Args.addOptOutFlag(CmdArgs, options::OPT_foptimize_sibling_calls,
+ options::OPT_fno_optimize_sibling_calls);
RenderFloatingPointOptions(TC, D, isOptimizationLevelFast(Args), Args,
CmdArgs, JA);
@@ -5008,17 +5107,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
A->claim();
}
- if (!Args.hasFlag(options::OPT_fjump_tables, options::OPT_fno_jump_tables,
- true))
- CmdArgs.push_back("-fno-jump-tables");
-
- if (Args.hasFlag(options::OPT_fprofile_sample_accurate,
- options::OPT_fno_profile_sample_accurate, false))
- CmdArgs.push_back("-fprofile-sample-accurate");
-
- if (!Args.hasFlag(options::OPT_fpreserve_as_comments,
- options::OPT_fno_preserve_as_comments, true))
- CmdArgs.push_back("-fno-preserve-as-comments");
+ Args.addOptOutFlag(CmdArgs, options::OPT_fjump_tables,
+ options::OPT_fno_jump_tables);
+ Args.addOptInFlag(CmdArgs, options::OPT_fprofile_sample_accurate,
+ options::OPT_fno_profile_sample_accurate);
+ Args.addOptOutFlag(CmdArgs, options::OPT_fpreserve_as_comments,
+ options::OPT_fno_preserve_as_comments);
if (Arg *A = Args.getLastArg(options::OPT_mregparm_EQ)) {
CmdArgs.push_back("-mregparm");
@@ -5078,9 +5172,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
assert(FPKeepKindStr && "unknown FramePointerKind");
CmdArgs.push_back(FPKeepKindStr);
- if (!Args.hasFlag(options::OPT_fzero_initialized_in_bss,
- options::OPT_fno_zero_initialized_in_bss, true))
- CmdArgs.push_back("-fno-zero-initialized-in-bss");
+ Args.addOptOutFlag(CmdArgs, options::OPT_fzero_initialized_in_bss,
+ options::OPT_fno_zero_initialized_in_bss);
bool OFastEnabled = isOptimizationLevelFast(Args);
// If -Ofast is the optimization level, then -fstrict-aliasing should be
@@ -5094,31 +5187,22 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_strict_aliasing, TBAAOnByDefault))
CmdArgs.push_back("-relaxed-aliasing");
if (!Args.hasFlag(options::OPT_fstruct_path_tbaa,
- options::OPT_fno_struct_path_tbaa))
+ options::OPT_fno_struct_path_tbaa, true))
CmdArgs.push_back("-no-struct-path-tbaa");
- if (Args.hasFlag(options::OPT_fstrict_enums, options::OPT_fno_strict_enums,
- false))
- CmdArgs.push_back("-fstrict-enums");
- if (!Args.hasFlag(options::OPT_fstrict_return, options::OPT_fno_strict_return,
- true))
- CmdArgs.push_back("-fno-strict-return");
- if (Args.hasFlag(options::OPT_fallow_editor_placeholders,
- options::OPT_fno_allow_editor_placeholders, false))
- CmdArgs.push_back("-fallow-editor-placeholders");
- if (Args.hasFlag(options::OPT_fstrict_vtable_pointers,
- options::OPT_fno_strict_vtable_pointers,
- false))
- CmdArgs.push_back("-fstrict-vtable-pointers");
- if (Args.hasFlag(options::OPT_fforce_emit_vtables,
- options::OPT_fno_force_emit_vtables,
- false))
- CmdArgs.push_back("-fforce-emit-vtables");
- if (!Args.hasFlag(options::OPT_foptimize_sibling_calls,
- options::OPT_fno_optimize_sibling_calls))
- CmdArgs.push_back("-mdisable-tail-calls");
- if (Args.hasFlag(options::OPT_fno_escaping_block_tail_calls,
- options::OPT_fescaping_block_tail_calls, false))
- CmdArgs.push_back("-fno-escaping-block-tail-calls");
+ Args.addOptInFlag(CmdArgs, options::OPT_fstrict_enums,
+ options::OPT_fno_strict_enums);
+ Args.addOptOutFlag(CmdArgs, options::OPT_fstrict_return,
+ options::OPT_fno_strict_return);
+ Args.addOptInFlag(CmdArgs, options::OPT_fallow_editor_placeholders,
+ options::OPT_fno_allow_editor_placeholders);
+ Args.addOptInFlag(CmdArgs, options::OPT_fstrict_vtable_pointers,
+ options::OPT_fno_strict_vtable_pointers);
+ Args.addOptInFlag(CmdArgs, options::OPT_fforce_emit_vtables,
+ options::OPT_fno_force_emit_vtables);
+ Args.addOptOutFlag(CmdArgs, options::OPT_foptimize_sibling_calls,
+ options::OPT_fno_optimize_sibling_calls);
+ Args.addOptOutFlag(CmdArgs, options::OPT_fescaping_block_tail_calls,
+ options::OPT_fno_escaping_block_tail_calls);
Args.AddLastArg(CmdArgs, options::OPT_ffine_grained_bitfield_accesses,
options::OPT_fno_fine_grained_bitfield_accesses);
@@ -5246,6 +5330,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Freestanding)
CmdArgs.push_back("-ffreestanding");
+ Args.AddLastArg(CmdArgs, options::OPT_fno_knr_functions);
+
// This is a coarse approximation of what llvm-gcc actually does, both
// -fasynchronous-unwind-tables and -fnon-call-exceptions interact in more
// complicated ways.
@@ -5349,6 +5435,19 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
types::isLLVMIR(InputType), CmdArgs, DebugInfoKind,
DwarfFission);
+ // This controls whether or not we perform JustMyCode instrumentation.
+ if (Args.hasFlag(options::OPT_fjmc, options::OPT_fno_jmc, false)) {
+ if (TC.getTriple().isOSBinFormatELF()) {
+ if (DebugInfoKind >= codegenoptions::DebugInfoConstructor)
+ CmdArgs.push_back("-fjmc");
+ else
+ D.Diag(clang::diag::warn_drv_jmc_requires_debuginfo) << "-fjmc"
+ << "-g";
+ } else {
+ D.Diag(clang::diag::warn_drv_fjmc_for_elf_only);
+ }
+ }
+
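As the hunk above reads, -fjmc is honored only for ELF targets and only when debug info is at least constructor-level; otherwise the driver downgrades to a warning rather than an error. Illustrative invocations:

    clang --target=x86_64-unknown-linux-gnu -g -fjmc -c t.c   # forwards -fjmc to cc1
    clang --target=x86_64-unknown-linux-gnu -fjmc -c t.c      # warns: -fjmc requires -g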
// Add the split debug info name to the command lines here so we
// can propagate it to the backend.
bool SplitDWARF = (DwarfFission != DwarfFissionKind::None) &&
@@ -5464,17 +5563,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fdata-sections");
}
- if (!Args.hasFlag(options::OPT_funique_section_names,
- options::OPT_fno_unique_section_names, true))
- CmdArgs.push_back("-fno-unique-section-names");
-
- if (Args.hasFlag(options::OPT_funique_internal_linkage_names,
- options::OPT_fno_unique_internal_linkage_names, false))
- CmdArgs.push_back("-funique-internal-linkage-names");
-
- if (Args.hasFlag(options::OPT_funique_basic_block_section_names,
- options::OPT_fno_unique_basic_block_section_names, false))
- CmdArgs.push_back("-funique-basic-block-section-names");
+ Args.addOptOutFlag(CmdArgs, options::OPT_funique_section_names,
+ options::OPT_fno_unique_section_names);
+ Args.addOptInFlag(CmdArgs, options::OPT_funique_internal_linkage_names,
+ options::OPT_fno_unique_internal_linkage_names);
+ Args.addOptInFlag(CmdArgs, options::OPT_funique_basic_block_section_names,
+ options::OPT_fno_unique_basic_block_section_names);
if (Arg *A = Args.getLastArg(options::OPT_fsplit_machine_functions,
options::OPT_fno_split_machine_functions)) {
@@ -5500,11 +5594,11 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fclang_abi_compat_EQ);
- // Add runtime flag for PS4 when PGO, coverage, or sanitizers are enabled.
- if (RawTriple.isPS4CPU() &&
+ // Add runtime flag for PS4/PS5 when PGO, coverage, or sanitizers are enabled.
+ if (RawTriple.isPS() &&
!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
- PS4cpu::addProfileRTArgs(TC, Args, CmdArgs);
- PS4cpu::addSanitizerArgs(TC, Args, CmdArgs);
+ PScpu::addProfileRTArgs(TC, Args, CmdArgs);
+ PScpu::addSanitizerArgs(TC, Args, CmdArgs);
}
// Pass options for controlling the default header search paths.
@@ -5698,7 +5792,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
const char *DebugCompilationDir =
addDebugCompDirArg(Args, CmdArgs, D.getVFS());
- addDebugPrefixMapArg(D, Args, CmdArgs);
+ addDebugPrefixMapArg(D, TC, Args, CmdArgs);
if (Arg *A = Args.getLastArg(options::OPT_ftemplate_depth_,
options::OPT_ftemplate_depth_EQ)) {
@@ -5721,6 +5815,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(A->getValue());
}
+ if (Args.hasArg(options::OPT_funstable)) {
+ CmdArgs.push_back("-funstable");
+ if (!Args.hasArg(options::OPT_fno_coroutines_ts))
+ CmdArgs.push_back("-fcoroutines-ts");
+ CmdArgs.push_back("-fmodules-ts");
+ }
+
if (Args.hasArg(options::OPT_fexperimental_new_constant_interpreter))
CmdArgs.push_back("-fexperimental-new-constant-interpreter");
@@ -5746,7 +5847,7 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
"standalone", "objc", "swift", "swift-5.0", "swift-4.2", "swift-4.1",
};
- if (find(kCFABIs, StringRef(A->getValue())) == std::end(kCFABIs))
+ if (!llvm::is_contained(kCFABIs, StringRef(A->getValue())))
D.Diag(diag::err_drv_invalid_cf_runtime_abi) << A->getValue();
else
A->render(Args, CmdArgs);
@@ -5820,6 +5921,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(
Args.MakeArgString("-fmessage-length=" + Twine(MessageLength)));
+ if (Arg *A = Args.getLastArg(options::OPT_frandomize_layout_seed_EQ))
+ CmdArgs.push_back(
+ Args.MakeArgString("-frandomize-layout-seed=" + Twine(A->getValue(0))));
+
+ if (Arg *A = Args.getLastArg(options::OPT_frandomize_layout_seed_file_EQ))
+ CmdArgs.push_back(Args.MakeArgString("-frandomize-layout-seed-file=" +
+ Twine(A->getValue(0))));
+
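Both structure-layout randomization seeds are forwarded verbatim to cc1, so e.g. -frandomize-layout-seed=0123456789abcdef or -frandomize-layout-seed-file=seed.txt (values illustrative) reach the frontend unchanged; per getLastArg, only the last occurrence of each option wins.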
// -fvisibility= and -fvisibility-ms-compat are of a piece.
if (const Arg *A = Args.getLastArg(options::OPT_fvisibility_EQ,
options::OPT_fvisibility_ms_compat)) {
@@ -5864,6 +5973,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
<< A->getAsString(Args) << TripleStr;
}
+ if (const Arg *A =
+ Args.getLastArg(options::OPT_mdefault_visibility_export_mapping_EQ)) {
+ if (Triple.isOSAIX())
+ A->render(Args, CmdArgs);
+ else
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(Args) << TripleStr;
+ }
if (Args.hasFlag(options::OPT_fvisibility_inlines_hidden,
options::OPT_fno_visibility_inlines_hidden, false))
@@ -5888,6 +6005,16 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fdigraphs, options::OPT_fno_digraphs);
Args.AddLastArg(CmdArgs, options::OPT_femulated_tls,
options::OPT_fno_emulated_tls);
+ Args.AddLastArg(CmdArgs, options::OPT_fzero_call_used_regs_EQ);
+
+ if (Arg *A = Args.getLastArg(options::OPT_fzero_call_used_regs_EQ)) {
+ // FIXME: There's no reason for this to be restricted to X86. The backend
+ // code needs to be changed to include the appropriate function calls
+ // automatically.
+ if (!Triple.isX86() && !Triple.isAArch64())
+ D.Diag(diag::err_drv_unsupported_opt_for_target)
+ << A->getAsString(Args) << TripleStr;
+ }
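Note the asymmetry in the hunk above: -fzero-call-used-regs= is always forwarded by AddLastArg, and this check merely diagnoses it on unsupported targets. For example (value illustrative), -fzero-call-used-regs=used-gpr is accepted for X86 and AArch64 triples but raises err_drv_unsupported_opt_for_target elsewhere.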
// AltiVec-like language extensions aren't relevant for assembling.
if (!isa<PreprocessJobAction>(JA) || Output.getType() != types::TY_PP_Asm)
@@ -5936,13 +6063,6 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_openmp_cuda_mode, /*Default=*/false))
CmdArgs.push_back("-fopenmp-cuda-mode");
- // When in OpenMP offloading mode, enable or disable the new device
- // runtime.
- if (Args.hasFlag(options::OPT_fopenmp_target_new_runtime,
- options::OPT_fno_openmp_target_new_runtime,
- /*Default=*/true))
- CmdArgs.push_back("-fopenmp-target-new-runtime");
-
// When in OpenMP offloading mode, enable debugging on the device.
Args.AddAllArgs(CmdArgs, options::OPT_fopenmp_target_debug_EQ);
if (Args.hasFlag(options::OPT_fopenmp_target_debug,
@@ -5966,6 +6086,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT_fno_openmp_assume_threads_oversubscription,
/*Default=*/false))
CmdArgs.push_back("-fopenmp-assume-threads-oversubscription");
+ if (Args.hasArg(options::OPT_fopenmp_assume_no_thread_state))
+ CmdArgs.push_back("-fopenmp-assume-no-thread-state");
+ if (Args.hasArg(options::OPT_fopenmp_offload_mandatory))
+ CmdArgs.push_back("-fopenmp-offload-mandatory");
break;
default:
// By default, if Clang doesn't know how to generate useful OpenMP code
@@ -5980,11 +6104,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fopenmp_simd,
options::OPT_fno_openmp_simd);
Args.AddAllArgs(CmdArgs, options::OPT_fopenmp_version_EQ);
- if (!Args.hasFlag(options::OPT_fopenmp_extensions,
- options::OPT_fno_openmp_extensions, /*Default=*/true))
- CmdArgs.push_back("-fno-openmp-extensions");
+ Args.addOptOutFlag(CmdArgs, options::OPT_fopenmp_extensions,
+ options::OPT_fno_openmp_extensions);
}
+ // Forward the new driver to change offloading code generation.
+ if (Args.hasArg(options::OPT_offload_new_driver))
+ CmdArgs.push_back("--offload-new-driver");
+
SanitizeArgs.addArgs(TC, Args, CmdArgs, InputType);
const XRayArgs &XRay = TC.getXRayArgs();
@@ -6125,9 +6252,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-mstack-probe-size=0");
}
- if (!Args.hasFlag(options::OPT_mstack_arg_probe,
- options::OPT_mno_stack_arg_probe, true))
- CmdArgs.push_back(Args.MakeArgString("-mno-stack-arg-probe"));
+ Args.addOptOutFlag(CmdArgs, options::OPT_mstack_arg_probe,
+ options::OPT_mno_stack_arg_probe);
if (Arg *A = Args.getLastArg(options::OPT_mrestrict_it,
options::OPT_mno_restrict_it)) {
@@ -6136,19 +6262,17 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-arm-restrict-it");
} else {
CmdArgs.push_back("-mllvm");
- CmdArgs.push_back("-arm-no-restrict-it");
+ CmdArgs.push_back("-arm-default-it");
}
- } else if (Triple.isOSWindows() &&
- (Triple.getArch() == llvm::Triple::arm ||
- Triple.getArch() == llvm::Triple::thumb)) {
- // Windows on ARM expects restricted IT blocks
- CmdArgs.push_back("-mllvm");
- CmdArgs.push_back("-arm-restrict-it");
}
// Forward -cl options to -cc1
RenderOpenCLOptions(Args, CmdArgs, InputType);
+ // Forward HLSL options to -cc1
+ if (C.getDriver().IsDXCMode())
+ RenderHLSLOptions(Args, CmdArgs, InputType);
+
if (IsHIP) {
if (Args.hasFlag(options::OPT_fhip_new_launch_api,
options::OPT_fno_hip_new_launch_api, true))
@@ -6156,9 +6280,14 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasFlag(options::OPT_fgpu_allow_device_init,
options::OPT_fno_gpu_allow_device_init, false))
CmdArgs.push_back("-fgpu-allow-device-init");
+ Args.addOptInFlag(CmdArgs, options::OPT_fhip_kernel_arg_name,
+ options::OPT_fno_hip_kernel_arg_name);
}
if (IsCuda || IsHIP) {
+ if (!Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc, false) &&
+ Args.hasArg(options::OPT_offload_new_driver))
+ D.Diag(diag::err_drv_no_rdc_new_driver);
if (Args.hasFlag(options::OPT_fgpu_rdc, options::OPT_fno_gpu_rdc, false))
CmdArgs.push_back("-fgpu-rdc");
if (Args.hasFlag(options::OPT_fgpu_defer_diag,
@@ -6172,6 +6301,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ // Forward -nogpulib to -cc1.
+ if (Args.hasArg(options::OPT_nogpulib))
+ CmdArgs.push_back("-nogpulib");
+
if (Arg *A = Args.getLastArg(options::OPT_fcf_protection_EQ)) {
CmdArgs.push_back(
Args.MakeArgString(Twine("-fcf-protection=") + A->getValue()));
@@ -6214,9 +6347,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
RenderBuiltinOptions(TC, RawTriple, Args, CmdArgs);
- if (!Args.hasFlag(options::OPT_fassume_sane_operator_new,
- options::OPT_fno_assume_sane_operator_new))
- CmdArgs.push_back("-fno-assume-sane-operator-new");
+ Args.addOptOutFlag(CmdArgs, options::OPT_fassume_sane_operator_new,
+ options::OPT_fno_assume_sane_operator_new);
// -fblocks=0 is default.
if (Args.hasFlag(options::OPT_fblocks, options::OPT_fno_blocks,
@@ -6243,15 +6375,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fdouble_square_bracket_attributes,
options::OPT_fno_double_square_bracket_attributes);
- // -faccess-control is default.
- if (Args.hasFlag(options::OPT_fno_access_control,
- options::OPT_faccess_control, false))
- CmdArgs.push_back("-fno-access-control");
-
- // -felide-constructors is the default.
- if (Args.hasFlag(options::OPT_fno_elide_constructors,
- options::OPT_felide_constructors, false))
- CmdArgs.push_back("-fno-elide-constructors");
+ Args.addOptOutFlag(CmdArgs, options::OPT_faccess_control,
+ options::OPT_fno_access_control);
+ Args.addOptOutFlag(CmdArgs, options::OPT_felide_constructors,
+ options::OPT_fno_elide_constructors);
ToolChain::RTTIMode RTTIMode = TC.getRTTIMode();
@@ -6280,10 +6407,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
RawTriple.isOSDarwin() && !KernelOrKext))
CmdArgs.push_back("-fregister-global-dtors-with-atexit");
- // -fno-use-line-directives is default.
- if (Args.hasFlag(options::OPT_fuse_line_directives,
- options::OPT_fno_use_line_directives, false))
- CmdArgs.push_back("-fuse-line-directives");
+ Args.addOptInFlag(CmdArgs, options::OPT_fuse_line_directives,
+ options::OPT_fno_use_line_directives);
// -fno-minimize-whitespace is default.
if (Args.hasFlag(options::OPT_fminimize_whitespace,
@@ -6316,8 +6441,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
StringRef Val = A->getValue();
Val = Val.empty() ? "0" : Val; // Treat "" as 0 or disable.
bool Invalid = GNUCVer.tryParse(Val);
- unsigned Minor = GNUCVer.getMinor().getValueOr(0);
- unsigned Patch = GNUCVer.getSubminor().getValueOr(0);
+ unsigned Minor = GNUCVer.getMinor().value_or(0);
+ unsigned Patch = GNUCVer.getSubminor().value_or(0);
if (Invalid || GNUCVer.getBuild() || Minor >= 100 || Patch >= 100) {
D.Diag(diag::err_drv_invalid_value)
<< A->getAsString(Args) << A->getValue();
@@ -6376,14 +6501,12 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(LanguageStandard.data());
}
- // -fno-borland-extensions is default.
- if (Args.hasFlag(options::OPT_fborland_extensions,
- options::OPT_fno_borland_extensions, false))
- CmdArgs.push_back("-fborland-extensions");
+ Args.addOptInFlag(CmdArgs, options::OPT_fborland_extensions,
+ options::OPT_fno_borland_extensions);
- // -fno-declspec is default, except for PS4.
+ // -fno-declspec is default, except for PS4/PS5.
if (Args.hasFlag(options::OPT_fdeclspec, options::OPT_fno_declspec,
- RawTriple.isPS4()))
+ RawTriple.isPS()))
CmdArgs.push_back("-fdeclspec");
else if (Args.hasArg(options::OPT_fno_declspec))
CmdArgs.push_back("-fno-declspec"); // Explicitly disabling __declspec.
@@ -6409,16 +6532,18 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
Args.AddLastArg(CmdArgs, options::OPT_fgnu_keywords,
options::OPT_fno_gnu_keywords);
- if (Args.hasFlag(options::OPT_fgnu89_inline, options::OPT_fno_gnu89_inline,
- false))
- CmdArgs.push_back("-fgnu89-inline");
-
- if (Args.hasArg(options::OPT_fno_inline))
- CmdArgs.push_back("-fno-inline");
+ Args.addOptInFlag(CmdArgs, options::OPT_fgnu89_inline,
+ options::OPT_fno_gnu89_inline);
- Args.AddLastArg(CmdArgs, options::OPT_finline_functions,
- options::OPT_finline_hint_functions,
- options::OPT_fno_inline_functions);
+ const Arg *InlineArg = Args.getLastArg(options::OPT_finline_functions,
+ options::OPT_finline_hint_functions,
+ options::OPT_fno_inline_functions);
+ if (Arg *A = Args.getLastArg(options::OPT_finline, options::OPT_fno_inline)) {
+ if (A->getOption().matches(options::OPT_fno_inline))
+ A->render(Args, CmdArgs);
+ } else if (InlineArg) {
+ InlineArg->render(Args, CmdArgs);
+ }
// FIXME: Find a better way to determine whether the language has modules
// support by default, or just assume that all languages do.
@@ -6440,8 +6565,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
false))
CmdArgs.push_back("-fmodules-debuginfo");
- Args.AddLastArg(CmdArgs, options::OPT_flegacy_pass_manager,
- options::OPT_fno_legacy_pass_manager);
+ if (!CLANG_ENABLE_OPAQUE_POINTERS_INTERNAL)
+ CmdArgs.push_back("-no-opaque-pointers");
ObjCRuntime Runtime = AddObjCRuntimeArgs(Args, Inputs, CmdArgs, rewriteKind);
RenderObjCOptions(TC, D, RawTriple, Args, Runtime, rewriteKind != RK_None,
@@ -6493,22 +6618,19 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
// C++ "sane" operator new.
- if (!Args.hasFlag(options::OPT_fassume_sane_operator_new,
- options::OPT_fno_assume_sane_operator_new))
- CmdArgs.push_back("-fno-assume-sane-operator-new");
+ Args.addOptOutFlag(CmdArgs, options::OPT_fassume_sane_operator_new,
+ options::OPT_fno_assume_sane_operator_new);
// -frelaxed-template-template-args is off by default, as it is a severe
// breaking change until a corresponding change to template partial ordering
// is provided.
- if (Args.hasFlag(options::OPT_frelaxed_template_template_args,
- options::OPT_fno_relaxed_template_template_args, false))
- CmdArgs.push_back("-frelaxed-template-template-args");
+ Args.addOptInFlag(CmdArgs, options::OPT_frelaxed_template_template_args,
+ options::OPT_fno_relaxed_template_template_args);
// -fsized-deallocation is off by default, as it is an ABI-breaking change for
// most platforms.
- if (Args.hasFlag(options::OPT_fsized_deallocation,
- options::OPT_fno_sized_deallocation, false))
- CmdArgs.push_back("-fsized-deallocation");
+ Args.addOptInFlag(CmdArgs, options::OPT_fsized_deallocation,
+ options::OPT_fno_sized_deallocation);
// -faligned-allocation is on by default in C++17 onwards and otherwise off
// by default.
@@ -6531,15 +6653,13 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
// -fconstant-cfstrings is default, and may be subject to argument translation
// on Darwin.
if (!Args.hasFlag(options::OPT_fconstant_cfstrings,
- options::OPT_fno_constant_cfstrings) ||
+ options::OPT_fno_constant_cfstrings, true) ||
!Args.hasFlag(options::OPT_mconstant_cfstrings,
- options::OPT_mno_constant_cfstrings))
+ options::OPT_mno_constant_cfstrings, true))
CmdArgs.push_back("-fno-constant-cfstrings");
- // -fno-pascal-strings is default, only pass non-default.
- if (Args.hasFlag(options::OPT_fpascal_strings,
- options::OPT_fno_pascal_strings, false))
- CmdArgs.push_back("-fpascal-strings");
+ Args.addOptInFlag(CmdArgs, options::OPT_fpascal_strings,
+ options::OPT_fno_pascal_strings);
// Honor -fpack-struct= and -fpack-struct, if given. Note that
// -fno-pack-struct doesn't apply to -fpack-struct=.
@@ -6571,18 +6691,17 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-Qn");
// -fno-common is the default, set -fcommon only when that flag is set.
- if (Args.hasFlag(options::OPT_fcommon, options::OPT_fno_common, false))
- CmdArgs.push_back("-fcommon");
+ Args.addOptInFlag(CmdArgs, options::OPT_fcommon, options::OPT_fno_common);
// -fsigned-bitfields is default, and clang doesn't yet support
// -funsigned-bitfields.
if (!Args.hasFlag(options::OPT_fsigned_bitfields,
- options::OPT_funsigned_bitfields))
+ options::OPT_funsigned_bitfields, true))
D.Diag(diag::warn_drv_clang_unsupported)
<< Args.getLastArg(options::OPT_funsigned_bitfields)->getAsString(Args);
// -ffor-scope is default, and clang doesn't support -fno-for-scope.
- if (!Args.hasFlag(options::OPT_ffor_scope, options::OPT_fno_for_scope))
+ if (!Args.hasFlag(options::OPT_ffor_scope, options::OPT_fno_for_scope, true))
D.Diag(diag::err_drv_clang_unsupported)
<< Args.getLastArg(options::OPT_fno_for_scope)->getAsString(Args);
@@ -6604,10 +6723,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
RenderDiagnosticsOptions(D, Args, CmdArgs);
- // -fno-asm-blocks is default.
- if (Args.hasFlag(options::OPT_fasm_blocks, options::OPT_fno_asm_blocks,
- false))
- CmdArgs.push_back("-fasm-blocks");
+ Args.addOptInFlag(CmdArgs, options::OPT_fasm_blocks,
+ options::OPT_fno_asm_blocks);
// -fgnu-inline-asm is default.
if (!Args.hasFlag(options::OPT_fgnu_inline_asm,
@@ -6648,17 +6765,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fno-dollars-in-identifiers");
}
- // -funit-at-a-time is default, and we don't support -fno-unit-at-a-time for
- // practical purposes.
- if (Arg *A = Args.getLastArg(options::OPT_funit_at_a_time,
- options::OPT_fno_unit_at_a_time)) {
- if (A->getOption().matches(options::OPT_fno_unit_at_a_time))
- D.Diag(diag::warn_drv_clang_unsupported) << A->getAsString(Args);
- }
-
- if (Args.hasFlag(options::OPT_fapple_pragma_pack,
- options::OPT_fno_apple_pragma_pack, false))
- CmdArgs.push_back("-fapple-pragma-pack");
+ Args.addOptInFlag(CmdArgs, options::OPT_fapple_pragma_pack,
+ options::OPT_fno_apple_pragma_pack);
if (Args.hasFlag(options::OPT_fxl_pragma_pack,
options::OPT_fno_xl_pragma_pack, RawTriple.isOSAIX()))
@@ -6673,6 +6781,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
if (RewriteImports)
CmdArgs.push_back("-frewrite-imports");
+ if (Args.hasFlag(options::OPT_fdirectives_only,
+ options::OPT_fno_directives_only, false))
+ CmdArgs.push_back("-fdirectives-only");
+
// Enable rewrite includes if the user's asked for it or if we're generating
// diagnostics.
// TODO: Once -module-dependency-dir works with -frewrite-includes it'd be
@@ -6871,8 +6983,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(Twine("-cuid=") + Twine(CUID)));
}
- if (IsHIP)
+ if (IsHIP) {
CmdArgs.push_back("-fcuda-allow-variadic-functions");
+ Args.AddLastArg(CmdArgs, options::OPT_fgpu_default_stream_EQ);
+ }
if (IsCudaDevice || IsHIPDevice) {
StringRef InlineThresh =
@@ -6897,50 +7011,29 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- // Host-side OpenMP offloading recieves the device object files and embeds it
- // in a named section including the associated target triple and architecture.
- if (IsOpenMPHost && !OpenMPHostInputs.empty()) {
- auto InputFile = OpenMPHostInputs.begin();
- auto OpenMPTCs = C.getOffloadToolChains<Action::OFK_OpenMP>();
- for (auto TI = OpenMPTCs.first, TE = OpenMPTCs.second; TI != TE;
- ++TI, ++InputFile) {
- const ToolChain *TC = TI->second;
- const ArgList &TCArgs = C.getArgsForToolChain(TC, "", Action::OFK_OpenMP);
- StringRef File =
- C.getArgs().MakeArgString(TC->getInputFilename(*InputFile));
- StringRef InputName = Clang::getBaseInputStem(Args, Inputs);
-
- CmdArgs.push_back(Args.MakeArgString(
- "-fembed-offload-object=" + File + "," + TC->getTripleString() + "." +
- TCArgs.getLastArgValue(options::OPT_march_EQ) + "." + InputName));
- }
- }
+ // Host-side offloading receives the device object files and embeds them in a
+ // named section including the associated target triple and architecture.
+ for (const InputInfo Input : HostOffloadingInputs)
+ CmdArgs.push_back(Args.MakeArgString("-fembed-offload-object=" +
+ TC.getInputFilename(Input)));
if (Triple.isAMDGPU()) {
handleAMDGPUCodeObjectVersionOptions(D, Args, CmdArgs);
- if (Args.hasFlag(options::OPT_munsafe_fp_atomics,
- options::OPT_mno_unsafe_fp_atomics, /*Default=*/false))
- CmdArgs.push_back("-munsafe-fp-atomics");
+ Args.addOptInFlag(CmdArgs, options::OPT_munsafe_fp_atomics,
+ options::OPT_mno_unsafe_fp_atomics);
}
// For all the host OpenMP offloading compile jobs we need to pass the targets
// information using -fopenmp-targets= option.
if (JA.isHostOffloading(Action::OFK_OpenMP)) {
- SmallString<128> TargetInfo("-fopenmp-targets=");
+ SmallString<128> Targets("-fopenmp-targets=");
- Arg *Tgts = Args.getLastArg(options::OPT_fopenmp_targets_EQ);
- assert(Tgts && Tgts->getNumValues() &&
- "OpenMP offloading has to have targets specified.");
- for (unsigned i = 0; i < Tgts->getNumValues(); ++i) {
- if (i)
- TargetInfo += ',';
- // We need to get the string from the triple because it may be not exactly
- // the same as the one we get directly from the arguments.
- llvm::Triple T(Tgts->getValue(i));
- TargetInfo += T.getTriple();
- }
- CmdArgs.push_back(Args.MakeArgString(TargetInfo.str()));
+ SmallVector<std::string, 4> Triples;
+ auto TCRange = C.getOffloadToolChains<Action::OFK_OpenMP>();
+ std::transform(TCRange.first, TCRange.second, std::back_inserter(Triples),
+ [](auto TC) { return TC.second->getTripleString(); });
+ CmdArgs.push_back(Args.MakeArgString(Targets + llvm::join(Triples, ",")));
}
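The rewritten block derives -fopenmp-targets= from the toolchains registered with the compilation instead of re-parsing the user's argument, so the triples are already normalized; the result is a single joined flag such as -fopenmp-targets=nvptx64-nvidia-cuda,amdgcn-amd-amdhsa (triples illustrative).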
bool VirtualFunctionElimination =
@@ -7041,13 +7134,10 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-fforce-enable-int128");
}
- if (Args.hasFlag(options::OPT_fkeep_static_consts,
- options::OPT_fno_keep_static_consts, false))
- CmdArgs.push_back("-fkeep-static-consts");
-
- if (Args.hasFlag(options::OPT_fcomplete_member_pointers,
- options::OPT_fno_complete_member_pointers, false))
- CmdArgs.push_back("-fcomplete-member-pointers");
+ Args.addOptInFlag(CmdArgs, options::OPT_fkeep_static_consts,
+ options::OPT_fno_keep_static_consts);
+ Args.addOptInFlag(CmdArgs, options::OPT_fcomplete_member_pointers,
+ options::OPT_fno_complete_member_pointers);
if (!Args.hasFlag(options::OPT_fcxx_static_destructors,
options::OPT_fno_cxx_static_destructors, true))
@@ -7127,6 +7217,8 @@ void Clang::ConstructJob(Compilation &C, const JobAction &JA,
ArrayRef<InputInfo> FrontendInputs = Input;
if (IsHeaderModulePrecompile)
FrontendInputs = ModuleHeaderInputs;
+ else if (IsExtractAPI)
+ FrontendInputs = ExtractAPIInputs;
else if (Input.isNothing())
FrontendInputs = {};
@@ -7378,6 +7470,12 @@ static EHFlags parseClangCLEHFlags(const Driver &D, const ArgList &Args) {
EH.NoUnwindC = true;
}
+ if (Args.hasArg(options::OPT__SLASH_kernel)) {
+ EH.Synch = false;
+ EH.NoUnwindC = false;
+ EH.Asynch = false;
+ }
+
return EH;
}
@@ -7473,6 +7571,17 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
}
const Driver &D = getToolChain().getDriver();
+
+ // This controls whether or not we perform JustMyCode instrumentation.
+ if (Args.hasFlag(options::OPT__SLASH_JMC, options::OPT__SLASH_JMC_,
+ /*Default=*/false)) {
+ if (*EmitCodeView && *DebugInfoKind >= codegenoptions::DebugInfoConstructor)
+ CmdArgs.push_back("-fjmc");
+ else
+ D.Diag(clang::diag::warn_drv_jmc_requires_debuginfo) << "/JMC"
+ << "'/Zi', '/Z7'";
+ }
+
EHFlags EH = parseClangCLEHFlags(D, Args);
if (!isNVPTX && (EH.Synch || EH.Asynch)) {
if (types::isCXX(InputType))
@@ -7506,6 +7615,32 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
CmdArgs.push_back("-fno-dllexport-inlines");
}
+ if (Args.hasFlag(options::OPT__SLASH_Zc_wchar_t_,
+ options::OPT__SLASH_Zc_wchar_t, false)) {
+ CmdArgs.push_back("-fno-wchar");
+ }
+
+ if (Args.hasArg(options::OPT__SLASH_kernel)) {
+ llvm::Triple::ArchType Arch = getToolChain().getArch();
+ std::vector<std::string> Values =
+ Args.getAllArgValues(options::OPT__SLASH_arch);
+ if (!Values.empty()) {
+ llvm::SmallSet<std::string, 4> SupportedArches;
+ if (Arch == llvm::Triple::x86)
+ SupportedArches.insert("IA32");
+
+ for (auto &V : Values)
+ if (!SupportedArches.contains(V))
+ D.Diag(diag::err_drv_argument_not_allowed_with)
+ << std::string("/arch:").append(V) << "/kernel";
+ }
+
+ CmdArgs.push_back("-fno-rtti");
+ if (Args.hasFlag(options::OPT__SLASH_GR, options::OPT__SLASH_GR_, false))
+ D.Diag(diag::err_drv_argument_not_allowed_with) << "/GR"
+ << "/kernel";
+ }
+
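Taken together, the /kernel hunks turn off all EH (see parseClangCLEHFlags earlier in this diff), force -fno-rtti, reject an explicit /GR, and restrict /arch: to an allowlist that is currently just IA32 on x86 targets; e.g. a hypothetical clang-cl /kernel /arch:AVX x86 build would be rejected with err_drv_argument_not_allowed_with.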
Arg *MostGeneralArg = Args.getLastArg(options::OPT__SLASH_vmg);
Arg *BestCaseArg = Args.getLastArg(options::OPT__SLASH_vmb);
if (MostGeneralArg && BestCaseArg)
@@ -7575,6 +7710,9 @@ void Clang::AddClangCLArgs(const ArgList &Args, types::ID InputType,
CmdArgs.push_back("msvc");
}
+ if (Args.hasArg(options::OPT__SLASH_kernel))
+ CmdArgs.push_back("-fms-kernel");
+
if (Arg *A = Args.getLastArg(options::OPT__SLASH_guard)) {
StringRef GuardArgs = A->getValue();
// The only valid options are "cf", "cf,nochecks", "cf-", "ehcont" and
@@ -7675,6 +7813,8 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
const llvm::Triple &Triple = getToolChain().getEffectiveTriple();
const std::string &TripleStr = Triple.getTriple();
+ const Optional<llvm::Triple> TargetVariantTriple =
+ getToolChain().getTargetVariantTriple();
const auto &D = getToolChain().getDriver();
// Don't warn about "clang -w -c foo.s"
@@ -7692,6 +7832,10 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
// Add the "effective" target triple.
CmdArgs.push_back("-triple");
CmdArgs.push_back(Args.MakeArgString(TripleStr));
+ if (TargetVariantTriple) {
+ CmdArgs.push_back("-darwin-target-variant-triple");
+ CmdArgs.push_back(Args.MakeArgString(TargetVariantTriple->getTriple()));
+ }
// Set the output mode, we currently only expect to be used as a real
// assembler.
@@ -7759,7 +7903,8 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
DebugInfoKind = (WantDebug ? codegenoptions::DebugInfoConstructor
: codegenoptions::NoDebugInfo);
- addDebugPrefixMapArg(getToolChain().getDriver(), Args, CmdArgs);
+ addDebugPrefixMapArg(getToolChain().getDriver(), getToolChain(), Args,
+ CmdArgs);
// Set the AT_producer to the clang version when using the integrated
// assembler on assembly source files.
@@ -7774,7 +7919,6 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
renderDwarfFormat(D, Triple, Args, CmdArgs, DwarfVersion);
RenderDebugInfoCompressionArgs(Args, CmdArgs, D, getToolChain());
-
// Handle -fPIC et al -- the relocation-model affects the assembler
// for some targets.
llvm::Reloc::Model RelocationModel;
@@ -7904,7 +8048,7 @@ void ClangAs::ConstructJob(Compilation &C, const JobAction &JA,
}
if (Triple.isAMDGPU())
- handleAMDGPUCodeObjectVersionOptions(D, Args, CmdArgs);
+ handleAMDGPUCodeObjectVersionOptions(D, Args, CmdArgs, /*IsCC1As=*/true);
assert(Input.isFilename() && "Invalid input.");
CmdArgs.push_back(Input.getFilename());
@@ -7935,8 +8079,10 @@ void OffloadBundler::ConstructJob(Compilation &C, const JobAction &JA,
// The bundling command looks like this:
// clang-offload-bundler -type=bc
// -targets=host-triple,openmp-triple1,openmp-triple2
- // -outputs=input_file
- // -inputs=unbundle_file_host,unbundle_file_tgt1,unbundle_file_tgt2"
+ // -output=output_file
+ // -input=unbundle_file_host
+ // -input=unbundle_file_tgt1
+ // -input=unbundle_file_tgt2
ArgStringList CmdArgs;
@@ -7997,14 +8143,12 @@ void OffloadBundler::ConstructJob(Compilation &C, const JobAction &JA,
// Get bundled file command.
CmdArgs.push_back(
- TCArgs.MakeArgString(Twine("-outputs=") + Output.getFilename()));
+ TCArgs.MakeArgString(Twine("-output=") + Output.getFilename()));
// Get unbundled files command.
- SmallString<128> UB;
- UB += "-inputs=";
for (unsigned I = 0; I < Inputs.size(); ++I) {
- if (I)
- UB += ',';
+ SmallString<128> UB;
+ UB += "-input=";
// Find ToolChain for this input.
const ToolChain *CurTC = &getToolChain();
@@ -8019,9 +8163,8 @@ void OffloadBundler::ConstructJob(Compilation &C, const JobAction &JA,
} else {
UB += CurTC->getInputFilename(Inputs[I]);
}
+ CmdArgs.push_back(TCArgs.MakeArgString(UB));
}
- CmdArgs.push_back(TCArgs.MakeArgString(UB));
-
// All the inputs are encoded as commands.
C.addCommand(std::make_unique<Command>(
JA, *this, ResponseFileSupport::None(),
@@ -8039,8 +8182,10 @@ void OffloadBundler::ConstructJobMultipleOutputs(
// The unbundling command looks like this:
// clang-offload-bundler -type=bc
// -targets=host-triple,openmp-triple1,openmp-triple2
- // -inputs=input_file
- // -outputs=unbundle_file_host,unbundle_file_tgt1,unbundle_file_tgt2"
+ // -input=input_file
+ // -output=unbundle_file_host
+ // -output=unbundle_file_tgt1
+ // -output=unbundle_file_tgt2
// -unbundle
ArgStringList CmdArgs;
@@ -8092,17 +8237,15 @@ void OffloadBundler::ConstructJobMultipleOutputs(
// Get bundled file command.
CmdArgs.push_back(
- TCArgs.MakeArgString(Twine("-inputs=") + Input.getFilename()));
+ TCArgs.MakeArgString(Twine("-input=") + Input.getFilename()));
// Get unbundled files command.
- SmallString<128> UB;
- UB += "-outputs=";
for (unsigned I = 0; I < Outputs.size(); ++I) {
- if (I)
- UB += ',';
+ SmallString<128> UB;
+ UB += "-output=";
UB += DepInfo[I].DependentToolChain->getInputFilename(Outputs[I]);
+ CmdArgs.push_back(TCArgs.MakeArgString(UB));
}
- CmdArgs.push_back(TCArgs.MakeArgString(UB));
CmdArgs.push_back("-unbundle");
CmdArgs.push_back("-allow-missing-bundles");
@@ -8143,6 +8286,58 @@ void OffloadWrapper::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs, Inputs, Output));
}
+void OffloadPackager::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const llvm::opt::ArgList &Args,
+ const char *LinkingOutput) const {
+ ArgStringList CmdArgs;
+
+ // Add the output file name.
+ assert(Output.isFilename() && "Invalid output.");
+ CmdArgs.push_back("-o");
+ CmdArgs.push_back(Output.getFilename());
+
+ // Create the inputs to bundle the needed metadata.
+ for (const InputInfo &Input : Inputs) {
+ const Action *OffloadAction = Input.getAction();
+ const ToolChain *TC = OffloadAction->getOffloadingToolChain();
+ const ArgList &TCArgs =
+ C.getArgsForToolChain(TC, OffloadAction->getOffloadingArch(),
+ OffloadAction->getOffloadingDeviceKind());
+ StringRef File = C.getArgs().MakeArgString(TC->getInputFilename(Input));
+ StringRef Arch = (OffloadAction->getOffloadingArch())
+ ? OffloadAction->getOffloadingArch()
+ : TCArgs.getLastArgValue(options::OPT_march_EQ);
+ StringRef Kind =
+ Action::GetOffloadKindName(OffloadAction->getOffloadingDeviceKind());
+
+ ArgStringList Features;
+ SmallVector<StringRef> FeatureArgs;
+ getTargetFeatures(TC->getDriver(), TC->getTriple(), Args, Features, false);
+ llvm::copy_if(Features, std::back_inserter(FeatureArgs),
+ [](StringRef Arg) { return !Arg.startswith("-target"); });
+
+ SmallVector<std::string> Parts{
+ "file=" + File.str(),
+ "triple=" + TC->getTripleString(),
+ "arch=" + Arch.str(),
+ "kind=" + Kind.str(),
+ };
+
+ if (TC->getDriver().isUsingLTO(/* IsOffload */ true))
+ for (StringRef Feature : FeatureArgs)
+ Parts.emplace_back("feature=" + Feature.str());
+
+ CmdArgs.push_back(Args.MakeArgString("--image=" + llvm::join(Parts, ",")));
+ }
+
+ C.addCommand(std::make_unique<Command>(
+ JA, *this, ResponseFileSupport::None(),
+ Args.MakeArgString(getToolChain().GetProgramPath(getShortName())),
+ CmdArgs, Inputs, Output));
+}
+
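Each input becomes one --image= argument whose key=value fields come straight from the loop above. An illustrative packager command line (file name and arch invented):

    clang-offload-packager -o a.img \
      --image=file=device.bc,triple=nvptx64-nvidia-cuda,arch=sm_70,kind=openmp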
void LinkerWrapper::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -8154,58 +8349,44 @@ void LinkerWrapper::ConstructJob(Compilation &C, const JobAction &JA,
ArgStringList CmdArgs;
// Pass the CUDA path to the linker wrapper tool.
- for (auto &I : llvm::make_range(OpenMPTCRange.first, OpenMPTCRange.second)) {
- const ToolChain *TC = I.second;
- if (TC->getTriple().isNVPTX()) {
- CudaInstallationDetector CudaInstallation(D, TheTriple, Args);
- if (CudaInstallation.isValid())
- CmdArgs.push_back(Args.MakeArgString(
- "--cuda-path=" + CudaInstallation.getInstallPath()));
- break;
+ for (Action::OffloadKind Kind : {Action::OFK_Cuda, Action::OFK_OpenMP}) {
+ auto TCRange = C.getOffloadToolChains(Kind);
+ for (auto &I : llvm::make_range(TCRange.first, TCRange.second)) {
+ const ToolChain *TC = I.second;
+ if (TC->getTriple().isNVPTX()) {
+ CudaInstallationDetector CudaInstallation(D, TheTriple, Args);
+ if (CudaInstallation.isValid())
+ CmdArgs.push_back(Args.MakeArgString(
+ "--cuda-path=" + CudaInstallation.getInstallPath()));
+ break;
+ }
}
}
- if (D.isUsingLTO(/* IsOffload */ true)) {
- // Pass in target features for each toolchain.
- for (auto &I :
- llvm::make_range(OpenMPTCRange.first, OpenMPTCRange.second)) {
- const ToolChain *TC = I.second;
- const ArgList &TCArgs = C.getArgsForToolChain(TC, "", Action::OFK_OpenMP);
- ArgStringList FeatureArgs;
- TC->addClangTargetOptions(TCArgs, FeatureArgs, Action::OFK_OpenMP);
- auto FeatureIt = llvm::find(FeatureArgs, "-target-feature");
- CmdArgs.push_back(Args.MakeArgString(
- "-target-feature=" + TC->getTripleString() + "=" + *(FeatureIt + 1)));
- }
+ // Get the AMDGPU math libraries.
+ // FIXME: This method is bad, remove once AMDGPU has a proper math library
+ // (see AMDGCN::OpenMPLinker::constructLLVMLinkCommand).
+ for (auto &I : llvm::make_range(OpenMPTCRange.first, OpenMPTCRange.second)) {
+ const ToolChain *TC = I.second;
- // Pass in the bitcode library to be linked during LTO.
- for (auto &I :
- llvm::make_range(OpenMPTCRange.first, OpenMPTCRange.second)) {
- const ToolChain *TC = I.second;
- const Driver &TCDriver = TC->getDriver();
- const ArgList &TCArgs = C.getArgsForToolChain(TC, "", Action::OFK_OpenMP);
- StringRef Arch = TCArgs.getLastArgValue(options::OPT_march_EQ);
-
- std::string BitcodeSuffix;
- if (TCArgs.hasFlag(options::OPT_fopenmp_target_new_runtime,
- options::OPT_fno_openmp_target_new_runtime, true))
- BitcodeSuffix += "new-";
- if (TC->getTriple().isNVPTX())
- BitcodeSuffix += "nvptx-";
- else if (TC->getTriple().isAMDGPU())
- BitcodeSuffix += "amdgpu-";
- BitcodeSuffix += Arch;
-
- ArgStringList BitcodeLibrary;
- addOpenMPDeviceRTL(TCDriver, TCArgs, BitcodeLibrary, BitcodeSuffix,
- TC->getTriple());
-
- if (!BitcodeLibrary.empty())
- CmdArgs.push_back(
- Args.MakeArgString("-target-library=" + TC->getTripleString() +
- "-" + Arch + "=" + BitcodeLibrary.back()));
- }
+ if (!TC->getTriple().isAMDGPU() || Args.hasArg(options::OPT_nogpulib))
+ continue;
+ const ArgList &TCArgs = C.getArgsForToolChain(TC, "", Action::OFK_OpenMP);
+ StringRef Arch = TCArgs.getLastArgValue(options::OPT_march_EQ);
+ const toolchains::ROCMToolChain RocmTC(TC->getDriver(), TC->getTriple(),
+ TCArgs);
+
+ SmallVector<std::string, 12> BCLibs =
+ RocmTC.getCommonDeviceLibNames(TCArgs, Arch.str());
+
+ for (StringRef LibName : BCLibs)
+ CmdArgs.push_back(Args.MakeArgString(
+ "-target-library=" + Action::GetOffloadKindName(Action::OFK_OpenMP) +
+ "-" + TC->getTripleString() + "-" + Arch + "=" + LibName));
+ }
+
+ if (D.isUsingLTO(/* IsOffload */ true)) {
// Pass in the optimization level to use for LTO.
if (const Arg *A = Args.getLastArg(options::OPT_O_Group)) {
StringRef OOpt;
@@ -8261,6 +8442,20 @@ void LinkerWrapper::ConstructJob(Compilation &C, const JobAction &JA,
Linker->ConstructJob(C, JA, Output, Inputs, Args, LinkingOutput);
const auto &LinkCommand = C.getJobs().getJobs().back();
+ // Forward -Xoffload-linker<-triple> arguments to the device link job.
+ for (auto *Arg : Args.filtered(options::OPT_Xoffload_linker)) {
+ StringRef Val = Arg->getValue(0);
+ if (Val.empty())
+ CmdArgs.push_back(
+ Args.MakeArgString(Twine("-device-linker=") + Arg->getValue(1)));
+ else
+ CmdArgs.push_back(Args.MakeArgString(
+ "-device-linker=" +
+ ToolChain::getOpenMPTriple(Val.drop_front()).getTriple() + "=" +
+ Arg->getValue(1)));
+ }
+ Args.ClaimAllArgs(options::OPT_Xoffload_linker);
+
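The bare spelling applies to every device link job, while a triple-suffixed spelling targets one device. Illustratively, -Xoffload-linker -v is forwarded as -device-linker=-v, and -Xoffload-linker-nvptx64 -v as -device-linker=<triple>=-v, where <triple> is whatever ToolChain::getOpenMPTriple normalizes nvptx64 to.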
// Add the linker arguments to be forwarded by the wrapper.
CmdArgs.push_back("-linker-path");
CmdArgs.push_back(LinkCommand->getExecutable());
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h
index 79407c9884d5..37263efd57a5 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Clang.h
@@ -170,6 +170,19 @@ public:
const char *LinkingOutput) const override;
};
+/// Offload binary tool.
+class LLVM_LIBRARY_VISIBILITY OffloadPackager final : public Tool {
+public:
+ OffloadPackager(const ToolChain &TC)
+ : Tool("Offload::Packager", "clang-offload-packager", TC) {}
+
+ bool hasIntegratedCPP() const override { return false; }
+ void ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output, const InputInfoList &Inputs,
+ const llvm::opt::ArgList &TCArgs,
+ const char *LinkingOutput) const override;
+};
+
/// Linker wrapper tool.
class LLVM_LIBRARY_VISIBILITY LinkerWrapper final : public Tool {
const Tool *Linker;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp
index 8f9244cae8db..2d53b829b01c 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.cpp
@@ -143,28 +143,16 @@ void tools::handleTargetFeaturesGroup(const ArgList &Args,
}
}
-std::vector<StringRef>
-tools::unifyTargetFeatures(const std::vector<StringRef> &Features) {
- std::vector<StringRef> UnifiedFeatures;
- // Find the last of each feature.
- llvm::StringMap<unsigned> LastOpt;
- for (unsigned I = 0, N = Features.size(); I < N; ++I) {
- StringRef Name = Features[I];
- assert(Name[0] == '-' || Name[0] == '+');
- LastOpt[Name.drop_front(1)] = I;
- }
-
- for (unsigned I = 0, N = Features.size(); I < N; ++I) {
- // If this feature was overridden, ignore it.
- StringRef Name = Features[I];
- llvm::StringMap<unsigned>::iterator LastI = LastOpt.find(Name.drop_front(1));
- assert(LastI != LastOpt.end());
- unsigned Last = LastI->second;
- if (Last != I)
- continue;
-
- UnifiedFeatures.push_back(Name);
+SmallVector<StringRef>
+tools::unifyTargetFeatures(ArrayRef<StringRef> Features) {
+  // Scan from the end and keep only the last occurrence of each feature.
+ SmallVector<StringRef> UnifiedFeatures;
+ llvm::DenseSet<StringRef> UsedFeatures;
+ for (StringRef Feature : llvm::reverse(Features)) {
+ if (UsedFeatures.insert(Feature.drop_front()).second)
+ UnifiedFeatures.insert(UnifiedFeatures.begin(), Feature);
}
+
return UnifiedFeatures;
}
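A minimal standalone sketch of the keep-last semantics implemented above, using plain standard-library types instead of the driver's LLVM containers (names and example values are illustrative):

    #include <string>
    #include <unordered_set>
    #include <vector>

    std::vector<std::string> unifyFeatures(const std::vector<std::string> &In) {
      std::vector<std::string> Out;
      std::unordered_set<std::string> Seen;
      // Scan from the end; "+foo" and "-foo" collide once the sign is
      // stripped, so only the last occurrence of each feature survives.
      for (auto It = In.rbegin(); It != In.rend(); ++It)
        if (Seen.insert(It->substr(1)).second)
          Out.insert(Out.begin(), *It);
      return Out; // {"+sse", "-sse", "+avx"} -> {"-sse", "+avx"}
    }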
@@ -255,6 +243,10 @@ void tools::AddLinkerInputs(const ToolChain &TC, const InputInfoList &Inputs,
continue;
}
+ // In some error cases, the input could be Nothing; skip those.
+ if (II.isNothing())
+ continue;
+
// Otherwise, this is a linker input argument.
const Arg &A = II.getInputArg();
@@ -424,6 +416,13 @@ std::string tools::getCPUName(const Driver &D, const ArgList &Args,
return TargetCPUName;
}
+ case llvm::Triple::csky:
+ if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
+ return A->getValue();
+ else if (const Arg *A = Args.getLastArg(options::OPT_march_EQ))
+ return A->getValue();
+ else
+ return "ck810";
case llvm::Triple::riscv32:
case llvm::Triple::riscv64:
if (const Arg *A = Args.getLastArg(options::OPT_mcpu_EQ))
@@ -475,9 +474,9 @@ llvm::StringRef tools::getLTOParallelism(const ArgList &Args, const Driver &D) {
return LtoJobsArg->getValue();
}
-// CloudABI uses -ffunction-sections and -fdata-sections by default.
+// CloudABI and PS4/PS5 use -ffunction-sections and -fdata-sections by default.
bool tools::isUseSeparateSections(const llvm::Triple &Triple) {
- return Triple.getOS() == llvm::Triple::CloudABI;
+ return Triple.getOS() == llvm::Triple::CloudABI || Triple.isPS();
}
void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
@@ -548,6 +547,9 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
CmdArgs.push_back(
Args.MakeArgString("-plugin-opt=jobs=" + Twine(Parallelism)));
+ if (!CLANG_ENABLE_OPAQUE_POINTERS_INTERNAL)
+ CmdArgs.push_back(Args.MakeArgString("-plugin-opt=no-opaque-pointers"));
+
// If an explicit debugger tuning argument appeared, pass it along.
if (Arg *A = Args.getLastArg(options::OPT_gTune_Group,
options::OPT_ggdbN_Group)) {
@@ -574,6 +576,13 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
CmdArgs.push_back("-plugin-opt=-data-sections");
}
+ // Pass an option to enable split machine functions.
+ if (auto *A = Args.getLastArg(options::OPT_fsplit_machine_functions,
+ options::OPT_fno_split_machine_functions)) {
+ if (A->getOption().matches(options::OPT_fsplit_machine_functions))
+ CmdArgs.push_back("-plugin-opt=-split-machine-functions");
+ }
+
if (Arg *A = getLastProfileSampleUseArg(Args)) {
StringRef FName = A->getValue();
if (!llvm::sys::fs::exists(FName))
@@ -612,15 +621,6 @@ void tools::addLTOOptions(const ToolChain &ToolChain, const ArgList &Args,
Path));
}
- // Pass an option to enable/disable the new pass manager.
- if (auto *A = Args.getLastArg(options::OPT_flegacy_pass_manager,
- options::OPT_fno_legacy_pass_manager)) {
- if (A->getOption().matches(options::OPT_flegacy_pass_manager))
- CmdArgs.push_back("-plugin-opt=legacy-pass-manager");
- else
- CmdArgs.push_back("-plugin-opt=new-pass-manager");
- }
-
// Setup statistics file output.
SmallString<128> StatsFile = getStatsFileName(Args, Output, Input, D);
if (!StatsFile.empty())
@@ -727,6 +727,9 @@ bool tools::addOpenMPRuntime(ArgStringList &CmdArgs, const ToolChain &TC,
if (IsOffloadingHost)
CmdArgs.push_back("-lomptarget");
+ if (IsOffloadingHost && TC.getDriver().isUsingLTO(/* IsOffload */ true))
+ CmdArgs.push_back("-lomptarget.devicertl");
+
addArchSpecificRPath(TC, Args, CmdArgs);
if (RTKind == Driver::OMPRT_OMP)
@@ -736,6 +739,41 @@ bool tools::addOpenMPRuntime(ArgStringList &CmdArgs, const ToolChain &TC,
return true;
}
+void tools::addFortranRuntimeLibs(const ToolChain &TC,
+ llvm::opt::ArgStringList &CmdArgs) {
+ if (TC.getTriple().isKnownWindowsMSVCEnvironment()) {
+ CmdArgs.push_back("Fortran_main.lib");
+ CmdArgs.push_back("FortranRuntime.lib");
+ CmdArgs.push_back("FortranDecimal.lib");
+ } else {
+ CmdArgs.push_back("-lFortran_main");
+ CmdArgs.push_back("-lFortranRuntime");
+ CmdArgs.push_back("-lFortranDecimal");
+ }
+}
+
+void tools::addFortranRuntimeLibraryPath(const ToolChain &TC,
+ const llvm::opt::ArgList &Args,
+ ArgStringList &CmdArgs) {
+  // NOTE: Generating executables with Flang is still considered experimental,
+  // so this is guarded behind a command-line option.
+  // TODO: Make this work unconditionally once Flang is mature enough.
+ if (!Args.hasArg(options::OPT_flang_experimental_exec))
+ return;
+
+  // Default to the <driver-path>/../lib directory. This works fine on the
+  // platforms that we have tested so far. We will probably have to refine
+  // this in the future. In particular, on some platforms, we may need to use
+  // lib64 instead of lib.
+ SmallString<256> DefaultLibPath =
+ llvm::sys::path::parent_path(TC.getDriver().Dir);
+ llvm::sys::path::append(DefaultLibPath, "lib");
+ if (TC.getTriple().isKnownWindowsMSVCEnvironment())
+ CmdArgs.push_back(Args.MakeArgString("-libpath:" + DefaultLibPath));
+ else
+ CmdArgs.push_back(Args.MakeArgString("-L" + DefaultLibPath));
+}
+
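Under the experimental flag, a flang-driven link line would thus gain roughly the following on an ELF target (install prefix illustrative), with the -libpath:/*.lib spellings used on MSVC targets instead:

    -L/usr/local/llvm/lib -lFortran_main -lFortranRuntime -lFortranDecimal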
static void addSanitizerRuntime(const ToolChain &TC, const ArgList &Args,
ArgStringList &CmdArgs, StringRef Sanitizer,
bool IsShared, bool IsWhole) {
@@ -783,11 +821,6 @@ const char *tools::getAsNeededOption(const ToolChain &TC, bool as_needed) {
void tools::linkSanitizerRuntimeDeps(const ToolChain &TC,
ArgStringList &CmdArgs) {
- // Fuchsia never needs these. Any sanitizer runtimes with system
- // dependencies use the `.deplibs` feature instead.
- if (TC.getTriple().isOSFuchsia())
- return;
-
// Force linking against the system libraries sanitizers depends on
// (see PR15823 why this is necessary).
CmdArgs.push_back(getAsNeededOption(TC, false));
@@ -809,6 +842,12 @@ void tools::linkSanitizerRuntimeDeps(const ToolChain &TC,
TC.getTriple().isOSNetBSD() ||
TC.getTriple().isOSOpenBSD())
CmdArgs.push_back("-lexecinfo");
+  // There is no libresolv on Android, FreeBSD, OpenBSD, etc. On musl,
+  // libresolv.a, even if it exists, is an empty archive that merely satisfies
+  // the POSIX -lresolv requirement.
+ if (TC.getTriple().isOSLinux() && !TC.getTriple().isAndroid() &&
+ !TC.getTriple().isMusl())
+ CmdArgs.push_back("-lresolv");
}
static void
@@ -850,6 +889,8 @@ collectSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
SharedRuntimes.push_back("hwasan_aliases");
else
SharedRuntimes.push_back("hwasan");
+ if (!Args.hasArg(options::OPT_shared))
+ HelperStaticRuntimes.push_back("hwasan-preinit");
}
}
@@ -1003,6 +1044,19 @@ bool tools::addSanitizerRuntimes(const ToolChain &TC, const ArgList &Args,
if (SanArgs.hasCrossDsoCfi() && !AddExportDynamic)
CmdArgs.push_back("--export-dynamic-symbol=__cfi_check");
+ if (SanArgs.hasMemTag()) {
+ if (!TC.getTriple().isAndroid()) {
+ TC.getDriver().Diag(diag::err_drv_unsupported_opt_for_target)
+ << "-fsanitize=memtag*" << TC.getTriple().str();
+ }
+ CmdArgs.push_back(
+ Args.MakeArgString("--android-memtag-mode=" + SanArgs.getMemtagMode()));
+ if (SanArgs.hasMemtagHeap())
+ CmdArgs.push_back("--android-memtag-heap");
+ if (SanArgs.hasMemtagStack())
+ CmdArgs.push_back("--android-memtag-stack");
+ }
+
return !StaticRuntimes.empty() || !NonWholeStaticRuntimes.empty();
}
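For illustration, an Android link with heap and stack MemTag sanitization enabled in (hypothetical) sync mode would add roughly:

    --android-memtag-mode=sync --android-memtag-heap --android-memtag-stack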
@@ -1245,30 +1299,31 @@ tools::ParsePICArgs(const ToolChain &ToolChain, const ArgList &Args) {
O.matches(options::OPT_fPIE) || O.matches(options::OPT_fPIC);
} else {
PIE = PIC = false;
- if (EffectiveTriple.isPS4CPU()) {
+ if (EffectiveTriple.isPS()) {
Arg *ModelArg = Args.getLastArg(options::OPT_mcmodel_EQ);
StringRef Model = ModelArg ? ModelArg->getValue() : "";
if (Model != "kernel") {
PIC = true;
- ToolChain.getDriver().Diag(diag::warn_drv_ps4_force_pic)
- << LastPICArg->getSpelling();
+ ToolChain.getDriver().Diag(diag::warn_drv_ps_force_pic)
+ << LastPICArg->getSpelling()
+ << (EffectiveTriple.isPS4() ? "PS4" : "PS5");
}
}
}
}
}
- // Introduce a Darwin and PS4-specific hack. If the default is PIC, but the
- // PIC level would've been set to level 1, force it back to level 2 PIC
+ // Introduce a Darwin and PS4/PS5-specific hack. If the default is PIC, but
+ // the PIC level would've been set to level 1, force it back to level 2 PIC
// instead.
- if (PIC && (Triple.isOSDarwin() || EffectiveTriple.isPS4CPU()))
+ if (PIC && (Triple.isOSDarwin() || EffectiveTriple.isPS()))
IsPICLevelTwo |= ToolChain.isPICDefault();
// These kernel flags are a trump card: they disable PIC/PIE
// generation, independent of the argument order.
if (KernelOrKext &&
((!EffectiveTriple.isiOS() || EffectiveTriple.isOSVersionLT(6)) &&
- !EffectiveTriple.isWatchOS()))
+ !EffectiveTriple.isWatchOS() && !EffectiveTriple.isDriverKit()))
PIC = PIE = false;
if (Arg *A = Args.getLastArg(options::OPT_mdynamic_no_pic)) {
@@ -1423,17 +1478,12 @@ enum class LibGccType { UnspecifiedLibGcc, StaticLibGcc, SharedLibGcc };
static LibGccType getLibGccType(const ToolChain &TC, const Driver &D,
const ArgList &Args) {
if (Args.hasArg(options::OPT_static_libgcc) ||
- Args.hasArg(options::OPT_static) || Args.hasArg(options::OPT_static_pie))
+ Args.hasArg(options::OPT_static) || Args.hasArg(options::OPT_static_pie) ||
+ // The Android NDK only provides libunwind.a, not libunwind.so.
+ TC.getTriple().isAndroid())
return LibGccType::StaticLibGcc;
if (Args.hasArg(options::OPT_shared_libgcc))
return LibGccType::SharedLibGcc;
- // The Android NDK only provides libunwind.a, not libunwind.so.
- if (TC.getTriple().isAndroid())
- return LibGccType::StaticLibGcc;
- // For MinGW, don't imply a shared libgcc here, we only want to return
- // SharedLibGcc if that was explicitly requested.
- if (D.CCCIsCXX() && !TC.getTriple().isOSCygMing())
- return LibGccType::SharedLibGcc;
return LibGccType::UnspecifiedLibGcc;
}
@@ -1461,6 +1511,7 @@ static void AddUnwindLibrary(const ToolChain &TC, const Driver &D,
LibGccType LGT = getLibGccType(TC, D, Args);
bool AsNeeded = LGT == LibGccType::UnspecifiedLibGcc &&
+ (UNW == ToolChain::UNW_CompilerRT || !D.CCCIsCXX()) &&
!TC.getTriple().isAndroid() &&
!TC.getTriple().isOSCygMing() && !TC.getTriple().isOSAIX();
if (AsNeeded)
@@ -1484,15 +1535,15 @@ static void AddUnwindLibrary(const ToolChain &TC, const Driver &D,
CmdArgs.push_back("-lunwind");
} else if (LGT == LibGccType::StaticLibGcc) {
CmdArgs.push_back("-l:libunwind.a");
- } else if (TC.getTriple().isOSCygMing()) {
- if (LGT == LibGccType::SharedLibGcc)
+ } else if (LGT == LibGccType::SharedLibGcc) {
+ if (TC.getTriple().isOSCygMing())
CmdArgs.push_back("-l:libunwind.dll.a");
else
- // Let the linker choose between libunwind.dll.a and libunwind.a
- // depending on what's available, and depending on the -static flag
- CmdArgs.push_back("-lunwind");
+ CmdArgs.push_back("-l:libunwind.so");
} else {
- CmdArgs.push_back("-l:libunwind.so");
+ // Let the linker choose between libunwind.so and libunwind.a
+ // depending on what's available, and depending on the -static flag
+ CmdArgs.push_back("-lunwind");
}
break;
}
@@ -1504,10 +1555,12 @@ static void AddUnwindLibrary(const ToolChain &TC, const Driver &D,
static void AddLibgcc(const ToolChain &TC, const Driver &D,
ArgStringList &CmdArgs, const ArgList &Args) {
LibGccType LGT = getLibGccType(TC, D, Args);
- if (LGT != LibGccType::SharedLibGcc)
+ if (LGT == LibGccType::StaticLibGcc ||
+ (LGT == LibGccType::UnspecifiedLibGcc && !D.CCCIsCXX()))
CmdArgs.push_back("-lgcc");
AddUnwindLibrary(TC, D, CmdArgs, Args);
- if (LGT == LibGccType::SharedLibGcc)
+ if (LGT == LibGccType::SharedLibGcc ||
+ (LGT == LibGccType::UnspecifiedLibGcc && D.CCCIsCXX()))
CmdArgs.push_back("-lgcc");
}
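Sketching the net ordering after this change, with <unwind> standing for whatever AddUnwindLibrary emits:

    static libgcc, or unspecified via the C driver:    -lgcc <unwind>
    shared libgcc, or unspecified via the C++ driver:  <unwind> -lgcc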
@@ -1740,8 +1793,13 @@ bool tools::GetSDLFromOffloadArchive(
for (auto LPath : LibraryPaths) {
ArchiveOfBundles.clear();
- AOBFileNames.push_back(Twine(LPath + "/libdevice/lib" + Lib + ".a").str());
- AOBFileNames.push_back(Twine(LPath + "/lib" + Lib + ".a").str());
+ llvm::Triple Triple(D.getTargetTriple());
+ bool IsMSVC = Triple.isWindowsMSVCEnvironment();
+ for (auto Prefix : {"/libdevice/", "/"}) {
+ if (IsMSVC)
+ AOBFileNames.push_back(Twine(LPath + Prefix + Lib + ".lib").str());
+ AOBFileNames.push_back(Twine(LPath + Prefix + "lib" + Lib + ".a").str());
+ }
for (auto AOB : AOBFileNames) {
if (llvm::sys::fs::exists(AOB)) {
@@ -1773,9 +1831,9 @@ bool tools::GetSDLFromOffloadArchive(
std::string UnbundleArg("-unbundle");
std::string TypeArg("-type=a");
- std::string InputArg("-inputs=" + ArchiveOfBundles);
+ std::string InputArg("-input=" + ArchiveOfBundles);
std::string OffloadArg("-targets=" + std::string(DeviceTriple));
- std::string OutputArg("-outputs=" + OutputLib);
+ std::string OutputArg("-output=" + OutputLib);
const char *UBProgram = DriverArgs.MakeArgString(
T.getToolChain().GetProgramPath("clang-offload-bundler"));
@@ -1792,6 +1850,12 @@ bool tools::GetSDLFromOffloadArchive(
std::string AdditionalArgs("-allow-missing-bundles");
UBArgs.push_back(C.getArgs().MakeArgString(AdditionalArgs));
+  // Add this flag to treat hip and hipv4 offload kinds as compatible with
+  // the openmp offload kind while extracting code objects from a heterogeneous
+  // archive library; the reverse direction is treated as compatible as well.
+ std::string HipCompatibleArgs("-hip-openmp-compatible");
+ UBArgs.push_back(C.getArgs().MakeArgString(HipCompatibleArgs));
+
C.addCommand(std::make_unique<Command>(
JA, T, ResponseFileSupport::AtFileCurCP(), UBProgram, UBArgs, Inputs,
InputInfo(&JA, C.getArgs().MakeArgString(OutputLib))));
@@ -1920,7 +1984,7 @@ getAMDGPUCodeObjectArgument(const Driver &D, const llvm::opt::ArgList &Args) {
void tools::checkAMDGPUCodeObjectVersion(const Driver &D,
const llvm::opt::ArgList &Args) {
const unsigned MinCodeObjVer = 2;
- const unsigned MaxCodeObjVer = 4;
+ const unsigned MaxCodeObjVer = 5;
// Emit warnings for legacy options even if they are overridden.
if (Args.hasArg(options::OPT_mno_code_object_v3_legacy))
@@ -2023,11 +2087,12 @@ void tools::addOpenMPDeviceRTL(const Driver &D,
}
OptSpecifier LibomptargetBCPathOpt =
- Triple.isAMDGCN() ? options::OPT_libomptarget_amdgcn_bc_path_EQ
+ Triple.isAMDGCN() ? options::OPT_libomptarget_amdgpu_bc_path_EQ
: options::OPT_libomptarget_nvptx_bc_path_EQ;
- StringRef ArchPrefix = Triple.isAMDGCN() ? "amdgcn" : "nvptx";
- std::string LibOmpTargetName = "libomptarget-" + BitcodeSuffix.str() + ".bc";
+ StringRef ArchPrefix = Triple.isAMDGCN() ? "amdgpu" : "nvptx";
+ std::string LibOmpTargetName =
+ ("libomptarget-" + ArchPrefix + "-" + BitcodeSuffix + ".bc").str();
// First check whether user specifies bc library
if (const Arg *A = DriverArgs.getLastArg(LibomptargetBCPathOpt)) {
@@ -2063,3 +2128,17 @@ void tools::addOpenMPDeviceRTL(const Driver &D,
<< LibOmpTargetName << ArchPrefix;
}
}
+void tools::addHIPRuntimeLibArgs(const ToolChain &TC,
+ const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) {
+ if (Args.hasArg(options::OPT_hip_link) &&
+ !Args.hasArg(options::OPT_nostdlib) &&
+ !Args.hasArg(options::OPT_no_hip_rt)) {
+ TC.AddHIPRuntimeLibArgs(Args, CmdArgs);
+ } else {
+    // Claim the "no HIP libraries" arguments, if any.
+ for (auto Arg : Args.filtered(options::OPT_no_hip_rt)) {
+ Arg->claim();
+ }
+ }
+}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.h
index 2bba1ee285e6..8e62af70ff7f 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/CommonArgs.h
@@ -120,6 +120,18 @@ bool addOpenMPRuntime(llvm::opt::ArgStringList &CmdArgs, const ToolChain &TC,
bool ForceStaticHostRuntime = false,
bool IsOffloadingHost = false, bool GompNeedsRT = false);
+/// Adds Fortran runtime libraries to \p CmdArgs.
+void addFortranRuntimeLibs(const ToolChain &TC,
+ llvm::opt::ArgStringList &CmdArgs);
+
+/// Adds the path for the Fortran runtime libraries to \p CmdArgs.
+void addFortranRuntimeLibraryPath(const ToolChain &TC,
+ const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs);
+
+void addHIPRuntimeLibArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs);
+
const char *getAsNeededOption(const ToolChain &TC, bool as_needed);
llvm::opt::Arg *getLastProfileUseArg(const llvm::opt::ArgList &Args);
@@ -162,8 +174,7 @@ void handleTargetFeaturesGroup(const llvm::opt::ArgList &Args,
llvm::opt::OptSpecifier Group);
/// If there are multiple +xxx or -xxx features, keep the last one.
-std::vector<StringRef>
-unifyTargetFeatures(const std::vector<StringRef> &Features);
+SmallVector<StringRef> unifyTargetFeatures(ArrayRef<StringRef> Features);
/// Handles the -save-stats option and returns the filename to save statistics
/// to.
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp
index 4a9f6d4c4e3e..5e59677947e6 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.cpp
@@ -447,7 +447,10 @@ void NVPTX::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("--gpu-name");
CmdArgs.push_back(Args.MakeArgString(CudaArchToString(gpu_arch)));
CmdArgs.push_back("--output-file");
- CmdArgs.push_back(Args.MakeArgString(TC.getInputFilename(Output)));
+ const char *OutputFileName = Args.MakeArgString(TC.getInputFilename(Output));
+ if (std::string(OutputFileName) != std::string(Output.getFilename()))
+ C.addTempFile(OutputFileName);
+ CmdArgs.push_back(OutputFileName);
for (const auto& II : Inputs)
CmdArgs.push_back(Args.MakeArgString(II.getFilename()));
@@ -533,8 +536,9 @@ void NVPTX::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const char *Arch = (II.getType() == types::TY_PP_Asm)
? CudaArchToVirtualArchString(gpu_arch)
: gpu_arch_str;
- CmdArgs.push_back(Args.MakeArgString(llvm::Twine("--image=profile=") +
- Arch + ",file=" + II.getFilename()));
+ CmdArgs.push_back(
+ Args.MakeArgString(llvm::Twine("--image=profile=") + Arch +
+ ",file=" + getToolChain().getInputFilename(II)));
}
for (const auto& A : Args.getAllArgValues(options::OPT_Xcuda_fatbinary))
@@ -606,8 +610,8 @@ void NVPTX::OpenMPLinker::ConstructJob(Compilation &C, const JobAction &JA,
if (!II.isFilename())
continue;
- const char *CubinF = C.addTempFile(
- C.getArgs().MakeArgString(getToolChain().getInputFilename(II)));
+ const char *CubinF =
+ C.getArgs().MakeArgString(getToolChain().getInputFilename(II));
CmdArgs.push_back(CubinF);
}
@@ -630,6 +634,45 @@ void NVPTX::OpenMPLinker::ConstructJob(Compilation &C, const JobAction &JA,
Exec, CmdArgs, Inputs, Output));
}
+void NVPTX::getNVPTXTargetFeatures(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
+ std::vector<StringRef> &Features) {
+ if (Args.hasArg(options::OPT_cuda_feature_EQ)) {
+ StringRef PtxFeature =
+ Args.getLastArgValue(options::OPT_cuda_feature_EQ, "+ptx42");
+ Features.push_back(Args.MakeArgString(PtxFeature));
+ return;
+ }
+ CudaInstallationDetector CudaInstallation(D, Triple, Args);
+
+ // New CUDA versions often introduce new instructions that are only supported
+ // by new PTX version, so we need to raise PTX level to enable them in NVPTX
+ // back-end.
+ const char *PtxFeature = nullptr;
+ switch (CudaInstallation.version()) {
+#define CASE_CUDA_VERSION(CUDA_VER, PTX_VER) \
+ case CudaVersion::CUDA_##CUDA_VER: \
+ PtxFeature = "+ptx" #PTX_VER; \
+ break;
+ CASE_CUDA_VERSION(115, 75);
+ CASE_CUDA_VERSION(114, 74);
+ CASE_CUDA_VERSION(113, 73);
+ CASE_CUDA_VERSION(112, 72);
+ CASE_CUDA_VERSION(111, 71);
+ CASE_CUDA_VERSION(110, 70);
+ CASE_CUDA_VERSION(102, 65);
+ CASE_CUDA_VERSION(101, 64);
+ CASE_CUDA_VERSION(100, 63);
+ CASE_CUDA_VERSION(92, 61);
+ CASE_CUDA_VERSION(91, 61);
+ CASE_CUDA_VERSION(90, 60);
+#undef CASE_CUDA_VERSION
+ default:
+ PtxFeature = "+ptx42";
+ }
+ Features.push_back(PtxFeature);
+}
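A hedged usage sketch of the new helper (caller variables assumed to be in scope):

    std::vector<StringRef> Features;
    NVPTX::getNVPTXTargetFeatures(D, TC.getTriple(), Args, Features);
    // With a detected CUDA 11.4 installation and no -cuda-feature= override,
    // Features now holds {"+ptx74"}; unrecognized versions fall back to +ptx42.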
+
/// CUDA toolchain. Our assembler is ptxas, and our "linker" is fatbinary,
/// which isn't properly a linker but nonetheless performs the step of stitching
/// together object files from the assembler into a single blob.
@@ -650,9 +693,8 @@ CudaToolChain::CudaToolChain(const Driver &D, const llvm::Triple &Triple,
std::string CudaToolChain::getInputFilename(const InputInfo &Input) const {
// Only object files are changed, for example assembly files keep their .s
- // extensions. CUDA also continues to use .o as they don't use nvlink but
- // fatbinary.
- if (!(OK == Action::OFK_OpenMP && Input.getType() == types::TY_Object))
+ // extensions.
+ if (Input.getType() != types::TY_Object)
return ToolChain::getInputFilename(Input);
// Replace extension for object files with cubin because nvlink relies on
@@ -701,32 +743,6 @@ void CudaToolChain::addClangTargetOptions(
clang::CudaVersion CudaInstallationVersion = CudaInstallation.version();
- // New CUDA versions often introduce new instructions that are only supported
- // by new PTX version, so we need to raise PTX level to enable them in NVPTX
- // back-end.
- const char *PtxFeature = nullptr;
- switch (CudaInstallationVersion) {
-#define CASE_CUDA_VERSION(CUDA_VER, PTX_VER) \
- case CudaVersion::CUDA_##CUDA_VER: \
- PtxFeature = "+ptx" #PTX_VER; \
- break;
- CASE_CUDA_VERSION(115, 75);
- CASE_CUDA_VERSION(114, 74);
- CASE_CUDA_VERSION(113, 73);
- CASE_CUDA_VERSION(112, 72);
- CASE_CUDA_VERSION(111, 71);
- CASE_CUDA_VERSION(110, 70);
- CASE_CUDA_VERSION(102, 65);
- CASE_CUDA_VERSION(101, 64);
- CASE_CUDA_VERSION(100, 63);
- CASE_CUDA_VERSION(92, 61);
- CASE_CUDA_VERSION(91, 61);
- CASE_CUDA_VERSION(90, 60);
-#undef CASE_CUDA_VERSION
- default:
- PtxFeature = "+ptx42";
- }
- CC1Args.append({"-target-feature", PtxFeature});
if (DriverArgs.hasFlag(options::OPT_fcuda_short_ptr,
options::OPT_fno_cuda_short_ptr, false))
CC1Args.append({"-mllvm", "--nvptx-short-ptr"});
@@ -748,14 +764,7 @@ void CudaToolChain::addClangTargetOptions(
if (getDriver().isUsingLTO(/* IsOffload */ true))
return;
- std::string BitcodeSuffix;
- if (DriverArgs.hasFlag(options::OPT_fopenmp_target_new_runtime,
- options::OPT_fno_openmp_target_new_runtime, true))
- BitcodeSuffix = "new-nvptx-" + GpuArch.str();
- else
- BitcodeSuffix = "nvptx-" + GpuArch.str();
-
- addOpenMPDeviceRTL(getDriver(), DriverArgs, CC1Args, BitcodeSuffix,
+ addOpenMPDeviceRTL(getDriver(), DriverArgs, CC1Args, GpuArch.str(),
getTriple());
AddStaticDeviceLibsPostLinking(getDriver(), DriverArgs, CC1Args, "nvptx",
GpuArch, /*isBitCodeSDL=*/true,
@@ -835,10 +844,10 @@ CudaToolChain::TranslateArgs(const llvm::opt::DerivedArgList &Args,
if (!llvm::is_contained(*DAL, A))
DAL->append(A);
- StringRef Arch = DAL->getLastArgValue(options::OPT_march_EQ);
- if (Arch.empty())
+ if (!DAL->hasArg(options::OPT_march_EQ))
DAL->AddJoinedArg(nullptr, Opts.getOption(options::OPT_march_EQ),
- CLANG_OPENMP_NVPTX_DEFAULT_ARCH);
+ !BoundArch.empty() ? BoundArch
+ : CLANG_OPENMP_NVPTX_DEFAULT_ARCH);
return DAL;
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h
index a7e6e84f4902..809a25227ac4 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Cuda.h
@@ -124,6 +124,10 @@ class LLVM_LIBRARY_VISIBILITY OpenMPLinker : public Tool {
const char *LinkingOutput) const override;
};
+void getNVPTXTargetFeatures(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args,
+ std::vector<StringRef> &Features);
+
} // end namespace NVPTX
} // end namespace tools
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp
index f7da3f187814..0a8a9c6eb6ff 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.cpp
@@ -209,19 +209,18 @@ static bool shouldLinkerNotDedup(bool IsLinkerOnlyAction, const ArgList &Args) {
void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
ArgStringList &CmdArgs,
const InputInfoList &Inputs,
- unsigned Version[5], bool LinkerIsLLD) const {
+ VersionTuple Version, bool LinkerIsLLD) const {
const Driver &D = getToolChain().getDriver();
const toolchains::MachO &MachOTC = getMachOToolChain();
// Newer linkers support -demangle. Pass it if supported and not disabled by
// the user.
- if ((Version[0] >= 100 || LinkerIsLLD) &&
+ if ((Version >= VersionTuple(100) || LinkerIsLLD) &&
!Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
CmdArgs.push_back("-demangle");
- // FIXME: Pass most of the flags below that check Version if LinkerIsLLD too.
-
- if (Args.hasArg(options::OPT_rdynamic) && Version[0] >= 137)
+ if (Args.hasArg(options::OPT_rdynamic) &&
+ (Version >= VersionTuple(137) || LinkerIsLLD))
CmdArgs.push_back("-export_dynamic");
// If we are using App Extension restrictions, pass a flag to the linker
@@ -230,7 +229,8 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
options::OPT_fno_application_extension, false))
CmdArgs.push_back("-application_extension");
- if (D.isUsingLTO() && Version[0] >= 116 && NeedsTempPath(Inputs)) {
+ if (D.isUsingLTO() && (Version >= VersionTuple(116) || LinkerIsLLD) &&
+ NeedsTempPath(Inputs)) {
std::string TmpPathName;
if (D.getLTOMode() == LTOK_Full) {
// If we are using full LTO, then automatically create a temporary file
@@ -259,7 +259,7 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
// clang version won't work anyways.
// lld is built at the same revision as clang and statically links in
// LLVM libraries, so it doesn't need libLTO.dylib.
- if (Version[0] >= 133 && !LinkerIsLLD) {
+ if (Version >= VersionTuple(133) && !LinkerIsLLD) {
// Search for libLTO in <InstalledDir>/../lib/libLTO.dylib
StringRef P = llvm::sys::path::parent_path(D.Dir);
SmallString<128> LibLTOPath(P);
@@ -269,8 +269,11 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
CmdArgs.push_back(C.getArgs().MakeArgString(LibLTOPath));
}
- // ld64 version 262 and above run the deduplicate pass by default.
- if (Version[0] >= 262 && shouldLinkerNotDedup(C.getJobs().empty(), Args))
+  // ld64 version 262 and above runs the deduplicate pass by default.
+  // FIXME: lld doesn't dedup by default. Should we pass `--icf=safe` here
+  // when LinkerIsLLD and `!shouldLinkerNotDedup()`?
+ if (Version >= VersionTuple(262) &&
+ shouldLinkerNotDedup(C.getJobs().empty(), Args))
CmdArgs.push_back("-no_deduplicate");
// Derived from the "link" spec.
@@ -342,7 +345,7 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
Args.AddAllArgs(CmdArgs, options::OPT_init);
// Add the deployment target.
- if (Version[0] >= 520 || LinkerIsLLD)
+ if (Version >= VersionTuple(520) || LinkerIsLLD)
MachOTC.addPlatformVersionArgs(Args, CmdArgs);
else
MachOTC.addMinVersionArgs(Args, CmdArgs);
@@ -368,7 +371,9 @@ void darwin::Linker::AddLinkArgs(Compilation &C, const ArgList &Args,
// Check if the toolchain supports bitcode build flow.
if (MachOTC.SupportsEmbeddedBitcode()) {
CmdArgs.push_back("-bitcode_bundle");
- if (C.getDriver().embedBitcodeMarkerOnly() && Version[0] >= 278) {
+ // FIXME: Pass this if LinkerIsLLD too, once it implements this flag.
+ if (C.getDriver().embedBitcodeMarkerOnly() &&
+ Version >= VersionTuple(278)) {
CmdArgs.push_back("-bitcode_process_mode");
CmdArgs.push_back("marker");
}
@@ -548,12 +553,7 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
return;
}
- unsigned Version[5] = {0, 0, 0, 0, 0};
- if (Arg *A = Args.getLastArg(options::OPT_mlinker_version_EQ)) {
- if (!Driver::GetReleaseVersion(A->getValue(), Version))
- getToolChain().getDriver().Diag(diag::err_drv_invalid_version_number)
- << A->getAsString(Args);
- }
+ VersionTuple Version = getMachOToolChain().getLinkerVersion(Args);
bool LinkerIsLLD;
const char *Exec =
@@ -635,6 +635,13 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
InputFileList.push_back(II.getFilename());
}
+ // Additional linker set-up and flags for Fortran. This is required in order
+ // to generate executables.
+ if (getToolChain().getDriver().IsFlangMode()) {
+ addFortranRuntimeLibraryPath(getToolChain(), Args, CmdArgs);
+ addFortranRuntimeLibs(getToolChain(), CmdArgs);
+ }
+
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs))
addOpenMPRuntime(CmdArgs, getToolChain(), Args);
@@ -712,8 +719,28 @@ void darwin::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
+ // DriverKit's framework doesn't have the same layout as other frameworks.
+ // Add missing search paths if necessary.
+ if (getToolChain().getTriple().getOS() == llvm::Triple::DriverKit) {
+ if (const Arg *Root = Args.getLastArg(options::OPT_isysroot)) {
+ // ld64 fixed the implicit -F and -L paths in ld64-605.1+.
+ if (Version.getMajor() < 605 ||
+ (Version.getMajor() == 605 && Version.getMinor().value_or(0) < 1)) {
+
+ SmallString<128> L(Root->getValue());
+ llvm::sys::path::append(L, "System", "DriverKit", "usr", "lib");
+ CmdArgs.push_back(Args.MakeArgString(std::string("-L") + L));
+
+ SmallString<128> F(Root->getValue());
+ llvm::sys::path::append(F, "System", "DriverKit");
+ llvm::sys::path::append(F, "System", "Library", "Frameworks");
+ CmdArgs.push_back(Args.MakeArgString(std::string("-F") + F));
+ }
+ }
+ }
+
ResponseFileSupport ResponseSupport;
- if (Version[0] >= 705 || LinkerIsLLD) {
+ if (Version >= VersionTuple(705) || LinkerIsLLD) {
ResponseSupport = ResponseFileSupport::AtFileUTF8();
} else {
// For older versions of the linker, use the legacy filelist method instead.
@@ -869,13 +896,13 @@ types::ID MachO::LookupTypeForExtension(StringRef Ext) const {
bool MachO::HasNativeLLVMSupport() const { return true; }
ToolChain::CXXStdlibType Darwin::GetDefaultCXXStdlibType() const {
- // Default to use libc++ on OS X 10.9+ and iOS 7+.
- if ((isTargetMacOSBased() && !isMacosxVersionLT(10, 9)) ||
- (isTargetIOSBased() && !isIPhoneOSVersionLT(7, 0)) ||
- isTargetWatchOSBased())
- return ToolChain::CST_Libcxx;
+ // Use libstdc++ on old targets (OSX < 10.9 and iOS < 7)
+ if ((isTargetMacOSBased() && isMacosxVersionLT(10, 9)) ||
+ (isTargetIOSBased() && isIPhoneOSVersionLT(7, 0)))
+ return ToolChain::CST_Libstdcxx;
- return ToolChain::CST_Libstdcxx;
+ // On all other targets, use libc++
+ return ToolChain::CST_Libcxx;
}
/// Darwin provides an ARC runtime starting in MacOS X 10.7 and iOS 5.0.
@@ -891,7 +918,7 @@ ObjCRuntime Darwin::getDefaultObjCRuntime(bool isNonFragile) const {
/// Darwin provides a blocks runtime starting in MacOS X 10.6 and iOS 3.2.
bool Darwin::hasBlocksRuntime() const {
- if (isTargetWatchOSBased())
+ if (isTargetWatchOSBased() || isTargetDriverKit())
return true;
else if (isTargetIOSBased())
return !isIPhoneOSVersionLT(3, 2);
@@ -979,6 +1006,27 @@ StringRef MachO::getMachOArchName(const ArgList &Args) const {
}
}
+VersionTuple MachO::getLinkerVersion(const llvm::opt::ArgList &Args) const {
+ if (LinkerVersion) {
+#ifndef NDEBUG
+ VersionTuple NewLinkerVersion;
+ if (Arg *A = Args.getLastArg(options::OPT_mlinker_version_EQ))
+ (void)NewLinkerVersion.tryParse(A->getValue());
+ assert(NewLinkerVersion == LinkerVersion);
+#endif
+ return *LinkerVersion;
+ }
+
+ VersionTuple NewLinkerVersion;
+ if (Arg *A = Args.getLastArg(options::OPT_mlinker_version_EQ))
+ if (NewLinkerVersion.tryParse(A->getValue()))
+ getDriver().Diag(diag::err_drv_invalid_version_number)
+ << A->getAsString(Args);
+
+ LinkerVersion = NewLinkerVersion;
+ return *LinkerVersion;
+}
+
Darwin::~Darwin() {}
MachO::~MachO() {}
@@ -997,6 +1045,8 @@ std::string Darwin::ComputeEffectiveClangTriple(const ArgList &Args,
Str += "watchos";
else if (isTargetTvOSBased())
Str += "tvos";
+ else if (isTargetDriverKit())
+ Str += "driverkit";
else if (isTargetIOSBased() || isTargetMacCatalyst())
Str += "ios";
else
@@ -1152,8 +1202,9 @@ void MachO::AddLinkRuntimeLib(const ArgList &Args, ArgStringList &CmdArgs,
DarwinLibName += getOSLibraryNameSuffix();
DarwinLibName += IsShared ? "_dynamic.dylib" : ".a";
SmallString<128> Dir(getDriver().ResourceDir);
- llvm::sys::path::append(
- Dir, "lib", (Opts & RLO_IsEmbedded) ? "macho_embedded" : "darwin");
+ llvm::sys::path::append(Dir, "lib", "darwin");
+ if (Opts & RLO_IsEmbedded)
+ llvm::sys::path::append(Dir, "macho_embedded");
SmallString<128> P(Dir);
llvm::sys::path::append(P, DarwinLibName);
@@ -1197,6 +1248,8 @@ StringRef Darwin::getPlatformFamily() const {
return "AppleTV";
case DarwinPlatformKind::WatchOS:
return "Watch";
+ case DarwinPlatformKind::DriverKit:
+ return "DriverKit";
}
llvm_unreachable("Unsupported platform");
}
@@ -1228,6 +1281,8 @@ StringRef Darwin::getOSLibraryNameSuffix(bool IgnoreSim) const {
case DarwinPlatformKind::WatchOS:
return TargetEnvironment == NativeEnvironment || IgnoreSim ? "watchos"
: "watchossim";
+ case DarwinPlatformKind::DriverKit:
+ return "driverkit";
}
llvm_unreachable("Unsupported platform");
}
@@ -1289,7 +1344,6 @@ void Darwin::addProfileRTLibs(const ArgList &Args,
addExportedSymbol(CmdArgs, "___llvm_profile_filename");
addExportedSymbol(CmdArgs, "___llvm_profile_raw_version");
}
- addExportedSymbol(CmdArgs, "_lprofDirMode");
}
// Align __llvm_prf_{cnts,data} sections to the maximum expected page
@@ -1385,9 +1439,15 @@ void DarwinClang::AddLinkRuntimeLibArgs(const ArgList &Args,
AddLinkRuntimeLib(Args, CmdArgs, "xray-fdr");
}
+ if (isTargetDriverKit() && !Args.hasArg(options::OPT_nodriverkitlib)) {
+ CmdArgs.push_back("-framework");
+ CmdArgs.push_back("DriverKit");
+ }
+
// Otherwise link libSystem, then the dynamic runtime library, and finally any
// target specific static runtime library.
- CmdArgs.push_back("-lSystem");
+ if (!isTargetDriverKit())
+ CmdArgs.push_back("-lSystem");
// Select the dynamic runtime library and the target specific static library.
if (isTargetIOSBased()) {
@@ -1478,6 +1538,10 @@ struct DarwinPlatform {
/// Returns true if the simulator environment can be inferred from the arch.
bool canInferSimulatorFromArch() const { return InferSimulatorFromArch; }
+ const Optional<llvm::Triple> &getTargetVariantTriple() const {
+ return TargetVariantTriple;
+ }
+
/// Adds the -m<os>-version-min argument to the compiler invocation.
void addOSVersionMinArgument(DerivedArgList &Args, const OptTable &Opts) {
if (Argument)
@@ -1498,6 +1562,9 @@ struct DarwinPlatform {
case DarwinPlatformKind::WatchOS:
Opt = options::OPT_mwatchos_version_min_EQ;
break;
+ case DarwinPlatformKind::DriverKit:
+ // DriverKit always explicitly provides a version in the triple.
+ return;
}
Argument = Args.MakeJoinedArg(nullptr, Opts.getOption(Opt), OSVersion);
Args.append(Argument);
@@ -1540,6 +1607,16 @@ struct DarwinPlatform {
}
}
}
+ // In a zippered build, we could be building for a macOS target that's
+ // lower than the version that's implied by the OS version. In that case
+ // we need to use the minimum version as the native target version.
+ if (TargetVariantTriple) {
+ auto TargetVariantVersion = TargetVariantTriple->getOSVersion();
+ if (TargetVariantVersion.getMajor()) {
+ if (TargetVariantVersion < NativeTargetVersion)
+ NativeTargetVersion = TargetVariantVersion;
+ }
+ }
break;
}
default:
@@ -1549,12 +1626,14 @@ struct DarwinPlatform {
static DarwinPlatform
createFromTarget(const llvm::Triple &TT, StringRef OSVersion, Arg *A,
+ Optional<llvm::Triple> TargetVariantTriple,
const Optional<DarwinSDKInfo> &SDKInfo) {
DarwinPlatform Result(TargetArg, getPlatformFromOS(TT.getOS()), OSVersion,
A);
VersionTuple OsVersion = TT.getOSVersion();
if (OsVersion.getMajor() == 0)
Result.HasOSVersion = false;
+ Result.TargetVariantTriple = TargetVariantTriple;
Result.setEnvironment(TT.getEnvironment(), OsVersion, SDKInfo);
return Result;
}
@@ -1625,6 +1704,8 @@ private:
return DarwinPlatformKind::TvOS;
case llvm::Triple::WatchOS:
return DarwinPlatformKind::WatchOS;
+ case llvm::Triple::DriverKit:
+ return DarwinPlatformKind::DriverKit;
default:
llvm_unreachable("Unable to infer Darwin variant");
}
@@ -1638,6 +1719,7 @@ private:
bool HasOSVersion = true, InferSimulatorFromArch = true;
Arg *Argument;
StringRef EnvVarName;
+ Optional<llvm::Triple> TargetVariantTriple;
};
/// Returns the deployment target that's specified using the -m<os>-version-min
@@ -1693,6 +1775,7 @@ getDeploymentTargetFromEnvironmentVariables(const Driver &TheDriver,
"IPHONEOS_DEPLOYMENT_TARGET",
"TVOS_DEPLOYMENT_TARGET",
"WATCHOS_DEPLOYMENT_TARGET",
+ "DRIVERKIT_DEPLOYMENT_TARGET",
};
static_assert(llvm::array_lengthof(EnvVars) == Darwin::LastDarwinPlatform + 1,
"Missing platform");
@@ -1791,6 +1874,8 @@ inferDeploymentTargetFromSDK(DerivedArgList &Args,
return DarwinPlatform::createFromSDK(
Darwin::TvOS, Version,
/*IsSimulator=*/SDK.startswith("AppleTVSimulator"));
+ else if (SDK.startswith("DriverKit"))
+ return DarwinPlatform::createFromSDK(Darwin::DriverKit, Version);
return None;
};
if (auto Result = CreatePlatformFromSDKName(SDK))
@@ -1827,6 +1912,9 @@ std::string getOSVersion(llvm::Triple::OSType OS, const llvm::Triple &Triple,
case llvm::Triple::WatchOS:
OsVersion = Triple.getWatchOSVersion();
break;
+ case llvm::Triple::DriverKit:
+ OsVersion = Triple.getDriverKitVersion();
+ break;
default:
llvm_unreachable("Unexpected OS type");
break;
@@ -1834,8 +1922,8 @@ std::string getOSVersion(llvm::Triple::OSType OS, const llvm::Triple &Triple,
std::string OSVersion;
llvm::raw_string_ostream(OSVersion)
- << OsVersion.getMajor() << '.' << OsVersion.getMinor().getValueOr(0)
- << '.' << OsVersion.getSubminor().getValueOr(0);
+ << OsVersion.getMajor() << '.' << OsVersion.getMinor().value_or(0) << '.'
+ << OsVersion.getSubminor().value_or(0);
return OSVersion;
}
@@ -1847,23 +1935,15 @@ inferDeploymentTargetFromArch(DerivedArgList &Args, const Darwin &Toolchain,
llvm::Triple::OSType OSTy = llvm::Triple::UnknownOS;
StringRef MachOArchName = Toolchain.getMachOArchName(Args);
- if (MachOArchName == "arm64" || MachOArchName == "arm64e") {
-#if __arm64__
- // A clang running on an Apple Silicon mac defaults
- // to building for mac when building for arm64 rather than
- // defaulting to iOS.
+ if (MachOArchName == "arm64" || MachOArchName == "arm64e")
OSTy = llvm::Triple::MacOSX;
-#else
- OSTy = llvm::Triple::IOS;
-#endif
- } else if (MachOArchName == "armv7" || MachOArchName == "armv7s")
+ else if (MachOArchName == "armv7" || MachOArchName == "armv7s")
OSTy = llvm::Triple::IOS;
else if (MachOArchName == "armv7k" || MachOArchName == "arm64_32")
OSTy = llvm::Triple::WatchOS;
else if (MachOArchName != "armv6m" && MachOArchName != "armv7m" &&
MachOArchName != "armv7em")
OSTy = llvm::Triple::MacOSX;
-
if (OSTy == llvm::Triple::UnknownOS)
return None;
return DarwinPlatform::createFromArch(OSTy,
@@ -1880,8 +1960,34 @@ Optional<DarwinPlatform> getDeploymentTargetFromTargetArg(
Triple.getOS() == llvm::Triple::UnknownOS)
return None;
std::string OSVersion = getOSVersion(Triple.getOS(), Triple, TheDriver);
- return DarwinPlatform::createFromTarget(
- Triple, OSVersion, Args.getLastArg(options::OPT_target), SDKInfo);
+ Optional<llvm::Triple> TargetVariantTriple;
+ for (const Arg *A : Args.filtered(options::OPT_darwin_target_variant)) {
+ llvm::Triple TVT(A->getValue());
+ // Find a matching <arch>-<vendor> target variant triple that can be used.
+ if ((Triple.getArch() == llvm::Triple::aarch64 ||
+ TVT.getArchName() == Triple.getArchName()) &&
+ TVT.getArch() == Triple.getArch() &&
+ TVT.getSubArch() == Triple.getSubArch() &&
+ TVT.getVendor() == Triple.getVendor()) {
+ if (TargetVariantTriple)
+ continue;
+ A->claim();
+      // Accept a -target-variant triple when compiling code that may run on
+      // macOS or Mac Catalyst.
+ if ((Triple.isMacOSX() && TVT.getOS() == llvm::Triple::IOS &&
+ TVT.isMacCatalystEnvironment()) ||
+ (TVT.isMacOSX() && Triple.getOS() == llvm::Triple::IOS &&
+ Triple.isMacCatalystEnvironment())) {
+ TargetVariantTriple = TVT;
+ continue;
+ }
+ TheDriver.Diag(diag::err_drv_target_variant_invalid)
+ << A->getSpelling() << A->getValue();
+ }
+ }
+ return DarwinPlatform::createFromTarget(Triple, OSVersion,
+ Args.getLastArg(options::OPT_target),
+ TargetVariantTriple, SDKInfo);
}
/// Returns the deployment target that's specified using the -mtargetos option.
@@ -2103,19 +2209,27 @@ void Darwin::AddDeploymentTarget(DerivedArgList &Args) const {
HadExtra || Major >= 10 || Minor >= 100 || Micro >= 100)
getDriver().Diag(diag::err_drv_invalid_version_number)
<< OSTarget->getAsString(Args, Opts);
+ } else if (Platform == DriverKit) {
+ if (!Driver::GetReleaseVersion(OSTarget->getOSVersion(), Major, Minor,
+ Micro, HadExtra) ||
+ HadExtra || Major < 19 || Major >= 100 || Minor >= 100 || Micro >= 100)
+ getDriver().Diag(diag::err_drv_invalid_version_number)
+ << OSTarget->getAsString(Args, Opts);
} else
llvm_unreachable("unknown kind of Darwin platform");
DarwinEnvironmentKind Environment = OSTarget->getEnvironment();
// Recognize iOS targets with an x86 architecture as the iOS simulator.
if (Environment == NativeEnvironment && Platform != MacOS &&
- OSTarget->canInferSimulatorFromArch() && getTriple().isX86())
+ Platform != DriverKit && OSTarget->canInferSimulatorFromArch() &&
+ getTriple().isX86())
Environment = Simulator;
VersionTuple NativeTargetVersion;
if (Environment == MacCatalyst)
NativeTargetVersion = OSTarget->getNativeTargetVersion();
setTarget(Platform, Environment, Major, Minor, Micro, NativeTargetVersion);
+ TargetVariantTriple = OSTarget->getTargetVariantTriple();
if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) {
StringRef SDK = getSDKName(A->getValue());
@@ -2395,6 +2509,8 @@ void DarwinClang::AddCCKextLibArgs(const ArgList &Args,
llvm::sys::path::append(P, "libclang_rt.cc_kext_tvos.a");
} else if (isTargetIPhoneOS()) {
llvm::sys::path::append(P, "libclang_rt.cc_kext_ios.a");
+ } else if (isTargetDriverKit()) {
+ // DriverKit doesn't want extra runtime support.
} else {
llvm::sys::path::append(P, "libclang_rt.cc_kext.a");
}
@@ -2422,12 +2538,9 @@ DerivedArgList *MachO::TranslateArgs(const DerivedArgList &Args,
if (A->getOption().matches(options::OPT_Xarch__)) {
// Skip this argument unless the architecture matches either the toolchain
// triple arch, or the arch being bound.
- llvm::Triple::ArchType XarchArch =
- tools::darwin::getArchTypeForMachOArchName(A->getValue(0));
- if (!(XarchArch == getArch() ||
- (!BoundArch.empty() &&
- XarchArch ==
- tools::darwin::getArchTypeForMachOArchName(BoundArch))))
+ StringRef XarchArch = A->getValue(0);
+ if (!(XarchArch == getArchName() ||
+ (!BoundArch.empty() && XarchArch == BoundArch)))
continue;
Arg *OriginalArg = A;
@@ -2497,14 +2610,6 @@ DerivedArgList *MachO::TranslateArgs(const DerivedArgList &Args,
DAL->AddFlagArg(
A, Opts.getOption(options::OPT_mno_warn_nonportable_cfstrings));
break;
-
- case options::OPT_fpascal_strings:
- DAL->AddFlagArg(A, Opts.getOption(options::OPT_mpascal_strings));
- break;
-
- case options::OPT_fno_pascal_strings:
- DAL->AddFlagArg(A, Opts.getOption(options::OPT_mno_pascal_strings));
- break;
}
}
@@ -2620,6 +2725,8 @@ bool Darwin::isAlignedAllocationUnavailable() const {
case WatchOS: // Earlier than 4.0.
OS = llvm::Triple::WatchOS;
break;
+ case DriverKit: // Always available.
+ return false;
}
return TargetVersion < alignedAllocMinVersion(OS);
@@ -2635,6 +2742,12 @@ void Darwin::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
isAlignedAllocationUnavailable())
CC1Args.push_back("-faligned-alloc-unavailable");
+ if (TargetVariantTriple) {
+ CC1Args.push_back("-darwin-target-variant-triple");
+ CC1Args.push_back(
+ DriverArgs.MakeArgString(TargetVariantTriple->getTriple()));
+ }
+
if (SDKInfo) {
/// Pass the SDK version to the compiler when the SDK information is
/// available.
@@ -2656,6 +2769,28 @@ void Darwin::addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
} else {
EmitTargetSDKVersionArg(SDKInfo->getVersion());
}
+
+    /// Pass the target variant SDK version to the compiler when the SDK
+    /// information is available and is required for the target variant.
+ if (TargetVariantTriple) {
+ if (isTargetMacCatalyst()) {
+ std::string Arg;
+ llvm::raw_string_ostream OS(Arg);
+ OS << "-darwin-target-variant-sdk-version=" << SDKInfo->getVersion();
+ CC1Args.push_back(DriverArgs.MakeArgString(OS.str()));
+ } else if (const auto *MacOStoMacCatalystMapping =
+ SDKInfo->getVersionMapping(
+ DarwinSDKInfo::OSEnvPair::macOStoMacCatalystPair())) {
+ if (Optional<VersionTuple> SDKVersion = MacOStoMacCatalystMapping->map(
+ SDKInfo->getVersion(), minimumMacCatalystDeploymentTarget(),
+ None)) {
+ std::string Arg;
+ llvm::raw_string_ostream OS(Arg);
+ OS << "-darwin-target-variant-sdk-version=" << *SDKVersion;
+ CC1Args.push_back(DriverArgs.MakeArgString(OS.str()));
+ }
+ }
+ }
}
// Enable compatibility mode for NSItemProviderCompletionHandler in
@@ -2691,7 +2826,7 @@ Darwin::TranslateArgs(const DerivedArgList &Args, StringRef BoundArch,
// FIXME: It would be far better to avoid inserting those -static arguments,
// but we can't check the deployment target in the translation code until
// it is set here.
- if (isTargetWatchOSBased() ||
+ if (isTargetWatchOSBased() || isTargetDriverKit() ||
(isTargetIOSBased() && !isIPhoneOSVersionLT(6, 0))) {
for (ArgList::iterator it = DAL->begin(), ie = DAL->end(); it != ie; ) {
Arg *A = *it;
@@ -2754,6 +2889,12 @@ bool MachO::UseDwarfDebugFlags() const {
return false;
}
+std::string MachO::GetGlobalDebugPathRemapping() const {
+ if (const char *S = ::getenv("RC_DEBUG_PREFIX_MAP"))
+ return S;
+ return {};
+}
+
llvm::ExceptionHandling Darwin::GetExceptionModel(const ArgList &Args) const {
// Darwin uses SjLj exceptions on ARM.
if (getTriple().getArch() != llvm::Triple::arm &&
@@ -2801,6 +2942,8 @@ void Darwin::addMinVersionArgs(const ArgList &Args,
CmdArgs.push_back("-tvos_version_min");
else if (isTargetTvOSSimulator())
CmdArgs.push_back("-tvos_simulator_version_min");
+ else if (isTargetDriverKit())
+ CmdArgs.push_back("-driverkit_version_min");
else if (isTargetIOSSimulator())
CmdArgs.push_back("-ios_simulator_version_min");
else if (isTargetIOSBased())
@@ -2816,6 +2959,25 @@ void Darwin::addMinVersionArgs(const ArgList &Args,
if (!MinTgtVers.empty() && MinTgtVers > TargetVersion)
TargetVersion = MinTgtVers;
CmdArgs.push_back(Args.MakeArgString(TargetVersion.getAsString()));
+ if (TargetVariantTriple) {
+ assert(isTargetMacOSBased() && "unexpected target");
+ VersionTuple VariantTargetVersion;
+ if (TargetVariantTriple->isMacOSX()) {
+ CmdArgs.push_back("-macosx_version_min");
+ TargetVariantTriple->getMacOSXVersion(VariantTargetVersion);
+ } else {
+ assert(TargetVariantTriple->isiOS() &&
+ TargetVariantTriple->isMacCatalystEnvironment() &&
+ "unexpected target variant triple");
+ CmdArgs.push_back("-maccatalyst_version_min");
+ VariantTargetVersion = TargetVariantTriple->getiOSVersion();
+ }
+ VersionTuple MinTgtVers =
+ TargetVariantTriple->getMinimumSupportedOSVersion();
+ if (MinTgtVers.getMajor() && MinTgtVers > VariantTargetVersion)
+ VariantTargetVersion = MinTgtVers;
+ CmdArgs.push_back(Args.MakeArgString(VariantTargetVersion.getAsString()));
+ }
}
static const char *getPlatformName(Darwin::DarwinPlatformKind Platform,
@@ -2831,58 +2993,96 @@ static const char *getPlatformName(Darwin::DarwinPlatformKind Platform,
return "tvos";
case Darwin::WatchOS:
return "watchos";
+ case Darwin::DriverKit:
+ return "driverkit";
}
llvm_unreachable("invalid platform");
}
void Darwin::addPlatformVersionArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const {
- // -platform_version <platform> <target_version> <sdk_version>
- // Both the target and SDK version support only up to 3 components.
- CmdArgs.push_back("-platform_version");
- std::string PlatformName = getPlatformName(TargetPlatform, TargetEnvironment);
- if (TargetEnvironment == Darwin::Simulator)
- PlatformName += "-simulator";
- CmdArgs.push_back(Args.MakeArgString(PlatformName));
- VersionTuple TargetVersion = getTripleTargetVersion().withoutBuild();
- VersionTuple MinTgtVers = getEffectiveTriple().getMinimumSupportedOSVersion();
- if (!MinTgtVers.empty() && MinTgtVers > TargetVersion)
- TargetVersion = MinTgtVers;
- CmdArgs.push_back(Args.MakeArgString(TargetVersion.getAsString()));
+ auto EmitPlatformVersionArg =
+ [&](const VersionTuple &TV, Darwin::DarwinPlatformKind TargetPlatform,
+ Darwin::DarwinEnvironmentKind TargetEnvironment,
+ const llvm::Triple &TT) {
+ // -platform_version <platform> <target_version> <sdk_version>
+ // Both the target and SDK version support only up to 3 components.
+ CmdArgs.push_back("-platform_version");
+ std::string PlatformName =
+ getPlatformName(TargetPlatform, TargetEnvironment);
+ if (TargetEnvironment == Darwin::Simulator)
+ PlatformName += "-simulator";
+ CmdArgs.push_back(Args.MakeArgString(PlatformName));
+ VersionTuple TargetVersion = TV.withoutBuild();
+ if ((TargetPlatform == Darwin::IPhoneOS ||
+ TargetPlatform == Darwin::TvOS) &&
+ getTriple().getArchName() == "arm64e" &&
+ TargetVersion.getMajor() < 14) {
+ // arm64e slice is supported on iOS/tvOS 14+ only.
+ TargetVersion = VersionTuple(14, 0);
+ }
+ VersionTuple MinTgtVers = TT.getMinimumSupportedOSVersion();
+ if (!MinTgtVers.empty() && MinTgtVers > TargetVersion)
+ TargetVersion = MinTgtVers;
+ CmdArgs.push_back(Args.MakeArgString(TargetVersion.getAsString()));
+
+ if (TargetPlatform == IPhoneOS && TargetEnvironment == MacCatalyst) {
+ // Mac Catalyst programs must use the appropriate iOS SDK version
+ // that corresponds to the macOS SDK version used for the compilation.
+ Optional<VersionTuple> iOSSDKVersion;
+ if (SDKInfo) {
+ if (const auto *MacOStoMacCatalystMapping =
+ SDKInfo->getVersionMapping(
+ DarwinSDKInfo::OSEnvPair::macOStoMacCatalystPair())) {
+ iOSSDKVersion = MacOStoMacCatalystMapping->map(
+ SDKInfo->getVersion().withoutBuild(),
+ minimumMacCatalystDeploymentTarget(), None);
+ }
+ }
+ CmdArgs.push_back(Args.MakeArgString(
+ (iOSSDKVersion ? *iOSSDKVersion
+ : minimumMacCatalystDeploymentTarget())
+ .getAsString()));
+ return;
+ }
- if (isTargetMacCatalyst()) {
- // Mac Catalyst programs must use the appropriate iOS SDK version
- // that corresponds to the macOS SDK version used for the compilation.
- Optional<VersionTuple> iOSSDKVersion;
- if (SDKInfo) {
- if (const auto *MacOStoMacCatalystMapping = SDKInfo->getVersionMapping(
- DarwinSDKInfo::OSEnvPair::macOStoMacCatalystPair())) {
- iOSSDKVersion = MacOStoMacCatalystMapping->map(
- SDKInfo->getVersion().withoutBuild(),
- minimumMacCatalystDeploymentTarget(), None);
- }
- }
- CmdArgs.push_back(Args.MakeArgString(
- (iOSSDKVersion ? *iOSSDKVersion : minimumMacCatalystDeploymentTarget())
- .getAsString()));
+ if (SDKInfo) {
+ VersionTuple SDKVersion = SDKInfo->getVersion().withoutBuild();
+ CmdArgs.push_back(Args.MakeArgString(SDKVersion.getAsString()));
+ } else {
+ // Use an SDK version that's matching the deployment target if the SDK
+ // version is missing. This is preferred over an empty SDK version
+ // (0.0.0) as the system's runtime might expect the linked binary to
+ // contain a valid SDK version in order for the binary to work
+ // correctly. It's reasonable to use the deployment target version as
+ // a proxy for the SDK version because older SDKs don't guarantee
+ // support for deployment targets newer than the SDK versions, so that
+ // rules out using some predetermined older SDK version, which leaves
+ // the deployment target version as the only reasonable choice.
+ CmdArgs.push_back(Args.MakeArgString(TargetVersion.getAsString()));
+ }
+ };
+ EmitPlatformVersionArg(getTripleTargetVersion(), TargetPlatform,
+ TargetEnvironment, getEffectiveTriple());
+ if (!TargetVariantTriple)
return;
- }
-
- if (SDKInfo) {
- VersionTuple SDKVersion = SDKInfo->getVersion().withoutBuild();
- CmdArgs.push_back(Args.MakeArgString(SDKVersion.getAsString()));
+ Darwin::DarwinPlatformKind Platform;
+ Darwin::DarwinEnvironmentKind Environment;
+ VersionTuple TargetVariantVersion;
+ if (TargetVariantTriple->isMacOSX()) {
+ TargetVariantTriple->getMacOSXVersion(TargetVariantVersion);
+ Platform = Darwin::MacOS;
+ Environment = Darwin::NativeEnvironment;
} else {
- // Use an SDK version that's matching the deployment target if the SDK
- // version is missing. This is preferred over an empty SDK version (0.0.0)
- // as the system's runtime might expect the linked binary to contain a
- // valid SDK version in order for the binary to work correctly. It's
- // reasonable to use the deployment target version as a proxy for the
- // SDK version because older SDKs don't guarantee support for deployment
- // targets newer than the SDK versions, so that rules out using some
- // predetermined older SDK version, which leaves the deployment target
- // version as the only reasonable choice.
- CmdArgs.push_back(Args.MakeArgString(TargetVersion.getAsString()));
- }
+ assert(TargetVariantTriple->isiOS() &&
+ TargetVariantTriple->isMacCatalystEnvironment() &&
+ "unexpected target variant triple");
+ TargetVariantVersion = TargetVariantTriple->getiOSVersion();
+ Platform = Darwin::IPhoneOS;
+ Environment = Darwin::MacCatalyst;
+ }
+ EmitPlatformVersionArg(TargetVariantVersion, Platform, Environment,
+ *TargetVariantTriple);
}
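For a zippered build this now emits the platform/version pair twice, e.g. (versions illustrative, assuming ld64's mac-catalyst platform spelling):

    -platform_version macos 12.0 12.3
    -platform_version mac-catalyst 15.0 15.4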
// Add additional link args for the -dynamiclib option.
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h
index 5e23047a5512..4535d021262e 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Darwin.h
@@ -63,7 +63,7 @@ class LLVM_LIBRARY_VISIBILITY Linker : public MachOTool {
bool NeedsTempPath(const InputInfoList &Inputs) const;
void AddLinkArgs(Compilation &C, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs,
- const InputInfoList &Inputs, unsigned Version[5],
+ const InputInfoList &Inputs, VersionTuple Version,
bool LinkerIsLLD) const;
public:
@@ -147,6 +147,9 @@ private:
mutable std::unique_ptr<tools::darwin::Dsymutil> Dsymutil;
mutable std::unique_ptr<tools::darwin::VerifyDebug> VerifyDebug;
+ /// The version of the linker known to be available in the tool chain.
+ mutable Optional<VersionTuple> LinkerVersion;
+
public:
MachO(const Driver &D, const llvm::Triple &Triple,
const llvm::opt::ArgList &Args);
@@ -159,6 +162,10 @@ public:
/// example, Apple treats different ARM variations as distinct architectures.
StringRef getMachOArchName(const llvm::opt::ArgList &Args) const;
+ /// Get the version of the linker known to be available for a particular
+ /// compiler invocation (via the `-mlinker-version=` arg).
+ VersionTuple getLinkerVersion(const llvm::opt::ArgList &Args) const;
+
/// Add the linker arguments to link the ARC runtime library.
virtual void AddLinkARCArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const {}
@@ -260,6 +267,7 @@ public:
bool SupportsProfiling() const override;
bool UseDwarfDebugFlags() const override;
+ std::string GetGlobalDebugPathRemapping() const override;
llvm::ExceptionHandling
GetExceptionModel(const llvm::opt::ArgList &Args) const override {
@@ -291,7 +299,8 @@ public:
IPhoneOS,
TvOS,
WatchOS,
- LastDarwinPlatform = WatchOS
+ DriverKit,
+ LastDarwinPlatform = DriverKit
};
enum DarwinEnvironmentKind {
NativeEnvironment,
@@ -310,6 +319,9 @@ public:
/// The information about the darwin SDK that was used.
mutable Optional<DarwinSDKInfo> SDKInfo;
+ /// The target variant triple that was specified (if any).
+ mutable Optional<llvm::Triple> TargetVariantTriple;
+
CudaInstallationDetector CudaInstallation;
RocmInstallationDetector RocmInstallation;
@@ -338,7 +350,7 @@ public:
bool isKernelStatic() const override {
return (!(isTargetIPhoneOS() && !isIPhoneOSVersionLT(6, 0)) &&
- !isTargetWatchOS());
+ !isTargetWatchOS() && !isTargetDriverKit());
}
void addProfileRTLibs(const llvm::opt::ArgList &Args,
@@ -424,6 +436,11 @@ public:
return TargetPlatform == WatchOS;
}
+ bool isTargetDriverKit() const {
+ assert(TargetInitialized && "Target not initialized!");
+ return TargetPlatform == DriverKit;
+ }
+
bool isTargetMacCatalyst() const {
return TargetPlatform == IPhoneOS && TargetEnvironment == MacCatalyst;
}
@@ -478,6 +495,12 @@ public:
: TargetVersion) < VersionTuple(V0, V1, V2);
}
+ /// Returns the darwin target variant triple, the variant of the deployment
+ /// target for which the code is being compiled.
+ Optional<llvm::Triple> getTargetVariantTriple() const override {
+ return TargetVariantTriple;
+ }
+
protected:
/// Return true if c++17 aligned allocation/deallocation functions are not
/// implemented in the c++ standard library of the deployment target we are
@@ -527,7 +550,7 @@ public:
GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
// Stack protectors default to on for user code on 10.5,
// and for everything in 10.6 and beyond
- if (isTargetIOSBased() || isTargetWatchOSBased())
+ if (isTargetIOSBased() || isTargetWatchOSBased() || isTargetDriverKit())
return LangOptions::SSPOn;
else if (isTargetMacOSBased() && !isMacosxVersionLT(10, 6))
return LangOptions::SSPOn;
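
Note on the Darwin.h hunks above: the raw unsigned Version[5] array is replaced by llvm::VersionTuple, and the parsed value is cached in an Optional. A minimal sketch of the parsing step, assuming only public LLVM support headers (the helper name and the empty-tuple fallback are illustrative, not the driver's actual code):

#include "llvm/ADT/StringRef.h"
#include "llvm/Support/VersionTuple.h"
#include "llvm/Support/raw_ostream.h"

// Parse a "-mlinker-version=" style value such as "450.3" into a
// VersionTuple. tryParse() returns true on *failure*, so malformed input
// yields an empty tuple here (illustrative policy only).
static llvm::VersionTuple parseLinkerVersion(llvm::StringRef Value) {
  llvm::VersionTuple Version;
  if (Version.tryParse(Value))
    return llvm::VersionTuple();
  return Version;
}

int main() {
  llvm::VersionTuple V = parseLinkerVersion("450.3");
  llvm::outs() << V.getAsString() << "\n";                             // 450.3
  llvm::outs() << (V < llvm::VersionTuple(520) ? "old" : "new") << "\n"; // old
}
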
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.cpp
index c169e3d45793..3368f6785718 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Flang.cpp
@@ -19,6 +19,14 @@ using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
+/// Add -x lang to \p CmdArgs for \p Input.
+static void addDashXForInput(const ArgList &Args, const InputInfo &Input,
+ ArgStringList &CmdArgs) {
+ CmdArgs.push_back("-x");
+ // Map the driver type to the frontend type.
+ CmdArgs.push_back(types::getTypeName(Input.getType()));
+}
+
void Flang::AddFortranDialectOptions(const ArgList &Args,
ArgStringList &CmdArgs) const {
Args.AddAllArgs(
@@ -54,21 +62,18 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output, const InputInfoList &Inputs,
const ArgList &Args, const char *LinkingOutput) const {
const auto &TC = getToolChain();
- // TODO: Once code-generation is available, this will need to be commented
- // out.
- // const llvm::Triple &Triple = TC.getEffectiveTriple();
- // const std::string &TripleStr = Triple.getTriple();
+ const llvm::Triple &Triple = TC.getEffectiveTriple();
+ const std::string &TripleStr = Triple.getTriple();
+ const Driver &D = TC.getDriver();
ArgStringList CmdArgs;
// Invoke ourselves in -fc1 mode.
CmdArgs.push_back("-fc1");
- // TODO: Once code-generation is available, this will need to be commented
- // out.
// Add the "effective" target triple.
- // CmdArgs.push_back("-triple");
- // CmdArgs.push_back(Args.MakeArgString(TripleStr));
+ CmdArgs.push_back("-triple");
+ CmdArgs.push_back(Args.MakeArgString(TripleStr));
if (isa<PreprocessJobAction>(JA)) {
CmdArgs.push_back("-E");
@@ -104,12 +109,42 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
AddFortranDialectOptions(Args, CmdArgs);
+ // Color diagnostics are parsed by the driver directly from argv and later
+ // re-parsed to construct this job; claim any possible color diagnostic here
+ // to avoid warn_drv_unused_argument.
+ Args.getLastArg(options::OPT_fcolor_diagnostics,
+ options::OPT_fno_color_diagnostics);
+ if (D.getDiags().getDiagnosticOptions().ShowColors)
+ CmdArgs.push_back("-fcolor-diagnostics");
+
// Add other compile options
AddOtherOptions(Args, CmdArgs);
// Forward -Xflang arguments to -fc1
Args.AddAllArgValues(CmdArgs, options::OPT_Xflang);
+ // Forward -mllvm options to the LLVM option parser. In practice, this means
+ // forwarding to `-fc1` as that's where the LLVM parser is run.
+ for (const Arg *A : Args.filtered(options::OPT_mllvm)) {
+ A->claim();
+ A->render(Args, CmdArgs);
+ }
+
+ for (const Arg *A : Args.filtered(options::OPT_mmlir)) {
+ A->claim();
+ A->render(Args, CmdArgs);
+ }
+
+ // Optimization level for CodeGen.
+ if (const Arg *A = Args.getLastArg(options::OPT_O_Group)) {
+ if (A->getOption().matches(options::OPT_O4)) {
+ CmdArgs.push_back("-O3");
+ D.Diag(diag::warn_O4_is_O3);
+ } else {
+ A->render(Args, CmdArgs);
+ }
+ }
+
if (Output.isFilename()) {
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
@@ -118,9 +153,11 @@ void Flang::ConstructJob(Compilation &C, const JobAction &JA,
}
assert(Input.isFilename() && "Invalid input.");
+
+ addDashXForInput(Args, Input, CmdArgs);
+
CmdArgs.push_back(Input.getFilename());
- const auto& D = C.getDriver();
// TODO: Replace flang-new with flang once the new driver replaces the
// throwaway driver
const char *Exec = Args.MakeArgString(D.GetProgramPath("flang-new", TC));
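
Note on the Flang.cpp hunks: the frontend invocation now receives the effective target triple, the driver input type is mapped to a -x language via types::getTypeName, and -mllvm/-mmlir are forwarded by claiming and re-rendering each argument. A compact sketch of that claim-and-render pattern, assuming the in-tree clang driver headers (the helper name is illustrative):

#include "clang/Driver/Options.h"
#include "llvm/Option/Arg.h"
#include "llvm/Option/ArgList.h"

using namespace llvm::opt;

// Claim each matching driver argument (so the driver does not emit
// warn_drv_unused_argument) and re-render it, option plus values, into the
// frontend job's command line.
static void forwardAll(const ArgList &Args, ArgStringList &CmdArgs,
                       OptSpecifier Opt) {
  for (const Arg *A : Args.filtered(Opt)) {
    A->claim();
    A->render(Args, CmdArgs);
  }
}
// e.g. forwardAll(Args, CmdArgs, clang::driver::options::OPT_mllvm);
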
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
index 05c58a8f43a8..79e3c5cbca5f 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/FreeBSD.cpp
@@ -223,10 +223,12 @@ void freebsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
case llvm::Triple::riscv32:
CmdArgs.push_back("-m");
CmdArgs.push_back("elf32lriscv");
+ CmdArgs.push_back("-X");
break;
case llvm::Triple::riscv64:
CmdArgs.push_back("-m");
CmdArgs.push_back("elf64lriscv");
+ CmdArgs.push_back("-X");
break;
default:
break;
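
Context for the repeated -X additions here (and in the Gnu and Fuchsia hunks below): -X is the GNU ld/lld spelling of --discard-locals, which deletes temporary local symbols. The RISC-V toolchain keeps .L* symbols around for linker relaxation, so they are discarded at link time instead of by the assembler. Illustrative final link line (object names hypothetical):

  ld.lld -m elf64lriscv -X --eh-frame-hdr ... crt1.o main.o -o a.out
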
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp
index bd1600d060c8..03ff9fe894c8 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.cpp
@@ -37,6 +37,8 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
static_cast<const toolchains::Fuchsia &>(getToolChain());
const Driver &D = ToolChain.getDriver();
+ const llvm::Triple &Triple = ToolChain.getEffectiveTriple();
+
ArgStringList CmdArgs;
// Silence warning for "clang -g foo.o -o foo"
@@ -84,6 +86,12 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("--hash-style=gnu");
}
+ if (ToolChain.getArch() == llvm::Triple::aarch64) {
+ std::string CPU = getCPUName(D, Args, Triple);
+ if (CPU.empty() || CPU == "generic" || CPU == "cortex-a53")
+ CmdArgs.push_back("--fix-cortex-a53-843419");
+ }
+
CmdArgs.push_back("--eh-frame-hdr");
if (Args.hasArg(options::OPT_static))
@@ -106,6 +114,9 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back(Args.MakeArgString(Dyld));
}
+ if (ToolChain.getArch() == llvm::Triple::riscv64)
+ CmdArgs.push_back("-X");
+
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
@@ -127,10 +138,8 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
D.getLTOMode() == LTOK_Thin);
}
- bool NeedsSanitizerDeps = addSanitizerRuntimes(ToolChain, Args, CmdArgs);
- bool NeedsXRayDeps = addXRayRuntime(ToolChain, Args, CmdArgs);
+ addLinkerCompressDebugSectionsOption(ToolChain, Args, CmdArgs);
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
- ToolChain.addProfileRTLibs(Args, CmdArgs);
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs,
options::OPT_r)) {
@@ -153,11 +162,14 @@ void fuchsia::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- if (NeedsSanitizerDeps)
- linkSanitizerRuntimeDeps(ToolChain, CmdArgs);
+ // Note that Fuchsia never needs to link in sanitizer runtime deps. Any
+ // sanitizer runtimes with system dependencies use the `.deplibs` feature
+ // instead.
+ addSanitizerRuntimes(ToolChain, Args, CmdArgs);
+
+ addXRayRuntime(ToolChain, Args, CmdArgs);
- if (NeedsXRayDeps)
- linkXRayRuntimeDeps(ToolChain, CmdArgs);
+ ToolChain.addProfileRTLibs(Args, CmdArgs);
AddRunTimeLibs(ToolChain, D, CmdArgs, Args);
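
Two things happen in the Fuchsia.cpp hunks: the Cortex-A53 erratum 843419 workaround is enabled whenever the target CPU could be an A53, and the sanitizer/XRay runtimes drop their separate dependency-linking step because Fuchsia's runtimes declare system dependencies via .deplibs. A trivial restatement of the CPU predicate, assuming nothing beyond StringRef (the function name is illustrative):

#include "llvm/ADT/StringRef.h"

// Apply lld's --fix-cortex-a53-843419 unless the user named a specific CPU
// that is known not to be a Cortex-A53.
static bool needsA53Erratum843419Fix(llvm::StringRef CPU) {
  return CPU.empty() || CPU == "generic" || CPU == "cortex-a53";
}
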
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.h
index c0e69df22821..f9f3bbfa9fbf 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Fuchsia.h
@@ -75,24 +75,27 @@ public:
RuntimeLibType
GetRuntimeLibType(const llvm::opt::ArgList &Args) const override;
- CXXStdlibType
- GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
+ CXXStdlibType GetCXXStdlibType(const llvm::opt::ArgList &Args) const override;
+
+ bool IsAArch64OutlineAtomicsDefault(
+ const llvm::opt::ArgList &Args) const override {
+ return true;
+ }
- void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args,
- Action::OffloadKind DeviceOffloadKind) const override;
+ void
+ addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadKind) const override;
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
- void
- AddClangCXXStdlibIncludeArgs(const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const override;
+ void AddClangCXXStdlibIncludeArgs(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override;
void AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs) const override;
- const char *getDefaultLinker() const override {
- return "ld.lld";
- }
+ const char *getDefaultLinker() const override { return "ld.lld"; }
protected:
Tool *buildLinker() const override;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp
index 7a9570a686f4..f52bb8af5ec9 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Gnu.cpp
@@ -8,6 +8,7 @@
#include "Gnu.h"
#include "Arch/ARM.h"
+#include "Arch/CSKY.h"
#include "Arch/Mips.h"
#include "Arch/PPC.h"
#include "Arch/RISCV.h"
@@ -299,6 +300,8 @@ static const char *getLDMOption(const llvm::Triple &T, const ArgList &Args) {
return "elf_x86_64";
case llvm::Triple::ve:
return "elf64ve";
+ case llvm::Triple::csky:
+ return "cskyelf_linux";
default:
return nullptr;
}
@@ -451,11 +454,6 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("--fix-cortex-a53-843419");
}
- // Android does not allow shared text relocations. Emit a warning if the
- // user's code contains any.
- if (isAndroid)
- CmdArgs.push_back("--warn-shared-textrel");
-
ToolChain.addExtraOpts(CmdArgs);
CmdArgs.push_back("--eh-frame-hdr");
@@ -467,6 +465,8 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
D.Diag(diag::err_target_unknown_triple) << Triple.str();
return;
}
+ if (Triple.isRISCV())
+ CmdArgs.push_back("-X");
if (Args.hasArg(options::OPT_shared))
CmdArgs.push_back("-shared");
@@ -477,7 +477,8 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_rdynamic))
CmdArgs.push_back("-export-dynamic");
- if (!Args.hasArg(options::OPT_shared) && !IsStaticPIE) {
+ if (!Args.hasArg(options::OPT_shared) && !IsStaticPIE &&
+ !Args.hasArg(options::OPT_r)) {
CmdArgs.push_back("-dynamic-linker");
CmdArgs.push_back(Args.MakeArgString(Twine(D.DyldPrefix) +
ToolChain.getDynamicLinker(Args)));
@@ -560,6 +561,9 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
bool NeedsXRayDeps = addXRayRuntime(ToolChain, Args, CmdArgs);
addLinkerCompressDebugSectionsOption(ToolChain, Args, CmdArgs);
AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
+
+ addHIPRuntimeLibArgs(ToolChain, Args, CmdArgs);
+
// The profile runtime also needs access to system libraries.
getToolChain().addProfileRTLibs(Args, CmdArgs);
@@ -577,9 +581,27 @@ void tools::gnutools::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
CmdArgs.push_back("-lm");
}
+
+ // If we are linking for the device, all symbols should be bound locally. The
+ // symbols are already protected, which makes this redundant; it is only
+ // necessary to work around a problem in bfd.
+ // TODO: Remove this once 'lld' becomes the only linker for offloading.
+ if (JA.isDeviceOffloading(Action::OFK_OpenMP))
+ CmdArgs.push_back("-Bsymbolic");
+
// Silence warnings when linking C code with a C++ '-stdlib' argument.
Args.ClaimAllArgs(options::OPT_stdlib_EQ);
+ // Additional linker set-up and flags for Fortran. This is required in order
+ // to generate executables. As the Fortran runtime depends on the C runtime,
+ // these dependencies need to be listed before the C runtime below (i.e.
+ // AddRunTimeLibs).
+ if (D.IsFlangMode()) {
+ addFortranRuntimeLibraryPath(ToolChain, Args, CmdArgs);
+ addFortranRuntimeLibs(ToolChain, CmdArgs);
+ CmdArgs.push_back("-lm");
+ }
+
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_r)) {
if (!Args.hasArg(options::OPT_nodefaultlibs)) {
if (IsStatic || IsStaticPIE)
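
The ordering constraint in that comment matters because a single-pass static link resolves symbols left to right: the Fortran runtime must come after the user's objects but before the C runtime it depends on. An abridged, illustrative link line, assuming the clang-15-era flang runtime library names (paths and object names hypothetical):

  ld ... main.o -L<flang-resource-lib> -lFortran_main -lFortranRuntime \
     -lFortranDecimal -lm -lgcc -lc ...
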
@@ -758,6 +780,8 @@ void tools::gnutools::Assembler::ConstructJob(Compilation &C,
StringRef MArchName = riscv::getRISCVArch(Args, getToolChain().getTriple());
CmdArgs.push_back("-march");
CmdArgs.push_back(MArchName.data());
+ if (!Args.hasFlag(options::OPT_mrelax, options::OPT_mno_relax, true))
+ Args.addOptOutFlag(CmdArgs, options::OPT_mrelax, options::OPT_mno_relax);
break;
}
case llvm::Triple::sparc:
@@ -1574,6 +1598,68 @@ static bool findMSP430Multilibs(const Driver &D,
return false;
}
+static void findCSKYMultilibs(const Driver &D, const llvm::Triple &TargetTriple,
+ StringRef Path, const ArgList &Args,
+ DetectedMultilibs &Result) {
+ FilterNonExistent NonExistent(Path, "/crtbegin.o", D.getVFS());
+
+ tools::csky::FloatABI TheFloatABI = tools::csky::getCSKYFloatABI(D, Args);
+ llvm::Optional<llvm::StringRef> Res = tools::csky::getCSKYArchName(D, Args, TargetTriple);
+
+ if (!Res)
+ return;
+ auto ARCHName = *Res;
+
+ Multilib::flags_list Flags;
+ addMultilibFlag(TheFloatABI == tools::csky::FloatABI::Hard, "hard-fp", Flags);
+ addMultilibFlag(TheFloatABI == tools::csky::FloatABI::SoftFP, "soft-fp",
+ Flags);
+ addMultilibFlag(TheFloatABI == tools::csky::FloatABI::Soft, "soft", Flags);
+ addMultilibFlag(ARCHName == "ck801", "march=ck801", Flags);
+ addMultilibFlag(ARCHName == "ck802", "march=ck802", Flags);
+ addMultilibFlag(ARCHName == "ck803", "march=ck803", Flags);
+ addMultilibFlag(ARCHName == "ck804", "march=ck804", Flags);
+ addMultilibFlag(ARCHName == "ck805", "march=ck805", Flags);
+ addMultilibFlag(ARCHName == "ck807", "march=ck807", Flags);
+ addMultilibFlag(ARCHName == "ck810", "march=ck810", Flags);
+ addMultilibFlag(ARCHName == "ck810v", "march=ck810v", Flags);
+ addMultilibFlag(ARCHName == "ck860", "march=ck860", Flags);
+ addMultilibFlag(ARCHName == "ck860v", "march=ck860v", Flags);
+
+ bool isBigEndian = false;
+ if (Arg *A = Args.getLastArg(options::OPT_mlittle_endian,
+ options::OPT_mbig_endian))
+ isBigEndian = !A->getOption().matches(options::OPT_mlittle_endian);
+ addMultilibFlag(isBigEndian, "EB", Flags);
+
+ auto HardFloat = makeMultilib("/hard-fp").flag("+hard-fp");
+ auto SoftFpFloat = makeMultilib("/soft-fp").flag("+soft-fp");
+ auto SoftFloat = makeMultilib("").flag("+soft");
+ auto Arch801 = makeMultilib("/ck801").flag("+march=ck801");
+ auto Arch802 = makeMultilib("/ck802").flag("+march=ck802");
+ auto Arch803 = makeMultilib("/ck803").flag("+march=ck803");
+ // CK804 uses the same libraries as CK803.
+ auto Arch804 = makeMultilib("/ck803").flag("+march=ck804");
+ auto Arch805 = makeMultilib("/ck805").flag("+march=ck805");
+ auto Arch807 = makeMultilib("/ck807").flag("+march=ck807");
+ auto Arch810 = makeMultilib("").flag("+march=ck810");
+ auto Arch810v = makeMultilib("/ck810v").flag("+march=ck810v");
+ auto Arch860 = makeMultilib("/ck860").flag("+march=ck860");
+ auto Arch860v = makeMultilib("/ck860v").flag("+march=ck860v");
+ auto BigEndian = makeMultilib("/big").flag("+EB");
+
+ MultilibSet CSKYMultilibs =
+ MultilibSet()
+ .Maybe(BigEndian)
+ .Either({Arch801, Arch802, Arch803, Arch804, Arch805, Arch807,
+ Arch810, Arch810v, Arch860, Arch860v})
+ .Either(HardFloat, SoftFpFloat, SoftFloat)
+ .FilterOut(NonExistent);
+
+ if (CSKYMultilibs.select(Flags, Result.SelectedMultilib))
+ Result.Multilibs = CSKYMultilibs;
+}
+
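
A worked example of the selection in findCSKYMultilibs, under the flag scheme above (suffixes compose in the Maybe/Either declaration order):

  arch=ck804, hard float               ->  gccSuffix "/ck803/hard-fp"
  arch=ck860, big endian, soft float   ->  gccSuffix "/big/ck860"
  arch=ck810, soft float               ->  gccSuffix ""   (base lib directory)

CK804 lands in /ck803 because Arch804 is deliberately declared with the /ck803 path while matching the "+march=ck804" flag.
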
static void findRISCVBareMetalMultilibs(const Driver &D,
const llvm::Triple &TargetTriple,
StringRef Path, const ArgList &Args,
@@ -1951,7 +2037,7 @@ void Generic_GCC::GCCInstallationDetector::init(
if (!VFS.exists(Prefix))
continue;
for (StringRef Suffix : CandidateLibDirs) {
- const std::string LibDir = Prefix + Suffix.str();
+ const std::string LibDir = concat(Prefix, Suffix);
if (!VFS.exists(LibDir))
continue;
// Maybe filter out <libdir>/gcc and <libdir>/gcc-cross.
@@ -2000,7 +2086,7 @@ void Generic_GCC::GCCInstallationDetector::print(raw_ostream &OS) const {
}
bool Generic_GCC::GCCInstallationDetector::getBiarchSibling(Multilib &M) const {
- if (BiarchSibling.hasValue()) {
+ if (BiarchSibling) {
M = BiarchSibling.getValue();
return true;
}
@@ -2017,7 +2103,7 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
// so we need to find those /usr/gcc/*/lib/gcc libdirs and go with
// /usr/gcc/<version> as a prefix.
- std::string PrefixDir = SysRoot.str() + "/usr/gcc";
+ std::string PrefixDir = concat(SysRoot, "/usr/gcc");
std::error_code EC;
for (llvm::vfs::directory_iterator LI = D.getVFS().dir_begin(PrefixDir, EC),
LE;
@@ -2039,20 +2125,39 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
return;
}
- // Non-Solaris is much simpler - most systems just go with "/usr".
- if (SysRoot.empty() && TargetTriple.getOS() == llvm::Triple::Linux) {
- // Yet, still look for RHEL/CentOS devtoolsets and gcc-toolsets.
- Prefixes.push_back("/opt/rh/gcc-toolset-10/root/usr");
- Prefixes.push_back("/opt/rh/devtoolset-10/root/usr");
- Prefixes.push_back("/opt/rh/devtoolset-9/root/usr");
- Prefixes.push_back("/opt/rh/devtoolset-8/root/usr");
- Prefixes.push_back("/opt/rh/devtoolset-7/root/usr");
- Prefixes.push_back("/opt/rh/devtoolset-6/root/usr");
- Prefixes.push_back("/opt/rh/devtoolset-4/root/usr");
- Prefixes.push_back("/opt/rh/devtoolset-3/root/usr");
- Prefixes.push_back("/opt/rh/devtoolset-2/root/usr");
+ // For Linux, if --sysroot is not specified, look for RHEL/CentOS devtoolsets
+ // and gcc-toolsets.
+ if (SysRoot.empty() && TargetTriple.getOS() == llvm::Triple::Linux &&
+ D.getVFS().exists("/opt/rh")) {
+ // Find the directory in /opt/rh/ starting with gcc-toolset-* or
+ // devtoolset-* with the highest version number and add that
+ // one to our prefixes.
+ std::string ChosenToolsetDir;
+ unsigned ChosenToolsetVersion = 0;
+ std::error_code EC;
+ for (llvm::vfs::directory_iterator LI = D.getVFS().dir_begin("/opt/rh", EC),
+ LE;
+ !EC && LI != LE; LI = LI.increment(EC)) {
+ StringRef ToolsetDir = llvm::sys::path::filename(LI->path());
+ unsigned ToolsetVersion;
+ if ((!ToolsetDir.startswith("gcc-toolset-") &&
+ !ToolsetDir.startswith("devtoolset-")) ||
+ ToolsetDir.substr(ToolsetDir.rfind('-') + 1)
+ .getAsInteger(10, ToolsetVersion))
+ continue;
+
+ if (ToolsetVersion > ChosenToolsetVersion) {
+ ChosenToolsetVersion = ToolsetVersion;
+ ChosenToolsetDir = "/opt/rh/" + ToolsetDir.str();
+ }
+ }
+
+ if (ChosenToolsetVersion > 0)
+ Prefixes.push_back(ChosenToolsetDir + "/root/usr");
}
- Prefixes.push_back(SysRoot.str() + "/usr");
+
+ // Fall back to /usr which is used by most non-Solaris systems.
+ Prefixes.push_back(concat(SysRoot, "/usr"));
}
/*static*/ void Generic_GCC::GCCInstallationDetector::CollectLibDirsAndTriples(
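
A self-contained sketch of the same toolset-selection rule using std::filesystem in place of the driver's VFS (the path and program shape are illustrative; the driver code above is authoritative):

#include <filesystem>
#include <iostream>
#include <string>

// Pick the /opt/rh entry named gcc-toolset-<N> or devtoolset-<N> with the
// largest <N>; the GCC prefix is then <dir>/root/usr.
int main() {
  namespace fs = std::filesystem;
  std::string Chosen;
  unsigned Best = 0;
  std::error_code EC;
  for (const auto &Entry : fs::directory_iterator("/opt/rh", EC)) {
    std::string Name = Entry.path().filename().string();
    if (Name.rfind("gcc-toolset-", 0) != 0 &&
        Name.rfind("devtoolset-", 0) != 0)
      continue;
    try {
      unsigned V = std::stoul(Name.substr(Name.rfind('-') + 1));
      if (V > Best) {
        Best = V;
        Chosen = Entry.path().string();
      }
    } catch (...) {
      // Non-numeric version suffix: skip, as the driver does.
    }
  }
  if (Best)
    std::cout << Chosen << "/root/usr\n"; // e.g. /opt/rh/gcc-toolset-12/root/usr
}
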
@@ -2092,6 +2197,10 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
static const char *const AVRLibDirs[] = {"/lib"};
static const char *const AVRTriples[] = {"avr"};
+ static const char *const CSKYLibDirs[] = {"/lib"};
+ static const char *const CSKYTriples[] = {
+ "csky-linux-gnuabiv2", "csky-linux-uclibcabiv2", "csky-elf-noneabiv2"};
+
static const char *const X86_64LibDirs[] = {"/lib64", "/lib"};
static const char *const X86_64Triples[] = {
"x86_64-linux-gnu", "x86_64-unknown-linux-gnu",
@@ -2326,6 +2435,10 @@ void Generic_GCC::GCCInstallationDetector::AddDefaultGCCPrefixes(
LibDirs.append(begin(AVRLibDirs), end(AVRLibDirs));
TripleAliases.append(begin(AVRTriples), end(AVRTriples));
break;
+ case llvm::Triple::csky:
+ LibDirs.append(begin(CSKYLibDirs), end(CSKYLibDirs));
+ TripleAliases.append(begin(CSKYTriples), end(CSKYTriples));
+ break;
case llvm::Triple::x86_64:
if (TargetTriple.isX32()) {
LibDirs.append(begin(X32LibDirs), end(X32LibDirs));
@@ -2475,6 +2588,8 @@ bool Generic_GCC::GCCInstallationDetector::ScanGCCForMultilibs(
if (isArmOrThumbArch(TargetArch) && TargetTriple.isAndroid()) {
// It should also work without multilibs in a simplified toolchain.
findAndroidArmMultilibs(D, TargetTriple, Path, Args, Detected);
+ } else if (TargetTriple.isCSKY()) {
+ findCSKYMultilibs(D, TargetTriple, Path, Args, Detected);
} else if (TargetTriple.isMIPS()) {
if (!findMIPSMultilibs(D, TargetTriple, Path, Args, Detected))
return false;
@@ -2565,7 +2680,7 @@ bool Generic_GCC::GCCInstallationDetector::ScanGentooConfigs(
const llvm::Triple &TargetTriple, const ArgList &Args,
const SmallVectorImpl<StringRef> &CandidateTriples,
const SmallVectorImpl<StringRef> &CandidateBiarchTriples) {
- if (!D.getVFS().exists(D.SysRoot + GentooConfigDir))
+ if (!D.getVFS().exists(concat(D.SysRoot, GentooConfigDir)))
return false;
for (StringRef CandidateTriple : CandidateTriples) {
@@ -2584,8 +2699,8 @@ bool Generic_GCC::GCCInstallationDetector::ScanGentooGccConfig(
const llvm::Triple &TargetTriple, const ArgList &Args,
StringRef CandidateTriple, bool NeedsBiarchSuffix) {
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> File =
- D.getVFS().getBufferForFile(D.SysRoot + GentooConfigDir + "/config-" +
- CandidateTriple.str());
+ D.getVFS().getBufferForFile(concat(D.SysRoot, GentooConfigDir,
+ "/config-" + CandidateTriple.str()));
if (File) {
SmallVector<StringRef, 2> Lines;
File.get()->getBuffer().split(Lines, "\n");
@@ -2596,8 +2711,8 @@ bool Generic_GCC::GCCInstallationDetector::ScanGentooGccConfig(
continue;
// Process the config file pointed to by CURRENT.
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> ConfigFile =
- D.getVFS().getBufferForFile(D.SysRoot + GentooConfigDir + "/" +
- Line.str());
+ D.getVFS().getBufferForFile(
+ concat(D.SysRoot, GentooConfigDir, "/" + Line));
std::pair<StringRef, StringRef> ActiveVersion = Line.rsplit('-');
// List of paths to scan for libraries.
SmallVector<StringRef, 4> GentooScanPaths;
@@ -2630,7 +2745,7 @@ bool Generic_GCC::GCCInstallationDetector::ScanGentooGccConfig(
// Scan all paths for GCC libraries.
for (const auto &GentooScanPath : GentooScanPaths) {
- std::string GentooPath = D.SysRoot + std::string(GentooScanPath);
+ std::string GentooPath = concat(D.SysRoot, GentooScanPath);
if (D.getVFS().exists(GentooPath + "/crtbegin.o")) {
if (!ScanGCCForMultilibs(TargetTriple, Args, GentooPath,
NeedsBiarchSuffix))
@@ -2726,8 +2841,6 @@ bool Generic_GCC::isPICDefaultForced() const {
bool Generic_GCC::IsIntegratedAssemblerDefault() const {
switch (getTriple().getArch()) {
- case llvm::Triple::x86:
- case llvm::Triple::x86_64:
case llvm::Triple::aarch64:
case llvm::Triple::aarch64_be:
case llvm::Triple::arm:
@@ -2735,29 +2848,31 @@ bool Generic_GCC::IsIntegratedAssemblerDefault() const {
case llvm::Triple::avr:
case llvm::Triple::bpfel:
case llvm::Triple::bpfeb:
- case llvm::Triple::thumb:
- case llvm::Triple::thumbeb:
+ case llvm::Triple::csky:
+ case llvm::Triple::hexagon:
+ case llvm::Triple::lanai:
+ case llvm::Triple::m68k:
+ case llvm::Triple::mips:
+ case llvm::Triple::mipsel:
+ case llvm::Triple::mips64:
+ case llvm::Triple::mips64el:
+ case llvm::Triple::msp430:
case llvm::Triple::ppc:
case llvm::Triple::ppcle:
case llvm::Triple::ppc64:
case llvm::Triple::ppc64le:
case llvm::Triple::riscv32:
case llvm::Triple::riscv64:
- case llvm::Triple::systemz:
- case llvm::Triple::mips:
- case llvm::Triple::mipsel:
- case llvm::Triple::mips64:
- case llvm::Triple::mips64el:
- case llvm::Triple::msp430:
- case llvm::Triple::m68k:
- return true;
case llvm::Triple::sparc:
case llvm::Triple::sparcel:
case llvm::Triple::sparcv9:
- if (getTriple().isOSFreeBSD() || getTriple().isOSOpenBSD() ||
- getTriple().isOSSolaris())
- return true;
- return false;
+ case llvm::Triple::systemz:
+ case llvm::Triple::thumb:
+ case llvm::Triple::thumbeb:
+ case llvm::Triple::ve:
+ case llvm::Triple::x86:
+ case llvm::Triple::x86_64:
+ return true;
default:
return false;
}
@@ -2800,6 +2915,11 @@ void Generic_GCC::AddMultilibPaths(const Driver &D,
D, GCCInstallation.getInstallPath() + SelectedMultilib.gccSuffix(),
Paths);
+ // Add lib/gcc/$triple/$libdir
+ // For GCC built with --enable-version-specific-runtime-libs.
+ addPathIfExists(D, GCCInstallation.getInstallPath() + "/../" + OSLibDir,
+ Paths);
+
// GCC cross compiling toolchains will install target libraries which ship
// as part of the toolchain under <prefix>/<triple>/<libdir> rather than as
// any part of the GCC installation in
@@ -2918,9 +3038,9 @@ Generic_GCC::addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
// If this is a development, non-installed, clang, libcxx will
// not be found at ../include/c++ but it likely to be found at
// one of the following two locations:
- if (AddIncludePath(SysRoot + "/usr/local/include"))
+ if (AddIncludePath(concat(SysRoot, "/usr/local/include")))
return;
- if (AddIncludePath(SysRoot + "/usr/include"))
+ if (AddIncludePath(concat(SysRoot, "/usr/include")))
return;
}
@@ -2978,6 +3098,15 @@ bool Generic_GCC::addGCCLibStdCxxIncludePaths(
TripleStr, Multilib.includeSuffix(), DriverArgs, CC1Args))
return true;
+ // Try /gcc/$triple/$version/include/c++/ (gcc --print-multiarch is not
+ // empty). Like above but for GCC built with
+ // --enable-version-specific-runtime-libs.
+ if (addLibStdCXXIncludePaths(LibDir.str() + "/gcc/" + TripleStr + "/" +
+ Version.Text + "/include/c++/",
+ TripleStr, Multilib.includeSuffix(), DriverArgs,
+ CC1Args))
+ return true;
+
// Detect Debian g++-multiarch-incdir.diff.
if (addLibStdCXXIncludePaths(LibDir.str() + "/../include/c++/" + Version.Text,
DebianMultiarch, Multilib.includeSuffix(),
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.cpp
index 6d553791b394..b31077c1fd3b 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.cpp
@@ -47,7 +47,7 @@ static bool shouldSkipSanitizeOption(const ToolChain &TC,
return false;
if (!DriverArgs.hasFlag(options::OPT_fgpu_sanitize,
- -options::OPT_fno_gpu_sanitize))
+ options::OPT_fno_gpu_sanitize, true))
return true;
auto &Diags = TC.getDriver().getDiags();
@@ -72,6 +72,36 @@ static bool shouldSkipSanitizeOption(const ToolChain &TC,
return false;
}
+void AMDGCN::Linker::constructLlvmLinkCommand(Compilation &C,
+ const JobAction &JA,
+ const InputInfoList &Inputs,
+ const InputInfo &Output,
+ const llvm::opt::ArgList &Args) const {
+ // Construct llvm-link command.
+ // The output from llvm-link is a bitcode file.
+ ArgStringList LlvmLinkArgs;
+
+ assert(!Inputs.empty() && "Must have at least one input.");
+
+ LlvmLinkArgs.append({"-o", Output.getFilename()});
+ for (auto Input : Inputs)
+ LlvmLinkArgs.push_back(Input.getFilename());
+
+ // Look for archive of bundled bitcode in arguments, and add temporary files
+ // for the extracted archive of bitcode to inputs.
+ auto TargetID = Args.getLastArgValue(options::OPT_mcpu_EQ);
+ AddStaticDeviceLibsLinking(C, *this, JA, Inputs, Args, LlvmLinkArgs, "amdgcn",
+ TargetID,
+ /*IsBitCodeSDL=*/true,
+ /*PostClangLink=*/false);
+
+ const char *LlvmLink =
+ Args.MakeArgString(getToolChain().GetProgramPath("llvm-link"));
+ C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
+ LlvmLink, LlvmLinkArgs, Inputs,
+ Output));
+}
+
void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs,
const InputInfo &Output,
@@ -117,17 +147,29 @@ void AMDGCN::Linker::constructLldCommand(Compilation &C, const JobAction &JA,
addLinkerCompressDebugSectionsOption(TC, Args, LldArgs);
+ for (auto *Arg : Args.filtered(options::OPT_Xoffload_linker))
+ LldArgs.push_back(Arg->getValue(1));
+
LldArgs.append({"-o", Output.getFilename()});
for (auto Input : Inputs)
LldArgs.push_back(Input.getFilename());
+ // Look for archive of bundled bitcode in arguments, and add temporary files
+ // for the extracted archive of bitcode to inputs.
+ auto TargetID = Args.getLastArgValue(options::OPT_mcpu_EQ);
+ AddStaticDeviceLibsLinking(C, *this, JA, Inputs, Args, LldArgs, "amdgcn",
+ TargetID,
+ /*IsBitCodeSDL=*/true,
+ /*PostClangLink=*/false);
+
const char *Lld = Args.MakeArgString(getToolChain().GetProgramPath("lld"));
C.addCommand(std::make_unique<Command>(JA, *this, ResponseFileSupport::None(),
Lld, LldArgs, Inputs, Output));
}
// For amdgcn the inputs of the linker job are device bitcode and output is
-// object file. It calls llvm-link, opt, llc, then lld steps.
+// either an object file or bitcode (-emit-llvm). It calls llvm-link, opt,
+// llc, then lld steps.
void AMDGCN::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
@@ -143,6 +185,9 @@ void AMDGCN::Linker::ConstructJob(Compilation &C, const JobAction &JA,
return HIP::constructHIPFatbinCommand(C, JA, Output.getFilename(), Inputs,
Args, *this);
+ if (JA.getType() == types::TY_LLVM_BC)
+ return constructLlvmLinkCommand(C, JA, Inputs, Output, Args);
+
return constructLldCommand(C, JA, Inputs, Output, Args);
}
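
With this change the AMDGCN device link job has three shapes, chosen by the job action's output type. A hedged mock of the dispatch (types collapsed to an enum; the real code inspects JA.getType()):

#include <iostream>

enum class DeviceLinkOutput { HIPFatbin, LLVMBitcode, Object };

// Mirror of the ConstructJob dispatch above: fat binaries go through
// clang-offload-bundler, -emit-llvm links merge bitcode with llvm-link,
// and everything else is linked to an ELF object with lld.
static const char *deviceLinkTool(DeviceLinkOutput T) {
  switch (T) {
  case DeviceLinkOutput::HIPFatbin:
    return "clang-offload-bundler";
  case DeviceLinkOutput::LLVMBitcode:
    return "llvm-link";
  case DeviceLinkOutput::Object:
    return "lld";
  }
  return nullptr;
}

int main() { std::cout << deviceLinkTool(DeviceLinkOutput::LLVMBitcode) << "\n"; }
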
@@ -154,6 +199,9 @@ HIPAMDToolChain::HIPAMDToolChain(const Driver &D, const llvm::Triple &Triple,
getProgramPaths().push_back(getDriver().Dir);
// Diagnose unsupported sanitizer options only once.
+ if (!Args.hasFlag(options::OPT_fgpu_sanitize, options::OPT_fno_gpu_sanitize,
+ true))
+ return;
for (auto A : Args.filtered(options::OPT_fsanitize_EQ)) {
SanitizerMask K = parseSanitizerValue(A->getValue(), /*AllowGroups=*/false);
if (K != SanitizerKind::Address)
@@ -198,11 +246,11 @@ void HIPAMDToolChain::addClangTargetOptions(
CC1Args.push_back("-fapply-global-visibility-to-externs");
}
- llvm::for_each(getHIPDeviceLibs(DriverArgs), [&](auto BCFile) {
+ for (auto BCFile : getHIPDeviceLibs(DriverArgs)) {
CC1Args.push_back(BCFile.ShouldInternalize ? "-mlink-builtin-bitcode"
: "-mlink-bitcode-file");
CC1Args.push_back(DriverArgs.MakeArgString(BCFile.Path));
- });
+ }
}
llvm::opt::DerivedArgList *
@@ -322,7 +370,7 @@ HIPAMDToolChain::getHIPDeviceLibs(const llvm::opt::ArgList &DriverArgs) const {
// If --hip-device-lib is not set, add the default bitcode libraries.
if (DriverArgs.hasFlag(options::OPT_fgpu_sanitize,
- options::OPT_fno_gpu_sanitize) &&
+ options::OPT_fno_gpu_sanitize, true) &&
getSanitizerArgs(DriverArgs).needsAsanRt()) {
auto AsanRTL = RocmInstallation.getAsanRTLPath();
if (AsanRTL.empty()) {
@@ -363,6 +411,6 @@ void HIPAMDToolChain::checkTargetID(
auto PTID = getParsedTargetID(DriverArgs);
if (PTID.OptionalTargetID && !PTID.OptionalGPUArch) {
getDriver().Diag(clang::diag::err_drv_bad_target_id)
- << PTID.OptionalTargetID.getValue();
+ << *PTID.OptionalTargetID;
}
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.h
index cc472a595db9..25d4a998e500 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPAMD.h
@@ -36,6 +36,10 @@ private:
void constructLldCommand(Compilation &C, const JobAction &JA,
const InputInfoList &Inputs, const InputInfo &Output,
const llvm::opt::ArgList &Args) const;
+ void constructLlvmLinkCommand(Compilation &C, const JobAction &JA,
+ const InputInfoList &Inputs,
+ const InputInfo &Output,
+ const llvm::opt::ArgList &Args) const;
};
} // end namespace AMDGCN
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.cpp
index 1b04a20bacbf..6f8c563c22aa 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HIPUtility.cpp
@@ -53,8 +53,6 @@ void HIP::constructHIPFatbinCommand(Compilation &C, const JobAction &JA,
// ToDo: Remove the dummy host binary entry which is required by
// clang-offload-bundler.
std::string BundlerTargetArg = "-targets=host-x86_64-unknown-linux";
- std::string BundlerInputArg = "-inputs=" NULL_FILE;
-
// AMDGCN:
// For code object version 2 and 3, the offload kind in bundle ID is 'hip'
// for backward compatibility. For code object version 4 and greater, the
@@ -70,14 +68,20 @@ void HIP::constructHIPFatbinCommand(Compilation &C, const JobAction &JA,
"," + OffloadKind + "-" + normalizeForBundler(TT, !ArchStr.empty());
if (!ArchStr.empty())
BundlerTargetArg += "-" + ArchStr.str();
- BundlerInputArg = BundlerInputArg + "," + II.getFilename();
}
BundlerArgs.push_back(Args.MakeArgString(BundlerTargetArg));
+
+ // Use a NULL file as input for the dummy host binary entry
+ std::string BundlerInputArg = "-input=" NULL_FILE;
BundlerArgs.push_back(Args.MakeArgString(BundlerInputArg));
+ for (const auto &II : Inputs) {
+ BundlerInputArg = std::string("-input=") + II.getFilename();
+ BundlerArgs.push_back(Args.MakeArgString(BundlerInputArg));
+ }
std::string Output = std::string(OutputFileName);
auto *BundlerOutputArg =
- Args.MakeArgString(std::string("-outputs=").append(Output));
+ Args.MakeArgString(std::string("-output=").append(Output));
BundlerArgs.push_back(BundlerOutputArg);
const char *Bundler = Args.MakeArgString(
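
The net effect of this hunk is that clang-offload-bundler now receives one -input=/-output= per file instead of comma-joined -inputs=/-outputs= lists, with the null device standing in for the dummy host entry. An illustrative invocation for a single gfx906 device image (file names hypothetical):

  clang-offload-bundler -type=o \
    -targets=host-x86_64-unknown-linux,hipv4-amdgcn-amd-amdhsa--gfx906 \
    -input=/dev/null -input=main-hip-gfx906.o -output=main.hipfb
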
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.cpp
new file mode 100644
index 000000000000..584e00bb7f05
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.cpp
@@ -0,0 +1,183 @@
+//===--- HLSL.cpp - HLSL ToolChain Implementations --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "HLSL.h"
+#include "CommonArgs.h"
+#include "clang/Driver/DriverDiagnostic.h"
+#include "llvm/ADT/StringSwitch.h"
+#include "llvm/ADT/Triple.h"
+
+using namespace clang::driver;
+using namespace clang::driver::tools;
+using namespace clang::driver::toolchains;
+using namespace clang;
+using namespace llvm::opt;
+using namespace llvm;
+
+namespace {
+
+const unsigned OfflineLibMinor = 0xF;
+
+bool isLegalShaderModel(Triple &T) {
+ if (T.getOS() != Triple::OSType::ShaderModel)
+ return false;
+
+ auto Version = T.getOSVersion();
+ if (Version.getBuild())
+ return false;
+ if (Version.getSubminor())
+ return false;
+
+ auto Kind = T.getEnvironment();
+
+ switch (Kind) {
+ default:
+ return false;
+ case Triple::EnvironmentType::Vertex:
+ case Triple::EnvironmentType::Hull:
+ case Triple::EnvironmentType::Domain:
+ case Triple::EnvironmentType::Geometry:
+ case Triple::EnvironmentType::Pixel:
+ case Triple::EnvironmentType::Compute: {
+ VersionTuple MinVer(4, 0);
+ return MinVer <= Version;
+ } break;
+ case Triple::EnvironmentType::Library: {
+ VersionTuple SM6x(6, OfflineLibMinor);
+ if (Version == SM6x)
+ return true;
+
+ VersionTuple MinVer(6, 3);
+ return MinVer <= Version;
+ } break;
+ case Triple::EnvironmentType::Amplification:
+ case Triple::EnvironmentType::Mesh: {
+ VersionTuple MinVer(6, 5);
+ return MinVer <= Version;
+ } break;
+ }
+ return false;
+}
+
+llvm::Optional<std::string> tryParseProfile(StringRef Profile) {
+ // [ps|vs|gs|hs|ds|cs|ms|as]_[major]_[minor]
+ SmallVector<StringRef, 3> Parts;
+ Profile.split(Parts, "_");
+ if (Parts.size() != 3)
+ return NoneType();
+
+ Triple::EnvironmentType Kind =
+ StringSwitch<Triple::EnvironmentType>(Parts[0])
+ .Case("ps", Triple::EnvironmentType::Pixel)
+ .Case("vs", Triple::EnvironmentType::Vertex)
+ .Case("gs", Triple::EnvironmentType::Geometry)
+ .Case("hs", Triple::EnvironmentType::Hull)
+ .Case("ds", Triple::EnvironmentType::Domain)
+ .Case("cs", Triple::EnvironmentType::Compute)
+ .Case("lib", Triple::EnvironmentType::Library)
+ .Case("ms", Triple::EnvironmentType::Mesh)
+ .Case("as", Triple::EnvironmentType::Amplification)
+ .Default(Triple::EnvironmentType::UnknownEnvironment);
+ if (Kind == Triple::EnvironmentType::UnknownEnvironment)
+ return NoneType();
+
+ unsigned long long Major = 0;
+ if (llvm::getAsUnsignedInteger(Parts[1], 0, Major))
+ return NoneType();
+
+ unsigned long long Minor = 0;
+ if (Parts[2] == "x" && Kind == Triple::EnvironmentType::Library)
+ Minor = OfflineLibMinor;
+ else if (llvm::getAsUnsignedInteger(Parts[2], 0, Minor))
+ return NoneType();
+
+ // dxil-unknown-shadermodel-hull
+ llvm::Triple T;
+ T.setArch(Triple::ArchType::dxil);
+ T.setOSName(Triple::getOSTypeName(Triple::OSType::ShaderModel).str() +
+ VersionTuple(Major, Minor).getAsString());
+ T.setEnvironment(Kind);
+ if (isLegalShaderModel(T))
+ return T.getTriple();
+ else
+ return NoneType();
+}
+
+bool isLegalValidatorVersion(StringRef ValVersionStr, const Driver &D) {
+ VersionTuple Version;
+ if (Version.tryParse(ValVersionStr) || Version.getBuild() ||
+ Version.getSubminor() || !Version.getMinor()) {
+ D.Diag(diag::err_drv_invalid_format_dxil_validator_version)
+ << ValVersionStr;
+ return false;
+ }
+
+ uint64_t Major = Version.getMajor();
+ uint64_t Minor = *Version.getMinor();
+ if (Major == 0 && Minor != 0) {
+ D.Diag(diag::err_drv_invalid_empty_dxil_validator_version) << ValVersionStr;
+ return false;
+ }
+ VersionTuple MinVer(1, 0);
+ if (Version < MinVer) {
+ D.Diag(diag::err_drv_invalid_range_dxil_validator_version) << ValVersionStr;
+ return false;
+ }
+ return true;
+}
+
+} // namespace
+
+/// DirectX Toolchain
+HLSLToolChain::HLSLToolChain(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args)
+ : ToolChain(D, Triple, Args) {}
+
+llvm::Optional<std::string>
+clang::driver::toolchains::HLSLToolChain::parseTargetProfile(
+ StringRef TargetProfile) {
+ return tryParseProfile(TargetProfile);
+}
+
+DerivedArgList *
+HLSLToolChain::TranslateArgs(const DerivedArgList &Args, StringRef BoundArch,
+ Action::OffloadKind DeviceOffloadKind) const {
+ DerivedArgList *DAL = new DerivedArgList(Args.getBaseArgs());
+
+ const OptTable &Opts = getDriver().getOpts();
+
+ for (Arg *A : Args) {
+ if (A->getOption().getID() == options::OPT_dxil_validator_version) {
+ StringRef ValVerStr = A->getValue();
+ std::string ErrorMsg;
+ if (!isLegalValidatorVersion(ValVerStr, getDriver()))
+ continue;
+ }
+ if (A->getOption().getID() == options::OPT_emit_pristine_llvm) {
+ // Translate fcgl into -S -emit-llvm and -disable-llvm-passes.
+ DAL->AddFlagArg(nullptr, Opts.getOption(options::OPT_S));
+ DAL->AddFlagArg(nullptr, Opts.getOption(options::OPT_emit_llvm));
+ DAL->AddFlagArg(nullptr,
+ Opts.getOption(options::OPT_disable_llvm_passes));
+ A->claim();
+ continue;
+ }
+ DAL->append(A);
+ }
+ // Add default validator version if not set.
+ // TODO: remove this once the validator version can be read from the
+ // validator itself.
+ if (!DAL->hasArg(options::OPT_dxil_validator_version)) {
+ const StringRef DefaultValidatorVer = "1.7";
+ DAL->AddSeparateArg(nullptr,
+ Opts.getOption(options::OPT_dxil_validator_version),
+ DefaultValidatorVer);
+ }
+ // FIXME: validate that enable_16bit_types is only used with HLSL 2018 or
+ // later and shader model 6.2 or later.
+ return DAL;
+}
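
A usage sketch of the new profile parsing, assuming in-tree access to the driver headers (not a standalone program): per the comments above, a well-formed profile maps to a dxil shader-model triple, and anything malformed or below the minimum shader model comes back as None.

#include "clang/Driver/ToolChains/HLSL.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>

void demo() {
  using clang::driver::toolchains::HLSLToolChain;
  // "cs_6_0" -> a triple of the form "dxil-unknown-shadermodel6.0-compute".
  if (llvm::Optional<std::string> T =
          HLSLToolChain::parseTargetProfile("cs_6_0"))
    llvm::outs() << *T << "\n";
  // Below the shader model 4.0 floor for compute, so rejected.
  assert(!HLSLToolChain::parseTargetProfile("cs_3_0"));
}
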
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.h
new file mode 100644
index 000000000000..5573b0cc69e2
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/HLSL.h
@@ -0,0 +1,40 @@
+//===--- HLSL.h - HLSL ToolChain Implementations ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HLSL_H
+#define LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HLSL_H
+
+#include "clang/Driver/ToolChain.h"
+
+namespace clang {
+namespace driver {
+
+namespace toolchains {
+
+class LLVM_LIBRARY_VISIBILITY HLSLToolChain : public ToolChain {
+public:
+ HLSLToolChain(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+ bool isPICDefault() const override { return false; }
+ bool isPIEDefault(const llvm::opt::ArgList &Args) const override {
+ return false;
+ }
+ bool isPICDefaultForced() const override { return false; }
+
+ llvm::opt::DerivedArgList *
+ TranslateArgs(const llvm::opt::DerivedArgList &Args, StringRef BoundArch,
+ Action::OffloadKind DeviceOffloadKind) const override;
+ static llvm::Optional<std::string>
+ parseTargetProfile(StringRef TargetProfile);
+};
+
+} // end namespace toolchains
+} // end namespace driver
+} // end namespace clang
+
+#endif // LLVM_CLANG_LIB_DRIVER_TOOLCHAINS_HLSL_H
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp
index e772122f5ff5..9142dba81d54 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.cpp
@@ -230,7 +230,7 @@ void hexagon::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
}
if (auto G = toolchains::HexagonToolChain::getSmallDataThreshold(Args)) {
- CmdArgs.push_back(Args.MakeArgString("-gpsize=" + Twine(G.getValue())));
+ CmdArgs.push_back(Args.MakeArgString("-gpsize=" + Twine(*G)));
}
Args.AddAllArgValues(CmdArgs, options::OPT_Wa_COMMA, options::OPT_Xassembler);
@@ -551,8 +551,7 @@ void HexagonToolChain::getHexagonLibraryPaths(const ArgList &Args,
// -L Args
//----------------------------------------------------------------------------
for (Arg *A : Args.filtered(options::OPT_L))
- for (const char *Value : A->getValues())
- LibPaths.push_back(Value);
+ llvm::append_range(LibPaths, A->getValues());
//----------------------------------------------------------------------------
// Other standard paths
@@ -570,7 +569,7 @@ void HexagonToolChain::getHexagonLibraryPaths(const ArgList &Args,
// Assume G0 with -shared.
bool HasG0 = Args.hasArg(options::OPT_shared);
if (auto G = getSmallDataThreshold(Args))
- HasG0 = G.getValue() == 0;
+ HasG0 = *G == 0;
const std::string CpuVer = GetTargetCPUVersion(Args).str();
for (auto &Dir : RootDirs) {
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.h
index 899630555352..c742012444b4 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Hexagon.h
@@ -94,9 +94,6 @@ public:
llvm::opt::ArgStringList &CmdArgs) const override;
StringRef GetGCCLibAndIncVersion() const { return GCCLibAndIncVersion.Text; }
- bool IsIntegratedAssemblerDefault() const override {
- return true;
- }
std::string getHexagonTargetDir(
const std::string &InstalledDir,
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Lanai.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Lanai.h
index dc04b0cfe2ee..33701f7cc045 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Lanai.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Lanai.h
@@ -29,8 +29,6 @@ public:
void addLibStdCxxIncludePaths(
const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override {}
-
- bool IsIntegratedAssemblerDefault() const override { return true; }
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp
index 83cb41159de7..ceb1a982c3a4 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.cpp
@@ -97,9 +97,9 @@ std::string Linux::getMultiarchTriple(const Driver &D,
case llvm::Triple::mips64: {
std::string MT = std::string(IsMipsR6 ? "mipsisa64r6" : "mips64") +
"-linux-" + (IsMipsN32Abi ? "gnuabin32" : "gnuabi64");
- if (D.getVFS().exists(SysRoot + "/lib/" + MT))
+ if (D.getVFS().exists(concat(SysRoot, "/lib", MT)))
return MT;
- if (D.getVFS().exists(SysRoot + "/lib/mips64-linux-gnu"))
+ if (D.getVFS().exists(concat(SysRoot, "/lib/mips64-linux-gnu")))
return "mips64-linux-gnu";
break;
}
@@ -108,14 +108,14 @@ std::string Linux::getMultiarchTriple(const Driver &D,
return "mips64el-linux-android";
std::string MT = std::string(IsMipsR6 ? "mipsisa64r6el" : "mips64el") +
"-linux-" + (IsMipsN32Abi ? "gnuabin32" : "gnuabi64");
- if (D.getVFS().exists(SysRoot + "/lib/" + MT))
+ if (D.getVFS().exists(concat(SysRoot, "/lib", MT)))
return MT;
- if (D.getVFS().exists(SysRoot + "/lib/mips64el-linux-gnu"))
+ if (D.getVFS().exists(concat(SysRoot, "/lib/mips64el-linux-gnu")))
return "mips64el-linux-gnu";
break;
}
case llvm::Triple::ppc:
- if (D.getVFS().exists(SysRoot + "/lib/powerpc-linux-gnuspe"))
+ if (D.getVFS().exists(concat(SysRoot, "/lib/powerpc-linux-gnuspe")))
return "powerpc-linux-gnuspe";
return "powerpc-linux-gnu";
case llvm::Triple::ppcle:
@@ -124,6 +124,8 @@ std::string Linux::getMultiarchTriple(const Driver &D,
return "powerpc64-linux-gnu";
case llvm::Triple::ppc64le:
return "powerpc64le-linux-gnu";
+ case llvm::Triple::riscv64:
+ return "riscv64-linux-gnu";
case llvm::Triple::sparc:
return "sparc-linux-gnu";
case llvm::Triple::sparcv9:
@@ -221,8 +223,12 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
const bool IsMips = Triple.isMIPS();
const bool IsHexagon = Arch == llvm::Triple::hexagon;
const bool IsRISCV = Triple.isRISCV();
+ const bool IsCSKY = Triple.isCSKY();
- if (IsMips && !SysRoot.empty())
+ if (IsCSKY)
+ SysRoot = SysRoot + SelectedMultilib.osSuffix();
+
+ if ((IsMips || IsCSKY) && !SysRoot.empty())
ExtraOpts.push_back("--sysroot=" + SysRoot);
// Do not use 'gnu' hash style for Mips targets because .gnu.hash
@@ -265,13 +271,13 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
// used. We need add both libo32 and /lib.
if (Arch == llvm::Triple::mips || Arch == llvm::Triple::mipsel) {
Generic_GCC::AddMultilibPaths(D, SysRoot, "libo32", MultiarchTriple, Paths);
- addPathIfExists(D, SysRoot + "/libo32", Paths);
- addPathIfExists(D, SysRoot + "/usr/libo32", Paths);
+ addPathIfExists(D, concat(SysRoot, "/libo32"), Paths);
+ addPathIfExists(D, concat(SysRoot, "/usr/libo32"), Paths);
}
Generic_GCC::AddMultilibPaths(D, SysRoot, OSLibDir, MultiarchTriple, Paths);
- addPathIfExists(D, SysRoot + "/lib/" + MultiarchTriple, Paths);
- addPathIfExists(D, SysRoot + "/lib/../" + OSLibDir, Paths);
+ addPathIfExists(D, concat(SysRoot, "/lib", MultiarchTriple), Paths);
+ addPathIfExists(D, concat(SysRoot, "/lib/..", OSLibDir), Paths);
if (IsAndroid) {
// Android sysroots contain a library directory for each supported OS
@@ -279,24 +285,24 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
// directory.
addPathIfExists(
D,
- SysRoot + "/usr/lib/" + MultiarchTriple + "/" +
- llvm::to_string(Triple.getEnvironmentVersion().getMajor()),
+ concat(SysRoot, "/usr/lib", MultiarchTriple,
+ llvm::to_string(Triple.getEnvironmentVersion().getMajor())),
Paths);
}
- addPathIfExists(D, SysRoot + "/usr/lib/" + MultiarchTriple, Paths);
+ addPathIfExists(D, concat(SysRoot, "/usr/lib", MultiarchTriple), Paths);
// 64-bit OpenEmbedded sysroots may not have a /usr/lib dir. So they cannot
// find /usr/lib64 as it is referenced as /usr/lib/../lib64. So we handle
// this here.
if (Triple.getVendor() == llvm::Triple::OpenEmbedded &&
Triple.isArch64Bit())
- addPathIfExists(D, SysRoot + "/usr/" + OSLibDir, Paths);
+ addPathIfExists(D, concat(SysRoot, "/usr", OSLibDir), Paths);
else
- addPathIfExists(D, SysRoot + "/usr/lib/../" + OSLibDir, Paths);
+ addPathIfExists(D, concat(SysRoot, "/usr/lib/..", OSLibDir), Paths);
if (IsRISCV) {
StringRef ABIName = tools::riscv::getRISCVABI(Args, Triple);
- addPathIfExists(D, SysRoot + "/" + OSLibDir + "/" + ABIName, Paths);
- addPathIfExists(D, SysRoot + "/usr/" + OSLibDir + "/" + ABIName, Paths);
+ addPathIfExists(D, concat(SysRoot, "/", OSLibDir, ABIName), Paths);
+ addPathIfExists(D, concat(SysRoot, "/usr", OSLibDir, ABIName), Paths);
}
Generic_GCC::AddMultiarchPaths(D, SysRoot, OSLibDir, Paths);
@@ -308,8 +314,8 @@ Linux::Linux(const Driver &D, const llvm::Triple &Triple, const ArgList &Args)
D.getVFS().exists(D.Dir + "/../lib/libc++.so"))
addPathIfExists(D, D.Dir + "/../lib", Paths);
- addPathIfExists(D, SysRoot + "/lib", Paths);
- addPathIfExists(D, SysRoot + "/usr/lib", Paths);
+ addPathIfExists(D, concat(SysRoot, "/lib"), Paths);
+ addPathIfExists(D, concat(SysRoot, "/usr/lib"), Paths);
}
ToolChain::RuntimeLibType Linux::GetDefaultRuntimeLibType() const {
@@ -355,6 +361,21 @@ std::string Linux::computeSysRoot() const {
return AndroidSysRootPath;
}
+ if (getTriple().isCSKY()) {
+ // CSKY toolchains use a different name for the sysroot folder.
+ if (!GCCInstallation.isValid())
+ return std::string();
+ // GCCInstallation.getInstallPath() =
+ // $GCCToolchainPath/lib/gcc/csky-linux-gnuabiv2/6.3.0
+ // Path = $GCCToolchainPath/csky-linux-gnuabiv2/libc
+ std::string Path = (GCCInstallation.getInstallPath() + "/../../../../" +
+ GCCInstallation.getTriple().str() + "/libc")
+ .str();
+ if (getVFS().exists(Path))
+ return Path;
+ return std::string();
+ }
+
if (!GCCInstallation.isValid() || !getTriple().isMIPS())
return std::string();
@@ -531,6 +552,11 @@ std::string Linux::getDynamicLinker(const ArgList &Args) const {
}
case llvm::Triple::ve:
return "/opt/nec/ve/lib/ld-linux-ve.so.1";
+ case llvm::Triple::csky: {
+ LibDir = "lib";
+ Loader = "ld.so.1";
+ break;
+ }
}
if (Distro == Distro::Exherbo &&
@@ -562,7 +588,7 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
return;
// LOCAL_INCLUDE_DIR
- addSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/local/include");
+ addSystemInclude(DriverArgs, CC1Args, concat(SysRoot, "/usr/local/include"));
// TOOL_INCLUDE_DIR
AddMultilibIncludeArgs(DriverArgs, CC1Args);
@@ -583,9 +609,10 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
// /usr/include.
std::string MultiarchIncludeDir = getMultiarchTriple(D, getTriple(), SysRoot);
if (!MultiarchIncludeDir.empty() &&
- D.getVFS().exists(SysRoot + "/usr/include/" + MultiarchIncludeDir))
- addExternCSystemInclude(DriverArgs, CC1Args,
- SysRoot + "/usr/include/" + MultiarchIncludeDir);
+ D.getVFS().exists(concat(SysRoot, "/usr/include", MultiarchIncludeDir)))
+ addExternCSystemInclude(
+ DriverArgs, CC1Args,
+ concat(SysRoot, "/usr/include", MultiarchIncludeDir));
if (getTriple().getOS() == llvm::Triple::RTEMS)
return;
@@ -593,9 +620,9 @@ void Linux::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
// Add an include of '/include' directly. This isn't provided by default by
// system GCCs, but is often used with cross-compiling GCCs, and harmless to
// add even when Clang is acting as-if it were a system compiler.
- addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + "/include");
+ addExternCSystemInclude(DriverArgs, CC1Args, concat(SysRoot, "/include"));
- addExternCSystemInclude(DriverArgs, CC1Args, SysRoot + "/usr/include");
+ addExternCSystemInclude(DriverArgs, CC1Args, concat(SysRoot, "/usr/include"));
if (!DriverArgs.hasArg(options::OPT_nobuiltininc) && getTriple().isMusl())
addSystemInclude(DriverArgs, CC1Args, ResourceDirInclude);
@@ -652,6 +679,15 @@ void Linux::AddHIPIncludeArgs(const ArgList &DriverArgs,
RocmInstallation.AddHIPIncludeArgs(DriverArgs, CC1Args);
}
+void Linux::AddHIPRuntimeLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ CmdArgs.append(
+ {Args.MakeArgString(StringRef("-L") + RocmInstallation.getLibPath()),
+ "-rpath", Args.MakeArgString(RocmInstallation.getLibPath())});
+
+ CmdArgs.push_back("-lamdhip64");
+}
+
void Linux::AddIAMCUIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
if (GCCInstallation.isValid()) {
@@ -663,8 +699,11 @@ void Linux::AddIAMCUIncludeArgs(const ArgList &DriverArgs,
}
bool Linux::isPIEDefault(const llvm::opt::ArgList &Args) const {
- return CLANG_DEFAULT_PIE_ON_LINUX || getTriple().isAndroid() ||
- getTriple().isMusl() || getSanitizerArgs(Args).requiresPIE();
+ // TODO: Remove the special treatment for Flang once its frontend driver can
+ // generate position independent code.
+ return !getDriver().IsFlangMode() &&
+ (CLANG_DEFAULT_PIE_ON_LINUX || getTriple().isAndroid() ||
+ getTriple().isMusl() || getSanitizerArgs(Args).requiresPIE());
}
bool Linux::IsAArch64OutlineAtomicsDefault(const ArgList &Args) const {
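
The new rule in isPIEDefault can be restated as a pure predicate; a hedged sketch (parameter names illustrative):

// PIE is never the default in Flang mode (its frontend cannot generate
// position-independent code yet); otherwise it is the default when the
// build-time setting, Android, musl, or a PIE-requiring sanitizer demands it.
static bool pieDefault(bool FlangMode, bool PieOnLinuxDefault, bool Android,
                       bool Musl, bool SanitizerNeedsPIE) {
  return !FlangMode &&
         (PieOnLinuxDefault || Android || Musl || SanitizerNeedsPIE);
}
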
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.h
index a5648d79d655..188cb1f09788 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Linux.h
@@ -37,6 +37,8 @@ public:
llvm::opt::ArgStringList &CC1Args) const override;
void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ void AddHIPRuntimeLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
void AddIAMCUIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
RuntimeLibType GetDefaultRuntimeLibType() const override;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.cpp
index 96994ba77fac..bc789853049a 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSP430.cpp
@@ -101,7 +101,7 @@ void msp430::getMSP430TargetFeatures(const Driver &D, const ArgList &Args,
Features.push_back("+hwmultf5");
} else {
D.Diag(clang::diag::err_drv_unsupported_option_argument)
- << HWMultArg->getAsString(Args) << HWMult;
+ << HWMultArg->getOption().getName() << HWMult;
}
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp
index 9f4751167ac1..14ebe38ee191 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.cpp
@@ -40,91 +40,12 @@
#include <windows.h>
#endif
-#ifdef _MSC_VER
-// Don't support SetupApi on MinGW.
-#define USE_MSVC_SETUP_API
-
-// Make sure this comes before MSVCSetupApi.h
-#include <comdef.h>
-
-#ifdef __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wnon-virtual-dtor"
-#endif
-#include "MSVCSetupApi.h"
-#ifdef __clang__
-#pragma clang diagnostic pop
-#endif
-#include "llvm/Support/COM.h"
-_COM_SMARTPTR_TYPEDEF(ISetupConfiguration, __uuidof(ISetupConfiguration));
-_COM_SMARTPTR_TYPEDEF(ISetupConfiguration2, __uuidof(ISetupConfiguration2));
-_COM_SMARTPTR_TYPEDEF(ISetupHelper, __uuidof(ISetupHelper));
-_COM_SMARTPTR_TYPEDEF(IEnumSetupInstances, __uuidof(IEnumSetupInstances));
-_COM_SMARTPTR_TYPEDEF(ISetupInstance, __uuidof(ISetupInstance));
-_COM_SMARTPTR_TYPEDEF(ISetupInstance2, __uuidof(ISetupInstance2));
-#endif
-
using namespace clang::driver;
using namespace clang::driver::toolchains;
using namespace clang::driver::tools;
using namespace clang;
using namespace llvm::opt;
-// Windows SDKs and VC Toolchains group their contents into subdirectories based
-// on the target architecture. This function converts an llvm::Triple::ArchType
-// to the corresponding subdirectory name.
-static const char *llvmArchToWindowsSDKArch(llvm::Triple::ArchType Arch) {
- using ArchType = llvm::Triple::ArchType;
- switch (Arch) {
- case ArchType::x86:
- return "x86";
- case ArchType::x86_64:
- return "x64";
- case ArchType::arm:
- return "arm";
- case ArchType::aarch64:
- return "arm64";
- default:
- return "";
- }
-}
-
-// Similar to the above function, but for Visual Studios before VS2017.
-static const char *llvmArchToLegacyVCArch(llvm::Triple::ArchType Arch) {
- using ArchType = llvm::Triple::ArchType;
- switch (Arch) {
- case ArchType::x86:
- // x86 is default in legacy VC toolchains.
- // e.g. x86 libs are directly in /lib as opposed to /lib/x86.
- return "";
- case ArchType::x86_64:
- return "amd64";
- case ArchType::arm:
- return "arm";
- case ArchType::aarch64:
- return "arm64";
- default:
- return "";
- }
-}
-
-// Similar to the above function, but for DevDiv internal builds.
-static const char *llvmArchToDevDivInternalArch(llvm::Triple::ArchType Arch) {
- using ArchType = llvm::Triple::ArchType;
- switch (Arch) {
- case ArchType::x86:
- return "i386";
- case ArchType::x86_64:
- return "amd64";
- case ArchType::arm:
- return "arm";
- case ArchType::aarch64:
- return "arm64";
- default:
- return "";
- }
-}
-
static bool canExecute(llvm::vfs::FileSystem &VFS, StringRef Path) {
auto Status = VFS.status(Path);
if (!Status)
@@ -132,294 +53,6 @@ static bool canExecute(llvm::vfs::FileSystem &VFS, StringRef Path) {
return (Status->getPermissions() & llvm::sys::fs::perms::all_exe) != 0;
}
-// Defined below.
-// Forward declare this so there aren't too many things above the constructor.
-static bool getSystemRegistryString(const char *keyPath, const char *valueName,
- std::string &value, std::string *phValue);
-
-static std::string getHighestNumericTupleInDirectory(llvm::vfs::FileSystem &VFS,
- StringRef Directory) {
- std::string Highest;
- llvm::VersionTuple HighestTuple;
-
- std::error_code EC;
- for (llvm::vfs::directory_iterator DirIt = VFS.dir_begin(Directory, EC),
- DirEnd;
- !EC && DirIt != DirEnd; DirIt.increment(EC)) {
- auto Status = VFS.status(DirIt->path());
- if (!Status || !Status->isDirectory())
- continue;
- StringRef CandidateName = llvm::sys::path::filename(DirIt->path());
- llvm::VersionTuple Tuple;
- if (Tuple.tryParse(CandidateName)) // tryParse() returns true on error.
- continue;
- if (Tuple > HighestTuple) {
- HighestTuple = Tuple;
- Highest = CandidateName.str();
- }
- }
-
- return Highest;
-}
-
-// Check command line arguments to try and find a toolchain.
-static bool
-findVCToolChainViaCommandLine(llvm::vfs::FileSystem &VFS, const ArgList &Args,
- std::string &Path,
- MSVCToolChain::ToolsetLayout &VSLayout) {
- // Don't validate the input; trust the value supplied by the user.
- // The primary motivation is to prevent unnecessary file and registry access.
- if (Arg *A = Args.getLastArg(options::OPT__SLASH_vctoolsdir,
- options::OPT__SLASH_winsysroot)) {
- if (A->getOption().getID() == options::OPT__SLASH_winsysroot) {
- llvm::SmallString<128> ToolsPath(A->getValue());
- llvm::sys::path::append(ToolsPath, "VC", "Tools", "MSVC");
- std::string VCToolsVersion;
- if (Arg *A = Args.getLastArg(options::OPT__SLASH_vctoolsversion))
- VCToolsVersion = A->getValue();
- else
- VCToolsVersion = getHighestNumericTupleInDirectory(VFS, ToolsPath);
- llvm::sys::path::append(ToolsPath, VCToolsVersion);
- Path = std::string(ToolsPath.str());
- } else {
- Path = A->getValue();
- }
- VSLayout = MSVCToolChain::ToolsetLayout::VS2017OrNewer;
- return true;
- }
- return false;
-}
-
-// Check various environment variables to try and find a toolchain.
-static bool
-findVCToolChainViaEnvironment(llvm::vfs::FileSystem &VFS, std::string &Path,
- MSVCToolChain::ToolsetLayout &VSLayout) {
- // These variables are typically set by vcvarsall.bat
- // when launching a developer command prompt.
- if (llvm::Optional<std::string> VCToolsInstallDir =
- llvm::sys::Process::GetEnv("VCToolsInstallDir")) {
- // This is only set by newer Visual Studios, and it leads straight to
- // the toolchain directory.
- Path = std::move(*VCToolsInstallDir);
- VSLayout = MSVCToolChain::ToolsetLayout::VS2017OrNewer;
- return true;
- }
- if (llvm::Optional<std::string> VCInstallDir =
- llvm::sys::Process::GetEnv("VCINSTALLDIR")) {
- // If the previous variable isn't set but this one is, then we've found
- // an older Visual Studio. This variable is set by newer Visual Studios too,
- // so this check has to appear second.
- // In older Visual Studios, the VC directory is the toolchain.
- Path = std::move(*VCInstallDir);
- VSLayout = MSVCToolChain::ToolsetLayout::OlderVS;
- return true;
- }
-
- // We couldn't find any VC environment variables. Let's walk through PATH and
- // see if it leads us to a VC toolchain bin directory. If it does, pick the
- // first one that we find.
- if (llvm::Optional<std::string> PathEnv =
- llvm::sys::Process::GetEnv("PATH")) {
- llvm::SmallVector<llvm::StringRef, 8> PathEntries;
- llvm::StringRef(*PathEnv).split(PathEntries, llvm::sys::EnvPathSeparator);
- for (llvm::StringRef PathEntry : PathEntries) {
- if (PathEntry.empty())
- continue;
-
- llvm::SmallString<256> ExeTestPath;
-
- // If cl.exe doesn't exist, then this definitely isn't a VC toolchain.
- ExeTestPath = PathEntry;
- llvm::sys::path::append(ExeTestPath, "cl.exe");
- if (!VFS.exists(ExeTestPath))
- continue;
-
- // cl.exe existing isn't a conclusive test for a VC toolchain; clang also
- // has a cl.exe. So let's check for link.exe too.
- ExeTestPath = PathEntry;
- llvm::sys::path::append(ExeTestPath, "link.exe");
- if (!VFS.exists(ExeTestPath))
- continue;
-
- // whatever/VC/bin --> old toolchain, VC dir is toolchain dir.
- llvm::StringRef TestPath = PathEntry;
- bool IsBin =
- llvm::sys::path::filename(TestPath).equals_insensitive("bin");
- if (!IsBin) {
- // Strip any architecture subdir like "amd64".
- TestPath = llvm::sys::path::parent_path(TestPath);
- IsBin = llvm::sys::path::filename(TestPath).equals_insensitive("bin");
- }
- if (IsBin) {
- llvm::StringRef ParentPath = llvm::sys::path::parent_path(TestPath);
- llvm::StringRef ParentFilename = llvm::sys::path::filename(ParentPath);
- if (ParentFilename.equals_insensitive("VC")) {
- Path = std::string(ParentPath);
- VSLayout = MSVCToolChain::ToolsetLayout::OlderVS;
- return true;
- }
- if (ParentFilename.equals_insensitive("x86ret") ||
- ParentFilename.equals_insensitive("x86chk") ||
- ParentFilename.equals_insensitive("amd64ret") ||
- ParentFilename.equals_insensitive("amd64chk")) {
- Path = std::string(ParentPath);
- VSLayout = MSVCToolChain::ToolsetLayout::DevDivInternal;
- return true;
- }
-
- } else {
- // This could be a new (>=VS2017) toolchain. If it is, we should find
- // path components with these prefixes when walking backwards through
- // the path.
- // Note: empty strings match anything.
- llvm::StringRef ExpectedPrefixes[] = {"", "Host", "bin", "",
- "MSVC", "Tools", "VC"};
-
- auto It = llvm::sys::path::rbegin(PathEntry);
- auto End = llvm::sys::path::rend(PathEntry);
- for (llvm::StringRef Prefix : ExpectedPrefixes) {
- if (It == End)
- goto NotAToolChain;
- if (!It->startswith_insensitive(Prefix))
- goto NotAToolChain;
- ++It;
- }
-
- // We've found a new toolchain!
- // Back up 3 times (/bin/Host/arch) to get the root path.
- llvm::StringRef ToolChainPath(PathEntry);
- for (int i = 0; i < 3; ++i)
- ToolChainPath = llvm::sys::path::parent_path(ToolChainPath);
-
- Path = std::string(ToolChainPath);
- VSLayout = MSVCToolChain::ToolsetLayout::VS2017OrNewer;
- return true;
- }
-
- NotAToolChain:
- continue;
- }
- }
- return false;
-}
-
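The reversed-prefix walk in the function above is the subtle part, so here is a standalone illustration of the matching it performs. This is a sketch in plain C++17 with a hypothetical path; the real code uses llvm::sys::path::rbegin/rend and matches case-insensitively.

#include <filesystem>
#include <iostream>
#include <string>
#include <vector>

static bool looksLikeNewVCToolchainBin(const std::filesystem::path &P) {
  // Empty prefixes match any component, mirroring the note in the code above.
  const std::vector<std::string> Expected = {"",     "Host",  "bin", "",
                                             "MSVC", "Tools", "VC"};
  std::vector<std::string> Components;
  for (const auto &C : P)
    Components.push_back(C.string());
  auto It = Components.rbegin();
  for (const auto &Prefix : Expected) {
    if (It == Components.rend())
      return false;
    if (It->compare(0, Prefix.size(), Prefix) != 0) // "starts with" check
      return false;
    ++It;
  }
  return true;
}

int main() {
  // Matches: x64 -> "", Hostx64 -> "Host", bin -> "bin", 14.29.30133 -> "",
  // then MSVC, Tools, VC exactly; three parent_path() steps yield the root.
  std::cout << looksLikeNewVCToolchainBin(
                   "C:/VS/VC/Tools/MSVC/14.29.30133/bin/Hostx64/x64")
            << '\n'; // prints 1
}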
-// Query the Setup Config server for installs, then pick the newest version
-// and find its default VC toolchain.
-// This is the preferred way to discover new Visual Studios, as they're no
-// longer listed in the registry.
-static bool
-findVCToolChainViaSetupConfig(llvm::vfs::FileSystem &VFS, std::string &Path,
- MSVCToolChain::ToolsetLayout &VSLayout) {
-#if !defined(USE_MSVC_SETUP_API)
- return false;
-#else
- // FIXME: This really should be done once in the top-level program's main
- // function, as it may have already been initialized with a different
- // threading model otherwise.
- llvm::sys::InitializeCOMRAII COM(llvm::sys::COMThreadingMode::SingleThreaded);
- HRESULT HR;
-
-  // _com_ptr_t will throw a _com_error if a COM call fails.
- // The LLVM coding standards forbid exception handling, so we'll have to
- // stop them from being thrown in the first place.
- // The destructor will put the regular error handler back when we leave
- // this scope.
- struct SuppressCOMErrorsRAII {
- static void __stdcall handler(HRESULT hr, IErrorInfo *perrinfo) {}
-
- SuppressCOMErrorsRAII() { _set_com_error_handler(handler); }
-
- ~SuppressCOMErrorsRAII() { _set_com_error_handler(_com_raise_error); }
-
- } COMErrorSuppressor;
-
- ISetupConfigurationPtr Query;
- HR = Query.CreateInstance(__uuidof(SetupConfiguration));
- if (FAILED(HR))
- return false;
-
- IEnumSetupInstancesPtr EnumInstances;
- HR = ISetupConfiguration2Ptr(Query)->EnumAllInstances(&EnumInstances);
- if (FAILED(HR))
- return false;
-
- ISetupInstancePtr Instance;
- HR = EnumInstances->Next(1, &Instance, nullptr);
- if (HR != S_OK)
- return false;
-
- ISetupInstancePtr NewestInstance;
- Optional<uint64_t> NewestVersionNum;
- do {
- bstr_t VersionString;
- uint64_t VersionNum;
- HR = Instance->GetInstallationVersion(VersionString.GetAddress());
- if (FAILED(HR))
- continue;
- HR = ISetupHelperPtr(Query)->ParseVersion(VersionString, &VersionNum);
- if (FAILED(HR))
- continue;
- if (!NewestVersionNum || (VersionNum > NewestVersionNum)) {
- NewestInstance = Instance;
- NewestVersionNum = VersionNum;
- }
- } while ((HR = EnumInstances->Next(1, &Instance, nullptr)) == S_OK);
-
- if (!NewestInstance)
- return false;
-
- bstr_t VCPathWide;
- HR = NewestInstance->ResolvePath(L"VC", VCPathWide.GetAddress());
- if (FAILED(HR))
- return false;
-
- std::string VCRootPath;
- llvm::convertWideToUTF8(std::wstring(VCPathWide), VCRootPath);
-
- llvm::SmallString<256> ToolsVersionFilePath(VCRootPath);
- llvm::sys::path::append(ToolsVersionFilePath, "Auxiliary", "Build",
- "Microsoft.VCToolsVersion.default.txt");
-
- auto ToolsVersionFile = llvm::MemoryBuffer::getFile(ToolsVersionFilePath);
- if (!ToolsVersionFile)
- return false;
-
- llvm::SmallString<256> ToolchainPath(VCRootPath);
- llvm::sys::path::append(ToolchainPath, "Tools", "MSVC",
- ToolsVersionFile->get()->getBuffer().rtrim());
- auto Status = VFS.status(ToolchainPath);
- if (!Status || !Status->isDirectory())
- return false;
-
- Path = std::string(ToolchainPath.str());
- VSLayout = MSVCToolChain::ToolsetLayout::VS2017OrNewer;
- return true;
-#endif
-}
-
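The SuppressCOMErrorsRAII helper above is an instance of a general pattern: swap a process-global handler on entry and restore it on exit, so exceptions (forbidden by the LLVM coding standards) are never raised in the enclosed scope. A COM-free sketch of the same idea, with a hypothetical handler type and names:

#include <cstdio>

using Handler = void (*)(int);
static void defaultHandler(int Code) { std::printf("error %d\n", Code); }
static Handler CurrentHandler = defaultHandler;

struct SuppressErrorsRAII {
  Handler Saved;
  SuppressErrorsRAII() : Saved(CurrentHandler) {
    CurrentHandler = [](int) {}; // drop errors instead of reporting them
  }
  ~SuppressErrorsRAII() { CurrentHandler = Saved; }
};

int main() {
  {
    SuppressErrorsRAII S;
    CurrentHandler(1); // silenced
  }
  CurrentHandler(2); // prints "error 2"
}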
-// Look in the registry for Visual Studio installs, and use that to get
-// a toolchain path. VS2017 and newer don't get added to the registry.
-// So if we find something here, we know that it's an older version.
-static bool findVCToolChainViaRegistry(std::string &Path,
- MSVCToolChain::ToolsetLayout &VSLayout) {
- std::string VSInstallPath;
- if (getSystemRegistryString(R"(SOFTWARE\Microsoft\VisualStudio\$VERSION)",
- "InstallDir", VSInstallPath, nullptr) ||
- getSystemRegistryString(R"(SOFTWARE\Microsoft\VCExpress\$VERSION)",
- "InstallDir", VSInstallPath, nullptr)) {
- if (!VSInstallPath.empty()) {
- llvm::SmallString<256> VCPath(llvm::StringRef(
- VSInstallPath.c_str(), VSInstallPath.find(R"(\Common7\IDE)")));
- llvm::sys::path::append(VCPath, "VC");
-
- Path = std::string(VCPath.str());
- VSLayout = MSVCToolChain::ToolsetLayout::OlderVS;
- return true;
- }
- }
- return false;
-}
-
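findVCToolChainViaRegistry leans on the $VERSION handling in getSystemRegistryString (defined, and likewise removed, further down): sibling subkeys are enumerated and the numerically highest one wins. A sketch of just that comparison, in plain C++17 with hypothetical key names and no registry access:

#include <cctype>
#include <cstdio>
#include <cstdlib>

static double numericVersion(const char *KeyName) {
  while (*KeyName && !std::isdigit((unsigned char)*KeyName))
    ++KeyName;                     // skip a non-numeric prefix such as "v"
  return strtod(KeyName, nullptr); // "14.0" -> 14.0, no digits -> 0.0
}

int main() {
  const char *Keys[] = {"12.0", "14.0", "7.1"};
  double Best = 0.0;
  for (const char *Key : Keys)
    if (double V = numericVersion(Key); V > Best)
      Best = V;
  std::printf("%g\n", Best); // prints 14
}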
// Try to find Exe from a Visual Studio distribution. This first tries to find
// an installed copy of Visual Studio and, failing that, looks in the PATH,
// making sure that whatever executable that's found is not a same-named exe
@@ -427,8 +60,8 @@ static bool findVCToolChainViaRegistry(std::string &Path,
static std::string FindVisualStudioExecutable(const ToolChain &TC,
const char *Exe) {
const auto &MSVC = static_cast<const toolchains::MSVCToolChain &>(TC);
- SmallString<128> FilePath(MSVC.getSubDirectoryPath(
- toolchains::MSVCToolChain::SubDirectoryType::Bin));
+ SmallString<128> FilePath(
+ MSVC.getSubDirectoryPath(llvm::SubDirectoryType::Bin));
llvm::sys::path::append(FilePath, Exe);
return std::string(canExecute(TC.getVFS(), FilePath) ? FilePath.str() : Exe);
}
@@ -448,7 +81,7 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.MakeArgString(std::string("-out:") + Output.getFilename()));
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles) &&
- !C.getDriver().IsCLMode()) {
+ !C.getDriver().IsCLMode() && !C.getDriver().IsFlangMode()) {
CmdArgs.push_back("-defaultlib:libcmt");
CmdArgs.push_back("-defaultlib:oldnames");
}
@@ -469,7 +102,7 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// The DIA SDK always uses the legacy vc arch, even in new MSVC versions.
llvm::sys::path::append(DIAPath, "lib",
- llvmArchToLegacyVCArch(TC.getArch()));
+ llvm::archToLegacyVCArch(TC.getArch()));
CmdArgs.push_back(Args.MakeArgString(Twine("-libpath:") + DIAPath));
}
if (!llvm::sys::Process::GetEnv("LIB") ||
@@ -477,12 +110,10 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
options::OPT__SLASH_winsysroot)) {
CmdArgs.push_back(Args.MakeArgString(
Twine("-libpath:") +
- TC.getSubDirectoryPath(
- toolchains::MSVCToolChain::SubDirectoryType::Lib)));
+ TC.getSubDirectoryPath(llvm::SubDirectoryType::Lib)));
CmdArgs.push_back(Args.MakeArgString(
Twine("-libpath:") +
- TC.getSubDirectoryPath(toolchains::MSVCToolChain::SubDirectoryType::Lib,
- "atlmfc")));
+ TC.getSubDirectoryPath(llvm::SubDirectoryType::Lib, "atlmfc")));
}
if (!llvm::sys::Process::GetEnv("LIB") ||
Args.getLastArg(options::OPT__SLASH_winsdkdir,
@@ -499,6 +130,16 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
Args.MakeArgString(std::string("-libpath:") + WindowsSdkLibPath));
}
+ if (C.getDriver().IsFlangMode()) {
+ addFortranRuntimeLibraryPath(TC, Args, CmdArgs);
+ addFortranRuntimeLibs(TC, CmdArgs);
+
+ // Inform the MSVC linker that we're generating a console application, i.e.
+ // one with `main` as the "user-defined" entry point. The `main` function is
+ // defined in flang's runtime libraries.
+ CmdArgs.push_back("/subsystem:console");
+ }
+
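With the two Flang hunks above, a Fortran link on this target drops the libcmt/oldnames defaults, gains the runtime library path plus whatever libraries addFortranRuntimeLibs selects for the toolchain, and explicitly requests a console subsystem. A hypothetical resulting job, with paths and names illustrative only:

  link.exe -out:hello.exe hello.o -libpath:<flang lib dir> <Fortran runtime libs> /subsystem:console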
// Add the compiler-rt library directories to libpath if they exist to help
// the linker find the various sanitizer, builtin, and profiling runtimes.
for (const auto &LibPath : TC.getLibraryPaths()) {
@@ -655,6 +296,8 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
A.renderAsInput(Args, CmdArgs);
}
+ addHIPRuntimeLibArgs(TC, Args, CmdArgs);
+
TC.addProfileRTLibs(Args, CmdArgs);
std::vector<const char *> Environment;
@@ -695,7 +338,7 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// native target bin directory.
// e.g. when compiling for x86 on an x64 host, PATH should start with:
// /bin/Hostx64/x86;/bin/Hostx64/x64
- // This doesn't attempt to handle ToolsetLayout::DevDivInternal.
+ // This doesn't attempt to handle llvm::ToolsetLayout::DevDivInternal.
if (TC.getIsVS2017OrNewer() &&
llvm::Triple(llvm::sys::getProcessTriple()).getArch() != TC.getArch()) {
auto HostArch = llvm::Triple(llvm::sys::getProcessTriple()).getArch();
@@ -730,13 +373,12 @@ void visualstudio::Linker::ConstructJob(Compilation &C, const JobAction &JA,
for (const char *Cursor = EnvBlock.data(); *Cursor != '\0';) {
llvm::StringRef EnvVar(Cursor);
if (EnvVar.startswith_insensitive("path=")) {
- using SubDirectoryType = toolchains::MSVCToolChain::SubDirectoryType;
constexpr size_t PrefixLen = 5; // strlen("path=")
Environment.push_back(Args.MakeArgString(
EnvVar.substr(0, PrefixLen) +
- TC.getSubDirectoryPath(SubDirectoryType::Bin) +
+ TC.getSubDirectoryPath(llvm::SubDirectoryType::Bin) +
llvm::Twine(llvm::sys::EnvPathSeparator) +
- TC.getSubDirectoryPath(SubDirectoryType::Bin, "", HostArch) +
+ TC.getSubDirectoryPath(llvm::SubDirectoryType::Bin, HostArch) +
(EnvVar.size() > PrefixLen
? llvm::Twine(llvm::sys::EnvPathSeparator) +
EnvVar.substr(PrefixLen)
@@ -769,14 +411,29 @@ MSVCToolChain::MSVCToolChain(const Driver &D, const llvm::Triple &Triple,
if (getDriver().getInstalledDir() != getDriver().Dir)
getProgramPaths().push_back(getDriver().Dir);
+ Optional<llvm::StringRef> VCToolsDir, VCToolsVersion;
+ if (Arg *A = Args.getLastArg(options::OPT__SLASH_vctoolsdir))
+ VCToolsDir = A->getValue();
+ if (Arg *A = Args.getLastArg(options::OPT__SLASH_vctoolsversion))
+ VCToolsVersion = A->getValue();
+ if (Arg *A = Args.getLastArg(options::OPT__SLASH_winsdkdir))
+ WinSdkDir = A->getValue();
+ if (Arg *A = Args.getLastArg(options::OPT__SLASH_winsdkversion))
+ WinSdkVersion = A->getValue();
+ if (Arg *A = Args.getLastArg(options::OPT__SLASH_winsysroot))
+ WinSysRoot = A->getValue();
+
// Check the command line first, that's the user explicitly telling us what to
// use. Check the environment next, in case we're being invoked from a VS
// command prompt. Failing that, just try to find the newest Visual Studio
// version we can and use its default VC toolchain.
- findVCToolChainViaCommandLine(getVFS(), Args, VCToolChainPath, VSLayout) ||
- findVCToolChainViaEnvironment(getVFS(), VCToolChainPath, VSLayout) ||
- findVCToolChainViaSetupConfig(getVFS(), VCToolChainPath, VSLayout) ||
- findVCToolChainViaRegistry(VCToolChainPath, VSLayout);
+ llvm::findVCToolChainViaCommandLine(getVFS(), VCToolsDir, VCToolsVersion,
+ WinSysRoot, VCToolChainPath, VSLayout) ||
+ llvm::findVCToolChainViaEnvironment(getVFS(), VCToolChainPath,
+ VSLayout) ||
+ llvm::findVCToolChainViaSetupConfig(getVFS(), VCToolChainPath,
+ VSLayout) ||
+ llvm::findVCToolChainViaRegistry(VCToolChainPath, VSLayout);
}
Tool *MSVCToolChain::buildLinker() const {
@@ -802,8 +459,8 @@ bool MSVCToolChain::IsUnwindTablesDefault(const ArgList &Args) const {
// All non-x86_32 Windows targets require unwind tables. However, LLVM
// doesn't know how to generate them for all targets, so only enable
// the ones that are actually implemented.
- return getArch() == llvm::Triple::x86_64 ||
- getArch() == llvm::Triple::aarch64;
+ return getArch() == llvm::Triple::x86_64 || getArch() == llvm::Triple::arm ||
+ getArch() == llvm::Triple::thumb || getArch() == llvm::Triple::aarch64;
}
bool MSVCToolChain::isPICDefault() const {
@@ -830,360 +487,60 @@ void MSVCToolChain::AddHIPIncludeArgs(const ArgList &DriverArgs,
RocmInstallation.AddHIPIncludeArgs(DriverArgs, CC1Args);
}
+void MSVCToolChain::AddHIPRuntimeLibArgs(const ArgList &Args,
+ ArgStringList &CmdArgs) const {
+ CmdArgs.append({Args.MakeArgString(StringRef("-libpath:") +
+ RocmInstallation.getLibPath()),
+ "amdhip64.lib"});
+}
+
void MSVCToolChain::printVerboseInfo(raw_ostream &OS) const {
CudaInstallation.print(OS);
RocmInstallation.print(OS);
}
-// Get the path to a specific subdirectory in the current toolchain for
-// a given target architecture.
-// VS2017 changed the VC toolchain layout, so this should be used instead
-// of hardcoding paths.
std::string
-MSVCToolChain::getSubDirectoryPath(SubDirectoryType Type,
- llvm::StringRef SubdirParent,
- llvm::Triple::ArchType TargetArch) const {
- const char *SubdirName;
- const char *IncludeName;
- switch (VSLayout) {
- case ToolsetLayout::OlderVS:
- SubdirName = llvmArchToLegacyVCArch(TargetArch);
- IncludeName = "include";
- break;
- case ToolsetLayout::VS2017OrNewer:
- SubdirName = llvmArchToWindowsSDKArch(TargetArch);
- IncludeName = "include";
- break;
- case ToolsetLayout::DevDivInternal:
- SubdirName = llvmArchToDevDivInternalArch(TargetArch);
- IncludeName = "inc";
- break;
- }
-
- llvm::SmallString<256> Path(VCToolChainPath);
- if (!SubdirParent.empty())
- llvm::sys::path::append(Path, SubdirParent);
-
- switch (Type) {
- case SubDirectoryType::Bin:
- if (VSLayout == ToolsetLayout::VS2017OrNewer) {
- const bool HostIsX64 =
- llvm::Triple(llvm::sys::getProcessTriple()).isArch64Bit();
- const char *const HostName = HostIsX64 ? "Hostx64" : "Hostx86";
- llvm::sys::path::append(Path, "bin", HostName, SubdirName);
- } else { // OlderVS or DevDivInternal
- llvm::sys::path::append(Path, "bin", SubdirName);
- }
- break;
- case SubDirectoryType::Include:
- llvm::sys::path::append(Path, IncludeName);
- break;
- case SubDirectoryType::Lib:
- llvm::sys::path::append(Path, "lib", SubdirName);
- break;
- }
- return std::string(Path.str());
-}
-
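For concreteness, the removed logic resolved, for an x86_64 target on an x64 host: Bin to <VCToolChainPath>/bin/Hostx64/x64 in the VS2017+ layout and <VCToolChainPath>/bin/amd64 in the older one; Lib to lib/x64 or lib/amd64 respectively; and Include to include, or inc for DevDiv internal builds. The llvm::getSubDirectoryPath replacement visible below is expected to behave the same, but takes the layout and toolchain path as explicit parameters instead of reading them from the toolchain object.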
-#ifdef _WIN32
-static bool readFullStringValue(HKEY hkey, const char *valueName,
- std::string &value) {
- std::wstring WideValueName;
- if (!llvm::ConvertUTF8toWide(valueName, WideValueName))
- return false;
-
- DWORD result = 0;
- DWORD valueSize = 0;
- DWORD type = 0;
- // First just query for the required size.
- result = RegQueryValueExW(hkey, WideValueName.c_str(), NULL, &type, NULL,
- &valueSize);
- if (result != ERROR_SUCCESS || type != REG_SZ || !valueSize)
- return false;
- std::vector<BYTE> buffer(valueSize);
- result = RegQueryValueExW(hkey, WideValueName.c_str(), NULL, NULL, &buffer[0],
- &valueSize);
- if (result == ERROR_SUCCESS) {
- std::wstring WideValue(reinterpret_cast<const wchar_t *>(buffer.data()),
- valueSize / sizeof(wchar_t));
- if (valueSize && WideValue.back() == L'\0') {
- WideValue.pop_back();
- }
-    // The conversion function requires the destination buffer to be empty as
-    // an invariant, but this function is sometimes called in a loop that
-    // passes in the same buffer. Simply clear it out so we can overwrite it.
- value.clear();
- return llvm::convertWideToUTF8(WideValue, value);
- }
- return false;
+MSVCToolChain::getSubDirectoryPath(llvm::SubDirectoryType Type,
+ llvm::StringRef SubdirParent) const {
+ return llvm::getSubDirectoryPath(Type, VSLayout, VCToolChainPath, getArch(),
+ SubdirParent);
}
-#endif
-/// Read registry string.
-/// This also supports a means to look for high-versioned keys by use
-/// of a $VERSION placeholder in the key path.
-/// $VERSION in the key path is a placeholder for the version number,
-/// causing the highest value path to be searched for and used.
-/// I.e. "SOFTWARE\\Microsoft\\VisualStudio\\$VERSION".
-/// There can be additional characters in the component. Only the numeric
-/// characters are compared. This function only searches HKLM.
-static bool getSystemRegistryString(const char *keyPath, const char *valueName,
- std::string &value, std::string *phValue) {
-#ifndef _WIN32
- return false;
-#else
- HKEY hRootKey = HKEY_LOCAL_MACHINE;
- HKEY hKey = NULL;
- long lResult;
- bool returnValue = false;
-
- const char *placeHolder = strstr(keyPath, "$VERSION");
- std::string bestName;
- // If we have a $VERSION placeholder, do the highest-version search.
- if (placeHolder) {
- const char *keyEnd = placeHolder - 1;
- const char *nextKey = placeHolder;
- // Find end of previous key.
- while ((keyEnd > keyPath) && (*keyEnd != '\\'))
- keyEnd--;
- // Find end of key containing $VERSION.
- while (*nextKey && (*nextKey != '\\'))
- nextKey++;
- size_t partialKeyLength = keyEnd - keyPath;
- char partialKey[256];
- if (partialKeyLength >= sizeof(partialKey))
- partialKeyLength = sizeof(partialKey) - 1;
- strncpy(partialKey, keyPath, partialKeyLength);
- partialKey[partialKeyLength] = '\0';
- HKEY hTopKey = NULL;
- lResult = RegOpenKeyExA(hRootKey, partialKey, 0, KEY_READ | KEY_WOW64_32KEY,
- &hTopKey);
- if (lResult == ERROR_SUCCESS) {
- char keyName[256];
- double bestValue = 0.0;
- DWORD index, size = sizeof(keyName) - 1;
- for (index = 0; RegEnumKeyExA(hTopKey, index, keyName, &size, NULL, NULL,
- NULL, NULL) == ERROR_SUCCESS;
- index++) {
- const char *sp = keyName;
- while (*sp && !isDigit(*sp))
- sp++;
- if (!*sp)
- continue;
- const char *ep = sp + 1;
- while (*ep && (isDigit(*ep) || (*ep == '.')))
- ep++;
- char numBuf[32];
- strncpy(numBuf, sp, sizeof(numBuf) - 1);
- numBuf[sizeof(numBuf) - 1] = '\0';
- double dvalue = strtod(numBuf, NULL);
- if (dvalue > bestValue) {
- // Test that InstallDir is indeed there before keeping this index.
- // Open the chosen key path remainder.
- bestName = keyName;
- // Append rest of key.
- bestName.append(nextKey);
- lResult = RegOpenKeyExA(hTopKey, bestName.c_str(), 0,
- KEY_READ | KEY_WOW64_32KEY, &hKey);
- if (lResult == ERROR_SUCCESS) {
- if (readFullStringValue(hKey, valueName, value)) {
- bestValue = dvalue;
- if (phValue)
- *phValue = bestName;
- returnValue = true;
- }
- RegCloseKey(hKey);
- }
- }
- size = sizeof(keyName) - 1;
- }
- RegCloseKey(hTopKey);
- }
- } else {
- lResult =
- RegOpenKeyExA(hRootKey, keyPath, 0, KEY_READ | KEY_WOW64_32KEY, &hKey);
- if (lResult == ERROR_SUCCESS) {
- if (readFullStringValue(hKey, valueName, value))
- returnValue = true;
- if (phValue)
- phValue->clear();
- RegCloseKey(hKey);
- }
- }
- return returnValue;
-#endif // _WIN32
+std::string
+MSVCToolChain::getSubDirectoryPath(llvm::SubDirectoryType Type,
+ llvm::Triple::ArchType TargetArch) const {
+ return llvm::getSubDirectoryPath(Type, VSLayout, VCToolChainPath, TargetArch,
+ "");
}
// Find the most recent version of Universal CRT or Windows 10 SDK.
// vcvarsqueryregistry.bat from Visual Studio 2015 sorts entries in the include
// directory by name and uses the last one of the list.
// So we compare entry names lexicographically to find the greatest one.
-static bool getWindows10SDKVersionFromPath(llvm::vfs::FileSystem &VFS,
- const std::string &SDKPath,
- std::string &SDKVersion) {
- llvm::SmallString<128> IncludePath(SDKPath);
- llvm::sys::path::append(IncludePath, "Include");
- SDKVersion = getHighestNumericTupleInDirectory(VFS, IncludePath);
- return !SDKVersion.empty();
-}
-
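One nuance: the comment above this function still describes the lexicographic scheme of the VS2015 batch file, while the implementation (via getHighestNumericTupleInDirectory) parses directory names as version tuples, so a hypothetical 10.0.19041.0 correctly beats 10.0.2 even though it sorts lower as a string. A standalone sketch of the tuple scan in plain C++17, with a hypothetical Include directory; std::vector's ordering is element-wise, which mirrors llvm::VersionTuple:

#include <filesystem>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

static std::vector<int> parseTuple(const std::string &Name) {
  std::vector<int> Parts;
  std::stringstream SS(Name);
  std::string Item;
  while (std::getline(SS, Item, '.')) {
    try {
      Parts.push_back(std::stoi(Item));
    } catch (...) {
      return {}; // not a numeric tuple; skip this entry
    }
  }
  return Parts;
}

int main() {
  std::string Best;
  std::vector<int> BestTuple;
  for (const auto &E : std::filesystem::directory_iterator("Include")) {
    if (!E.is_directory())
      continue;
    std::string Name = E.path().filename().string();
    std::vector<int> Tuple = parseTuple(Name);
    if (!Tuple.empty() && Tuple > BestTuple) {
      BestTuple = Tuple;
      Best = Name;
    }
  }
  std::cout << Best << '\n'; // e.g. "10.0.19041.0" beats "10.0.2"
}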
-static bool getWindowsSDKDirViaCommandLine(llvm::vfs::FileSystem &VFS,
- const ArgList &Args,
- std::string &Path, int &Major,
- std::string &Version) {
- if (Arg *A = Args.getLastArg(options::OPT__SLASH_winsdkdir,
- options::OPT__SLASH_winsysroot)) {
- // Don't validate the input; trust the value supplied by the user.
- // The motivation is to prevent unnecessary file and registry access.
- llvm::VersionTuple SDKVersion;
- if (Arg *A = Args.getLastArg(options::OPT__SLASH_winsdkversion))
- SDKVersion.tryParse(A->getValue());
-
- if (A->getOption().getID() == options::OPT__SLASH_winsysroot) {
- llvm::SmallString<128> SDKPath(A->getValue());
- llvm::sys::path::append(SDKPath, "Windows Kits");
- if (!SDKVersion.empty())
- llvm::sys::path::append(SDKPath, Twine(SDKVersion.getMajor()));
- else
- llvm::sys::path::append(
- SDKPath, getHighestNumericTupleInDirectory(VFS, SDKPath));
- Path = std::string(SDKPath.str());
- } else {
- Path = A->getValue();
- }
-
- if (!SDKVersion.empty()) {
- Major = SDKVersion.getMajor();
- Version = SDKVersion.getAsString();
- } else if (getWindows10SDKVersionFromPath(VFS, Path, Version)) {
- Major = 10;
- }
- return true;
- }
- return false;
-}
-
-/// Get Windows SDK installation directory.
-static bool getWindowsSDKDir(llvm::vfs::FileSystem &VFS, const ArgList &Args,
- std::string &Path, int &Major,
- std::string &WindowsSDKIncludeVersion,
- std::string &WindowsSDKLibVersion) {
- // Trust /winsdkdir and /winsdkversion if present.
- if (getWindowsSDKDirViaCommandLine(VFS, Args, Path, Major,
- WindowsSDKIncludeVersion)) {
- WindowsSDKLibVersion = WindowsSDKIncludeVersion;
- return true;
- }
-
- // FIXME: Try env vars (%WindowsSdkDir%, %UCRTVersion%) before going to registry.
-
- // Try the Windows registry.
- std::string RegistrySDKVersion;
- if (!getSystemRegistryString(
- "SOFTWARE\\Microsoft\\Microsoft SDKs\\Windows\\$VERSION",
- "InstallationFolder", Path, &RegistrySDKVersion))
- return false;
- if (Path.empty() || RegistrySDKVersion.empty())
- return false;
-
- WindowsSDKIncludeVersion.clear();
- WindowsSDKLibVersion.clear();
- Major = 0;
- std::sscanf(RegistrySDKVersion.c_str(), "v%d.", &Major);
- if (Major <= 7)
- return true;
- if (Major == 8) {
-    // Windows SDK 8.x installs libraries in a folder whose name depends on the
- // version of the OS you're targeting. By default choose the newest, which
- // usually corresponds to the version of the OS you've installed the SDK on.
- const char *Tests[] = {"winv6.3", "win8", "win7"};
- for (const char *Test : Tests) {
- llvm::SmallString<128> TestPath(Path);
- llvm::sys::path::append(TestPath, "Lib", Test);
- if (VFS.exists(TestPath)) {
- WindowsSDKLibVersion = Test;
- break;
- }
- }
- return !WindowsSDKLibVersion.empty();
- }
- if (Major == 10) {
- if (!getWindows10SDKVersionFromPath(VFS, Path, WindowsSDKIncludeVersion))
- return false;
- WindowsSDKLibVersion = WindowsSDKIncludeVersion;
- return true;
- }
- // Unsupported SDK version
- return false;
-}
-
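The removed getWindowsSDKDir distinguishes SDK generations: before v8 the libraries sit directly under Lib, v8.x adds an OS-named folder probed in newest-first order (winv6.3, win8, win7), and v10 derives the folder name from the Include directory. Illustrative library paths for an x86_64 target: <SDK>/Lib/x64 for 7.x, <SDK>/Lib/winv6.3/um/x64 for 8.1, and <SDK>/Lib/10.0.19041.0/um/x64 for a hypothetical v10 install.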
// Gets the library path required to link against the Windows SDK.
-bool MSVCToolChain::getWindowsSDKLibraryPath(
- const ArgList &Args, std::string &path) const {
+bool MSVCToolChain::getWindowsSDKLibraryPath(const ArgList &Args,
+ std::string &path) const {
std::string sdkPath;
int sdkMajor = 0;
std::string windowsSDKIncludeVersion;
std::string windowsSDKLibVersion;
path.clear();
- if (!getWindowsSDKDir(getVFS(), Args, sdkPath, sdkMajor,
- windowsSDKIncludeVersion, windowsSDKLibVersion))
+ if (!llvm::getWindowsSDKDir(getVFS(), WinSdkDir, WinSdkVersion, WinSysRoot,
+ sdkPath, sdkMajor, windowsSDKIncludeVersion,
+ windowsSDKLibVersion))
return false;
llvm::SmallString<128> libPath(sdkPath);
llvm::sys::path::append(libPath, "Lib");
- if (sdkMajor >= 8) {
- llvm::sys::path::append(libPath, windowsSDKLibVersion, "um",
- llvmArchToWindowsSDKArch(getArch()));
- } else {
- switch (getArch()) {
- // In Windows SDK 7.x, x86 libraries are directly in the Lib folder.
- case llvm::Triple::x86:
- break;
- case llvm::Triple::x86_64:
- llvm::sys::path::append(libPath, "x64");
- break;
- case llvm::Triple::arm:
- // It is not necessary to link against Windows SDK 7.x when targeting ARM.
- return false;
- default:
- return false;
- }
- }
-
- path = std::string(libPath.str());
- return true;
+ if (sdkMajor >= 8)
+ llvm::sys::path::append(libPath, windowsSDKLibVersion, "um");
+ return llvm::appendArchToWindowsSDKLibPath(sdkMajor, libPath, getArch(),
+ path);
}
-// Check if the Include path of a specified version of Visual Studio contains
-// specific header files. If not, they are probably shipped with Universal CRT.
bool MSVCToolChain::useUniversalCRT() const {
- llvm::SmallString<128> TestPath(
- getSubDirectoryPath(SubDirectoryType::Include));
- llvm::sys::path::append(TestPath, "stdlib.h");
- return !getVFS().exists(TestPath);
-}
-
-static bool getUniversalCRTSdkDir(llvm::vfs::FileSystem &VFS,
- const ArgList &Args, std::string &Path,
- std::string &UCRTVersion) {
- // If /winsdkdir is passed, use it as location for the UCRT too.
- // FIXME: Should there be a dedicated /ucrtdir to override /winsdkdir?
- int Major;
- if (getWindowsSDKDirViaCommandLine(VFS, Args, Path, Major, UCRTVersion))
- return true;
-
- // FIXME: Try env vars (%UniversalCRTSdkDir%, %UCRTVersion%) before going to
- // registry.
-
- // vcvarsqueryregistry.bat for Visual Studio 2015 queries the registry
- // for the specific key "KitsRoot10". So do we.
- if (!getSystemRegistryString(
- "SOFTWARE\\Microsoft\\Windows Kits\\Installed Roots", "KitsRoot10",
- Path, nullptr))
- return false;
-
- return getWindows10SDKVersionFromPath(VFS, Path, UCRTVersion);
+ return llvm::useUniversalCRT(VSLayout, VCToolChainPath, getArch(), getVFS());
}
bool MSVCToolChain::getUniversalCRTLibraryPath(const ArgList &Args,
@@ -1192,10 +549,12 @@ bool MSVCToolChain::getUniversalCRTLibraryPath(const ArgList &Args,
std::string UCRTVersion;
Path.clear();
- if (!getUniversalCRTSdkDir(getVFS(), Args, UniversalCRTSdkPath, UCRTVersion))
+ if (!llvm::getUniversalCRTSdkDir(getVFS(), WinSdkDir, WinSdkVersion,
+ WinSysRoot, UniversalCRTSdkPath,
+ UCRTVersion))
return false;
- StringRef ArchName = llvmArchToWindowsSDKArch(getArch());
+ StringRef ArchName = llvm::archToWindowsSDKArch(getArch());
if (ArchName.empty())
return false;
@@ -1313,15 +672,17 @@ void MSVCToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
// the correct include paths first.
if (!VCToolChainPath.empty()) {
addSystemInclude(DriverArgs, CC1Args,
- getSubDirectoryPath(SubDirectoryType::Include));
- addSystemInclude(DriverArgs, CC1Args,
- getSubDirectoryPath(SubDirectoryType::Include, "atlmfc"));
+ getSubDirectoryPath(llvm::SubDirectoryType::Include));
+ addSystemInclude(
+ DriverArgs, CC1Args,
+ getSubDirectoryPath(llvm::SubDirectoryType::Include, "atlmfc"));
if (useUniversalCRT()) {
std::string UniversalCRTSdkPath;
std::string UCRTVersion;
- if (getUniversalCRTSdkDir(getVFS(), DriverArgs, UniversalCRTSdkPath,
- UCRTVersion)) {
+ if (llvm::getUniversalCRTSdkDir(getVFS(), WinSdkDir, WinSdkVersion,
+ WinSysRoot, UniversalCRTSdkPath,
+ UCRTVersion)) {
AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, UniversalCRTSdkPath,
"Include", UCRTVersion, "ucrt");
}
@@ -1331,8 +692,9 @@ void MSVCToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
int major = 0;
std::string windowsSDKIncludeVersion;
std::string windowsSDKLibVersion;
- if (getWindowsSDKDir(getVFS(), DriverArgs, WindowsSDKDir, major,
- windowsSDKIncludeVersion, windowsSDKLibVersion)) {
+ if (llvm::getWindowsSDKDir(getVFS(), WinSdkDir, WinSdkVersion, WinSysRoot,
+ WindowsSDKDir, major, windowsSDKIncludeVersion,
+ windowsSDKLibVersion)) {
if (major >= 8) {
// Note: windowsSDKIncludeVersion is empty for SDKs prior to v10.
// Either way, llvm::sys::path::append can handle it.
@@ -1348,7 +710,7 @@ void MSVCToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
if (major >= 10) {
llvm::VersionTuple Tuple;
if (!Tuple.tryParse(windowsSDKIncludeVersion) &&
- Tuple.getSubminor().getValueOr(0) >= 17134) {
+ Tuple.getSubminor().value_or(0) >= 17134) {
AddSystemIncludeWithSubfolder(DriverArgs, CC1Args, WindowsSDKDir,
"Include", windowsSDKIncludeVersion,
"cppwinrt");
@@ -1389,7 +751,8 @@ VersionTuple MSVCToolChain::computeMSVCVersion(const Driver *D,
if (MSVT.empty())
MSVT = getTriple().getEnvironmentVersion();
if (MSVT.empty() && IsWindowsMSVC)
- MSVT = getMSVCVersionFromExe(getSubDirectoryPath(SubDirectoryType::Bin));
+ MSVT =
+ getMSVCVersionFromExe(getSubDirectoryPath(llvm::SubDirectoryType::Bin));
if (MSVT.empty() &&
Args.hasFlag(options::OPT_fms_extensions, options::OPT_fno_ms_extensions,
IsWindowsMSVC)) {
@@ -1405,8 +768,8 @@ MSVCToolChain::ComputeEffectiveClangTriple(const ArgList &Args,
// The MSVC version doesn't care about the architecture, even though it
// may look at the triple internally.
VersionTuple MSVT = computeMSVCVersion(/*D=*/nullptr, Args);
- MSVT = VersionTuple(MSVT.getMajor(), MSVT.getMinor().getValueOr(0),
- MSVT.getSubminor().getValueOr(0));
+ MSVT = VersionTuple(MSVT.getMajor(), MSVT.getMinor().value_or(0),
+ MSVT.getSubminor().value_or(0));
// For the rest of the triple, however, a computed architecture name may
// be needed.
@@ -1619,7 +982,7 @@ void MSVCToolChain::addClangTargetOptions(
Action::OffloadKind DeviceOffloadKind) const {
// MSVC STL kindly allows removing all usages of typeid by defining
// _HAS_STATIC_RTTI to 0. Do so, when compiling with -fno-rtti
- if (DriverArgs.hasArg(options::OPT_fno_rtti, options::OPT_frtti,
- /*Default=*/false))
+ if (DriverArgs.hasFlag(options::OPT_fno_rtti, options::OPT_frtti,
+ /*Default=*/false))
CC1Args.push_back("-D_HAS_STATIC_RTTI=0");
}
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h
index c842773996ed..f09256ba124e 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVC.h
@@ -15,6 +15,7 @@
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Tool.h"
#include "clang/Driver/ToolChain.h"
+#include "llvm/WindowsDriver/MSVCPaths.h"
namespace clang {
namespace driver {
@@ -73,29 +74,15 @@ public:
return 4;
}
- enum class SubDirectoryType {
- Bin,
- Include,
- Lib,
- };
- std::string getSubDirectoryPath(SubDirectoryType Type,
- llvm::StringRef SubdirParent,
+ std::string getSubDirectoryPath(llvm::SubDirectoryType Type,
+ llvm::StringRef SubdirParent = "") const;
+ std::string getSubDirectoryPath(llvm::SubDirectoryType Type,
llvm::Triple::ArchType TargetArch) const;
- // Convenience overload.
- // Uses the current target arch.
- std::string getSubDirectoryPath(SubDirectoryType Type,
- llvm::StringRef SubdirParent = "") const {
- return getSubDirectoryPath(Type, SubdirParent, getArch());
+ bool getIsVS2017OrNewer() const {
+ return VSLayout == llvm::ToolsetLayout::VS2017OrNewer;
}
- enum class ToolsetLayout {
- OlderVS,
- VS2017OrNewer,
- DevDivInternal,
- };
- bool getIsVS2017OrNewer() const { return VSLayout == ToolsetLayout::VS2017OrNewer; }
-
void
AddClangSystemIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
@@ -109,6 +96,9 @@ public:
void AddHIPIncludeArgs(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args) const override;
+ void AddHIPRuntimeLibArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs) const override;
+
bool getWindowsSDKLibraryPath(
const llvm::opt::ArgList &Args, std::string &path) const;
bool getUniversalCRTLibraryPath(const llvm::opt::ArgList &Args,
@@ -142,8 +132,9 @@ protected:
Tool *buildLinker() const override;
Tool *buildAssembler() const override;
private:
+ llvm::Optional<llvm::StringRef> WinSdkDir, WinSdkVersion, WinSysRoot;
std::string VCToolChainPath;
- ToolsetLayout VSLayout = ToolsetLayout::OlderVS;
+ llvm::ToolsetLayout VSLayout = llvm::ToolsetLayout::OlderVS;
CudaInstallationDetector CudaInstallation;
RocmInstallationDetector RocmInstallation;
};
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVCSetupApi.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVCSetupApi.h
deleted file mode 100644
index 28e6e3e08e37..000000000000
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MSVCSetupApi.h
+++ /dev/null
@@ -1,523 +0,0 @@
-// <copyright file="Program.cpp" company="Microsoft Corporation">
-// Copyright (C) Microsoft Corporation. All rights reserved.
-// Licensed under the MIT license.
-// </copyright>
-// <license>
-// The MIT License (MIT)
-//
-// Copyright (C) Microsoft Corporation. All rights reserved.
-//
-// Permission is hereby granted, free of charge, to any person obtaining
-// a copy of this software and associated documentation files (the "Software"),
-// to deal in the Software without restriction, including without limitation the
-// rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-// sell copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-// SOFTWARE.
-// </license>
-
-#pragma once
-
-#ifdef __clang__
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wnon-virtual-dtor"
-#endif
-
-// Constants
-//
-#ifndef E_NOTFOUND
-#define E_NOTFOUND HRESULT_FROM_WIN32(ERROR_NOT_FOUND)
-#endif
-
-#ifndef E_FILENOTFOUND
-#define E_FILENOTFOUND HRESULT_FROM_WIN32(ERROR_FILE_NOT_FOUND)
-#endif
-
-// Enumerations
-//
-/// <summary>
-/// The state of an instance.
-/// </summary>
-enum InstanceState : unsigned {
- /// <summary>
- /// The instance state has not been determined.
- /// </summary>
- eNone = 0,
-
- /// <summary>
- /// The instance installation path exists.
- /// </summary>
- eLocal = 1,
-
- /// <summary>
- /// A product is registered to the instance.
- /// </summary>
- eRegistered = 2,
-
- /// <summary>
- /// No reboot is required for the instance.
- /// </summary>
- eNoRebootRequired = 4,
-
- /// <summary>
- /// The instance represents a complete install.
- /// </summary>
- eComplete = MAXUINT,
-};
-
-// Forward interface declarations
-//
-#ifndef __ISetupInstance_FWD_DEFINED__
-#define __ISetupInstance_FWD_DEFINED__
-typedef struct ISetupInstance ISetupInstance;
-#endif
-
-#ifndef __ISetupInstance2_FWD_DEFINED__
-#define __ISetupInstance2_FWD_DEFINED__
-typedef struct ISetupInstance2 ISetupInstance2;
-#endif
-
-#ifndef __IEnumSetupInstances_FWD_DEFINED__
-#define __IEnumSetupInstances_FWD_DEFINED__
-typedef struct IEnumSetupInstances IEnumSetupInstances;
-#endif
-
-#ifndef __ISetupConfiguration_FWD_DEFINED__
-#define __ISetupConfiguration_FWD_DEFINED__
-typedef struct ISetupConfiguration ISetupConfiguration;
-#endif
-
-#ifndef __ISetupConfiguration2_FWD_DEFINED__
-#define __ISetupConfiguration2_FWD_DEFINED__
-typedef struct ISetupConfiguration2 ISetupConfiguration2;
-#endif
-
-#ifndef __ISetupPackageReference_FWD_DEFINED__
-#define __ISetupPackageReference_FWD_DEFINED__
-typedef struct ISetupPackageReference ISetupPackageReference;
-#endif
-
-#ifndef __ISetupHelper_FWD_DEFINED__
-#define __ISetupHelper_FWD_DEFINED__
-typedef struct ISetupHelper ISetupHelper;
-#endif
-
-// Forward class declarations
-//
-#ifndef __SetupConfiguration_FWD_DEFINED__
-#define __SetupConfiguration_FWD_DEFINED__
-
-#ifdef __cplusplus
-typedef class SetupConfiguration SetupConfiguration;
-#endif
-
-#endif
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-// Interface definitions
-//
-EXTERN_C const IID IID_ISetupInstance;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-/// <summary>
-/// Information about an instance of a product.
-/// </summary>
-struct DECLSPEC_UUID("B41463C3-8866-43B5-BC33-2B0676F7F42E")
- DECLSPEC_NOVTABLE ISetupInstance : public IUnknown {
- /// <summary>
- /// Gets the instance identifier (should match the name of the parent instance
- /// directory).
- /// </summary>
- /// <param name="pbstrInstanceId">The instance identifier.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_FILENOTFOUND if the instance state does not exist.</returns>
- STDMETHOD(GetInstanceId)(_Out_ BSTR *pbstrInstanceId) = 0;
-
- /// <summary>
- /// Gets the local date and time when the installation was originally
- /// installed.
- /// </summary>
- /// <param name="pInstallDate">The local date and time when the installation
- /// was originally installed.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
- /// property is not defined.</returns>
- STDMETHOD(GetInstallDate)(_Out_ LPFILETIME pInstallDate) = 0;
-
- /// <summary>
- /// Gets the unique name of the installation, often indicating the branch and
- /// other information used for telemetry.
- /// </summary>
- /// <param name="pbstrInstallationName">The unique name of the installation,
- /// often indicating the branch and other information used for
- /// telemetry.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
- /// property is not defined.</returns>
- STDMETHOD(GetInstallationName)(_Out_ BSTR *pbstrInstallationName) = 0;
-
- /// <summary>
- /// Gets the path to the installation root of the product.
- /// </summary>
- /// <param name="pbstrInstallationPath">The path to the installation root of
- /// the product.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
- /// property is not defined.</returns>
- STDMETHOD(GetInstallationPath)(_Out_ BSTR *pbstrInstallationPath) = 0;
-
- /// <summary>
- /// Gets the version of the product installed in this instance.
- /// </summary>
- /// <param name="pbstrInstallationVersion">The version of the product
- /// installed in this instance.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
- /// property is not defined.</returns>
- STDMETHOD(GetInstallationVersion)(_Out_ BSTR *pbstrInstallationVersion) = 0;
-
- /// <summary>
- /// Gets the display name (title) of the product installed in this instance.
- /// </summary>
- /// <param name="lcid">The LCID for the display name.</param>
- /// <param name="pbstrDisplayName">The display name (title) of the product
- /// installed in this instance.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
- /// property is not defined.</returns>
- STDMETHOD(GetDisplayName)(_In_ LCID lcid, _Out_ BSTR *pbstrDisplayName) = 0;
-
- /// <summary>
- /// Gets the description of the product installed in this instance.
- /// </summary>
- /// <param name="lcid">The LCID for the description.</param>
- /// <param name="pbstrDescription">The description of the product installed in
- /// this instance.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
- /// property is not defined.</returns>
- STDMETHOD(GetDescription)(_In_ LCID lcid, _Out_ BSTR *pbstrDescription) = 0;
-
- /// <summary>
- /// Resolves the optional relative path to the root path of the instance.
- /// </summary>
- /// <param name="pwszRelativePath">A relative path within the instance to
- /// resolve, or NULL to get the root path.</param>
- /// <param name="pbstrAbsolutePath">The full path to the optional relative
- /// path within the instance. If the relative path is NULL, the root path will
- /// always terminate in a backslash.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
- /// property is not defined.</returns>
- STDMETHOD(ResolvePath)
- (_In_opt_z_ LPCOLESTR pwszRelativePath, _Out_ BSTR *pbstrAbsolutePath) = 0;
-};
-#endif
-
-EXTERN_C const IID IID_ISetupInstance2;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-/// <summary>
-/// Information about an instance of a product.
-/// </summary>
-struct DECLSPEC_UUID("89143C9A-05AF-49B0-B717-72E218A2185C")
- DECLSPEC_NOVTABLE ISetupInstance2 : public ISetupInstance {
- /// <summary>
- /// Gets the state of the instance.
- /// </summary>
- /// <param name="pState">The state of the instance.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_FILENOTFOUND if the instance state does not exist.</returns>
- STDMETHOD(GetState)(_Out_ InstanceState *pState) = 0;
-
- /// <summary>
- /// Gets an array of package references registered to the instance.
- /// </summary>
- /// <param name="ppsaPackages">Pointer to an array of <see
- /// cref="ISetupPackageReference"/>.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
- /// packages property is not defined.</returns>
- STDMETHOD(GetPackages)(_Out_ LPSAFEARRAY *ppsaPackages) = 0;
-
- /// <summary>
- /// Gets a pointer to the <see cref="ISetupPackageReference"/> that represents
- /// the registered product.
- /// </summary>
- /// <param name="ppPackage">Pointer to an instance of <see
- /// cref="ISetupPackageReference"/>. This may be NULL if <see
- /// cref="GetState"/> does not return <see cref="eComplete"/>.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_FILENOTFOUND if the instance state does not exist and E_NOTFOUND if the
- /// packages property is not defined.</returns>
- STDMETHOD(GetProduct)
- (_Outptr_result_maybenull_ ISetupPackageReference **ppPackage) = 0;
-
- /// <summary>
- /// Gets the relative path to the product application, if available.
- /// </summary>
- /// <param name="pbstrProductPath">The relative path to the product
- /// application, if available.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_FILENOTFOUND if the instance state does not exist.</returns>
- STDMETHOD(GetProductPath)
- (_Outptr_result_maybenull_ BSTR *pbstrProductPath) = 0;
-};
-#endif
-
-EXTERN_C const IID IID_IEnumSetupInstances;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-/// <summary>
-/// An enumerator of installed <see cref="ISetupInstance"/> objects.
-/// </summary>
-struct DECLSPEC_UUID("6380BCFF-41D3-4B2E-8B2E-BF8A6810C848")
- DECLSPEC_NOVTABLE IEnumSetupInstances : public IUnknown {
- /// <summary>
- /// Retrieves the next set of product instances in the enumeration sequence.
- /// </summary>
- /// <param name="celt">The number of product instances to retrieve.</param>
- /// <param name="rgelt">A pointer to an array of <see
- /// cref="ISetupInstance"/>.</param>
- /// <param name="pceltFetched">A pointer to the number of product instances
- /// retrieved. If celt is 1 this parameter may be NULL.</param>
-  /// <returns>S_OK if the requested number of elements was fetched, S_FALSE
-  /// if nothing was fetched (at end of enumeration), E_INVALIDARG if celt is greater than
- /// 1 and pceltFetched is NULL, or E_OUTOFMEMORY if an <see
- /// cref="ISetupInstance"/> could not be allocated.</returns>
- STDMETHOD(Next)
- (_In_ ULONG celt, _Out_writes_to_(celt, *pceltFetched) ISetupInstance **rgelt,
- _Out_opt_ _Deref_out_range_(0, celt) ULONG *pceltFetched) = 0;
-
- /// <summary>
- /// Skips the next set of product instances in the enumeration sequence.
- /// </summary>
- /// <param name="celt">The number of product instances to skip.</param>
- /// <returns>S_OK if the number of elements could be skipped; otherwise,
- /// S_FALSE;</returns>
- STDMETHOD(Skip)(_In_ ULONG celt) = 0;
-
- /// <summary>
- /// Resets the enumeration sequence to the beginning.
- /// </summary>
- /// <returns>Always returns S_OK;</returns>
- STDMETHOD(Reset)(void) = 0;
-
- /// <summary>
- /// Creates a new enumeration object in the same state as the current
- /// enumeration object: the new object points to the same place in the
- /// enumeration sequence.
- /// </summary>
- /// <param name="ppenum">A pointer to a pointer to a new <see
- /// cref="IEnumSetupInstances"/> interface. If the method fails, this
- /// parameter is undefined.</param>
- /// <returns>S_OK if a clone was returned; otherwise, E_OUTOFMEMORY.</returns>
- STDMETHOD(Clone)(_Deref_out_opt_ IEnumSetupInstances **ppenum) = 0;
-};
-#endif
-
-EXTERN_C const IID IID_ISetupConfiguration;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-/// <summary>
-/// Gets information about product instances set up on the machine.
-/// </summary>
-struct DECLSPEC_UUID("42843719-DB4C-46C2-8E7C-64F1816EFD5B")
- DECLSPEC_NOVTABLE ISetupConfiguration : public IUnknown {
- /// <summary>
- /// Enumerates all completed product instances installed.
- /// </summary>
- /// <param name="ppEnumInstances">An enumeration of completed, installed
- /// product instances.</param>
- /// <returns>Standard HRESULT indicating success or failure.</returns>
- STDMETHOD(EnumInstances)(_Out_ IEnumSetupInstances **ppEnumInstances) = 0;
-
- /// <summary>
- /// Gets the instance for the current process path.
- /// </summary>
- /// <param name="ppInstance">The instance for the current process
- /// path.</param>
- /// <returns>The instance for the current process path, or E_NOTFOUND if not
- /// found.</returns>
- STDMETHOD(GetInstanceForCurrentProcess)
- (_Out_ ISetupInstance **ppInstance) = 0;
-
- /// <summary>
- /// Gets the instance for the given path.
- /// </summary>
- /// <param name="ppInstance">The instance for the given path.</param>
- /// <returns>The instance for the given path, or E_NOTFOUND if not
- /// found.</returns>
- STDMETHOD(GetInstanceForPath)
- (_In_z_ LPCWSTR wzPath, _Out_ ISetupInstance **ppInstance) = 0;
-};
-#endif
-
-EXTERN_C const IID IID_ISetupConfiguration2;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-/// <summary>
-/// Gets information about product instances.
-/// </summary>
-struct DECLSPEC_UUID("26AAB78C-4A60-49D6-AF3B-3C35BC93365D")
- DECLSPEC_NOVTABLE ISetupConfiguration2 : public ISetupConfiguration {
- /// <summary>
- /// Enumerates all product instances.
- /// </summary>
- /// <param name="ppEnumInstances">An enumeration of all product
- /// instances.</param>
- /// <returns>Standard HRESULT indicating success or failure.</returns>
- STDMETHOD(EnumAllInstances)(_Out_ IEnumSetupInstances **ppEnumInstances) = 0;
-};
-#endif
-
-EXTERN_C const IID IID_ISetupPackageReference;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-/// <summary>
-/// A reference to a package.
-/// </summary>
-struct DECLSPEC_UUID("da8d8a16-b2b6-4487-a2f1-594ccccd6bf5")
- DECLSPEC_NOVTABLE ISetupPackageReference : public IUnknown {
- /// <summary>
- /// Gets the general package identifier.
- /// </summary>
- /// <param name="pbstrId">The general package identifier.</param>
- /// <returns>Standard HRESULT indicating success or failure.</returns>
- STDMETHOD(GetId)(_Out_ BSTR *pbstrId) = 0;
-
- /// <summary>
- /// Gets the version of the package.
- /// </summary>
- /// <param name="pbstrVersion">The version of the package.</param>
- /// <returns>Standard HRESULT indicating success or failure.</returns>
- STDMETHOD(GetVersion)(_Out_ BSTR *pbstrVersion) = 0;
-
- /// <summary>
- /// Gets the target process architecture of the package.
- /// </summary>
- /// <param name="pbstrChip">The target process architecture of the
- /// package.</param>
- /// <returns>Standard HRESULT indicating success or failure.</returns>
- STDMETHOD(GetChip)(_Out_ BSTR *pbstrChip) = 0;
-
- /// <summary>
- /// Gets the language and optional region identifier.
- /// </summary>
- /// <param name="pbstrLanguage">The language and optional region
- /// identifier.</param>
- /// <returns>Standard HRESULT indicating success or failure.</returns>
- STDMETHOD(GetLanguage)(_Out_ BSTR *pbstrLanguage) = 0;
-
- /// <summary>
- /// Gets the build branch of the package.
- /// </summary>
- /// <param name="pbstrBranch">The build branch of the package.</param>
- /// <returns>Standard HRESULT indicating success or failure.</returns>
- STDMETHOD(GetBranch)(_Out_ BSTR *pbstrBranch) = 0;
-
- /// <summary>
- /// Gets the type of the package.
- /// </summary>
- /// <param name="pbstrType">The type of the package.</param>
- /// <returns>Standard HRESULT indicating success or failure.</returns>
- STDMETHOD(GetType)(_Out_ BSTR *pbstrType) = 0;
-
- /// <summary>
- /// Gets the unique identifier consisting of all defined tokens.
- /// </summary>
- /// <param name="pbstrUniqueId">The unique identifier consisting of all
- /// defined tokens.</param>
- /// <returns>Standard HRESULT indicating success or failure, including
- /// E_UNEXPECTED if no Id was defined (required).</returns>
- STDMETHOD(GetUniqueId)(_Out_ BSTR *pbstrUniqueId) = 0;
-};
-#endif
-
-EXTERN_C const IID IID_ISetupHelper;
-
-#if defined(__cplusplus) && !defined(CINTERFACE)
-/// <summary>
-/// Helper functions.
-/// </summary>
-/// <remarks>
-/// You can query for this interface from the <see cref="SetupConfiguration"/>
-/// class.
-/// </remarks>
-struct DECLSPEC_UUID("42b21b78-6192-463e-87bf-d577838f1d5c")
- DECLSPEC_NOVTABLE ISetupHelper : public IUnknown {
- /// <summary>
- /// Parses a dotted quad version string into a 64-bit unsigned integer.
- /// </summary>
- /// <param name="pwszVersion">The dotted quad version string to parse, e.g.
- /// 1.2.3.4.</param>
- /// <param name="pullVersion">A 64-bit unsigned integer representing the
- /// version. You can compare this to other versions.</param>
- /// <returns>Standard HRESULT indicating success or failure.</returns>
- STDMETHOD(ParseVersion)
- (_In_ LPCOLESTR pwszVersion, _Out_ PULONGLONG pullVersion) = 0;
-
- /// <summary>
-  /// Parses a dotted quad version range into minimum and maximum 64-bit
-  /// unsigned integers.
- /// </summary>
- /// <param name="pwszVersionRange">The string containing 1 or 2 dotted quad
- /// version strings to parse, e.g. [1.0,) that means 1.0.0.0 or newer.</param>
- /// <param name="pullMinVersion">A 64-bit unsigned integer representing the
- /// minimum version, which may be 0. You can compare this to other
- /// versions.</param>
- /// <param name="pullMaxVersion">A 64-bit unsigned integer representing the
- /// maximum version, which may be MAXULONGLONG. You can compare this to other
- /// versions.</param>
- /// <returns>Standard HRESULT indicating success or failure.</returns>
- STDMETHOD(ParseVersionRange)
- (_In_ LPCOLESTR pwszVersionRange, _Out_ PULONGLONG pullMinVersion,
- _Out_ PULONGLONG pullMaxVersion) = 0;
-};
-#endif
-
-// Class declarations
-//
-EXTERN_C const CLSID CLSID_SetupConfiguration;
-
-#ifdef __cplusplus
-/// <summary>
-/// This class implements <see cref="ISetupConfiguration"/>, <see
-/// cref="ISetupConfiguration2"/>, and <see cref="ISetupHelper"/>.
-/// </summary>
-class DECLSPEC_UUID("177F0C4A-1CD3-4DE7-A32C-71DBBB9FA36D") SetupConfiguration;
-#endif
-
-// Function declarations
-//
-/// <summary>
-/// Gets an <see cref="ISetupConfiguration"/> that provides information about
-/// product instances installed on the machine.
-/// </summary>
-/// <param name="ppConfiguration">The <see cref="ISetupConfiguration"/> that
-/// provides information about product instances installed on the
-/// machine.</param>
-/// <param name="pReserved">Reserved for future use.</param>
-/// <returns>Standard HRESULT indicating success or failure.</returns>
-STDMETHODIMP GetSetupConfiguration(_Out_ ISetupConfiguration **ppConfiguration,
- _Reserved_ LPVOID pReserved);
-
-#ifdef __cplusplus
-}
-#endif
-
-#ifdef __clang__
-#pragma clang diagnostic pop
-#endif
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp
index ceeaa79bc202..c4b4f8e9b89b 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/MinGW.cpp
@@ -218,6 +218,11 @@ void tools::MinGW::Linker::ConstructJob(Compilation &C, const JobAction &JA,
AddLinkerInputs(TC, Inputs, Args, CmdArgs, JA);
+ if (C.getDriver().IsFlangMode()) {
+ addFortranRuntimeLibraryPath(TC, Args, CmdArgs);
+ addFortranRuntimeLibs(TC, CmdArgs);
+ }
+
// TODO: Add profile stuff here
if (TC.ShouldLinkCXXStdlib(Args)) {
@@ -478,8 +483,8 @@ bool toolchains::MinGW::IsUnwindTablesDefault(const ArgList &Args) const {
if (ExceptionArg &&
ExceptionArg->getOption().matches(options::OPT_fseh_exceptions))
return true;
- return getArch() == llvm::Triple::x86_64 ||
- getArch() == llvm::Triple::aarch64;
+ return getArch() == llvm::Triple::x86_64 || getArch() == llvm::Triple::arm ||
+ getArch() == llvm::Triple::thumb || getArch() == llvm::Triple::aarch64;
}
bool toolchains::MinGW::isPICDefault() const {
@@ -495,7 +500,8 @@ bool toolchains::MinGW::isPICDefaultForced() const { return true; }
llvm::ExceptionHandling
toolchains::MinGW::GetExceptionModel(const ArgList &Args) const {
- if (getArch() == llvm::Triple::x86_64 || getArch() == llvm::Triple::aarch64)
+ if (getArch() == llvm::Triple::x86_64 || getArch() == llvm::Triple::aarch64 ||
+ getArch() == llvm::Triple::arm || getArch() == llvm::Triple::thumb)
return llvm::ExceptionHandling::WinEH;
return llvm::ExceptionHandling::DwarfCFI;
}
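
The first MinGW hunk links the Fortran runtime when the driver runs in Flang mode; the other two widen the same architecture set, so unwind tables are now on by default and WinEH is the exception model on arm and thumb in addition to x86_64 and aarch64, while everything else stays on DWARF CFI. A standalone restatement of the shared predicate (the helper name is hypothetical; upstream keeps the checks inline):

    #include <cassert>
    #include <string>

    // WinEH-capable MinGW targets after this change; all other architectures
    // keep DWARF CFI-based unwinding.
    static bool usesWinEH(const std::string &Arch) {
      return Arch == "x86_64" || Arch == "aarch64" || Arch == "arm" ||
             Arch == "thumb";
    }

    int main() {
      assert(usesWinEH("thumb"));
      assert(!usesWinEH("i386")); // 32-bit x86 MinGW keeps its previous model
    }
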
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Minix.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Minix.cpp
index 5bceb9aba3e9..4b8670a79012 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Minix.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Minix.cpp
@@ -56,7 +56,8 @@ void tools::minix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
assert(Output.isNothing() && "Invalid output.");
}
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
+ options::OPT_r)) {
CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crt1.o")));
CmdArgs.push_back(Args.MakeArgString(getToolChain().GetFilePath("crti.o")));
CmdArgs.push_back(
@@ -71,7 +72,8 @@ void tools::minix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
getToolChain().addProfileRTLibs(Args, CmdArgs);
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs,
+ options::OPT_r)) {
if (D.CCCIsCXX()) {
if (getToolChain().ShouldLinkCXXStdlib(Args))
getToolChain().AddCXXStdlibLibArgs(Args, CmdArgs);
@@ -79,7 +81,8 @@ void tools::minix::Linker::ConstructJob(Compilation &C, const JobAction &JA,
}
}
- if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles)) {
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
+ options::OPT_r)) {
if (Args.hasArg(options::OPT_pthread))
CmdArgs.push_back("-lpthread");
CmdArgs.push_back("-lc");
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.cpp
index 86a10ce4b0e7..54cf3cc89caf 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/OpenBSD.cpp
@@ -17,6 +17,7 @@
#include "clang/Driver/SanitizerArgs.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/VirtualFileSystem.h"
using namespace clang::driver;
using namespace clang::driver::tools;
@@ -113,6 +114,7 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
const toolchains::OpenBSD &ToolChain =
static_cast<const toolchains::OpenBSD &>(getToolChain());
const Driver &D = ToolChain.getDriver();
+ const llvm::Triple::ArchType Arch = ToolChain.getArch();
ArgStringList CmdArgs;
// Silence warning for "clang -g foo.o -o foo"
@@ -123,9 +125,12 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
// handled somewhere else.
Args.ClaimAllArgs(options::OPT_w);
- if (ToolChain.getArch() == llvm::Triple::mips64)
+ if (!D.SysRoot.empty())
+ CmdArgs.push_back(Args.MakeArgString("--sysroot=" + D.SysRoot));
+
+ if (Arch == llvm::Triple::mips64)
CmdArgs.push_back("-EB");
- else if (ToolChain.getArch() == llvm::Triple::mips64el)
+ else if (Arch == llvm::Triple::mips64el)
CmdArgs.push_back("-EL");
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_shared)) {
@@ -153,6 +158,9 @@ void openbsd::Linker::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_nopie) || Args.hasArg(options::OPT_pg))
CmdArgs.push_back("-nopie");
+ if (Arch == llvm::Triple::riscv64)
+ CmdArgs.push_back("-X");
+
if (Output.isFilename()) {
CmdArgs.push_back("-o");
CmdArgs.push_back(Output.getFilename());
@@ -327,12 +335,21 @@ void OpenBSD::AddCXXStdlibLibArgs(const ArgList &Args,
CmdArgs.push_back(Profiling ? "-lpthread_p" : "-lpthread");
}
-std::string OpenBSD::getCompilerRT(const ArgList &Args,
- StringRef Component,
+std::string OpenBSD::getCompilerRT(const ArgList &Args, StringRef Component,
FileType Type) const {
- SmallString<128> Path(getDriver().SysRoot);
- llvm::sys::path::append(Path, "/usr/lib/libcompiler_rt.a");
- return std::string(Path.str());
+ if (Component == "builtins") {
+ SmallString<128> Path(getDriver().SysRoot);
+ llvm::sys::path::append(Path, "/usr/lib/libcompiler_rt.a");
+ return std::string(Path.str());
+ }
+ SmallString<128> P(getDriver().ResourceDir);
+ std::string CRTBasename =
+ buildCompilerRTBasename(Args, Component, Type, /*AddArch=*/false);
+ llvm::sys::path::append(P, "lib", CRTBasename);
+  // Check whether this is the base-system case, which uses a different
+  // location.
+ if (getVFS().exists(P))
+ return std::string(P.str());
+ return ToolChain::getCompilerRT(Args, Component, Type);
}
Tool *OpenBSD::buildAssembler() const {
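
The rewritten OpenBSD::getCompilerRT above separates the builtins library, which on OpenBSD always comes from the base system as /usr/lib/libcompiler_rt.a, from every other runtime component: those are looked up first under the clang resource directory (the base-system layout, with no arch suffix) and fall back to the generic ToolChain::getCompilerRT path only if absent. A standalone sketch of that order (existsInResourceDir stands in for the getVFS().exists() check):

    #include <iostream>
    #include <string>

    static std::string compilerRTPath(const std::string &SysRoot,
                                      const std::string &ResourceDir,
                                      const std::string &Component,
                                      const std::string &CRTBasename,
                                      bool ExistsInResourceDir) {
      if (Component == "builtins")
        return SysRoot + "/usr/lib/libcompiler_rt.a"; // always the base system
      if (ExistsInResourceDir)
        return ResourceDir + "/lib/" + CRTBasename;   // base-system clang layout
      return "<generic ToolChain::getCompilerRT path>"; // fallback
    }

    int main() {
      std::cout << compilerRTPath("", "/usr/lib/clang/14.0.0", "profile",
                                  "libclang_rt.profile.a", true)
                << '\n';
    }
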
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp
index bcf9147833dd..35a83d79abfd 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.cpp
@@ -7,7 +7,6 @@
//===----------------------------------------------------------------------===//
#include "PS4CPU.h"
-#include "FreeBSD.h"
#include "CommonArgs.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
@@ -23,8 +22,18 @@ using namespace clang::driver;
using namespace clang;
using namespace llvm::opt;
-void tools::PS4cpu::addProfileRTArgs(const ToolChain &TC, const ArgList &Args,
- ArgStringList &CmdArgs) {
+// Helper to paste bits of an option together and return a saved string.
+static const char *makeArgString(const ArgList &Args, const char *Prefix,
+ const char *Base, const char *Suffix) {
+ // Basically "Prefix + Base + Suffix" all converted to Twine then saved.
+ return Args.MakeArgString(Twine(StringRef(Prefix), Base) + Suffix);
+}
+
+void tools::PScpu::addProfileRTArgs(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ assert(TC.getTriple().isPS());
+ auto &PSTC = static_cast<const toolchains::PS4PS5Base &>(TC);
+
if ((Args.hasFlag(options::OPT_fprofile_arcs, options::OPT_fno_profile_arcs,
false) ||
Args.hasFlag(options::OPT_fprofile_generate,
@@ -41,14 +50,16 @@ void tools::PS4cpu::addProfileRTArgs(const ToolChain &TC, const ArgList &Args,
options::OPT_fno_profile_generate, false) ||
Args.hasArg(options::OPT_fcreate_profile) ||
Args.hasArg(options::OPT_coverage)))
- CmdArgs.push_back("--dependent-lib=libclang_rt.profile-x86_64.a");
+ CmdArgs.push_back(makeArgString(
+ Args, "--dependent-lib=", PSTC.getProfileRTLibName(), ""));
}
-void tools::PS4cpu::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
+void tools::PScpu::Assembler::ConstructJob(Compilation &C, const JobAction &JA,
const InputInfo &Output,
const InputInfoList &Inputs,
const ArgList &Args,
const char *LinkingOutput) const {
+ auto &TC = static_cast<const toolchains::PS4PS5Base &>(getToolChain());
claimNoWarnArgs(Args);
ArgStringList CmdArgs;
@@ -62,41 +73,57 @@ void tools::PS4cpu::Assemble::ConstructJob(Compilation &C, const JobAction &JA,
assert(Input.isFilename() && "Invalid input.");
CmdArgs.push_back(Input.getFilename());
- const char *Exec =
- Args.MakeArgString(getToolChain().GetProgramPath("orbis-as"));
+ std::string AsName = TC.qualifyPSCmdName("as");
+ const char *Exec = Args.MakeArgString(TC.GetProgramPath(AsName.c_str()));
C.addCommand(std::make_unique<Command>(JA, *this,
ResponseFileSupport::AtFileUTF8(),
Exec, CmdArgs, Inputs, Output));
}
-static void AddPS4SanitizerArgs(const ToolChain &TC, const ArgList &Args,
- ArgStringList &CmdArgs) {
- const SanitizerArgs &SanArgs = TC.getSanitizerArgs(Args);
- if (SanArgs.needsUbsanRt()) {
- CmdArgs.push_back("-lSceDbgUBSanitizer_stub_weak");
- }
- if (SanArgs.needsAsanRt()) {
- CmdArgs.push_back("-lSceDbgAddressSanitizer_stub_weak");
- }
+void tools::PScpu::addSanitizerArgs(const ToolChain &TC, const ArgList &Args,
+ ArgStringList &CmdArgs) {
+ assert(TC.getTriple().isPS());
+ auto &PSTC = static_cast<const toolchains::PS4PS5Base &>(TC);
+ PSTC.addSanitizerArgs(Args, CmdArgs, "--dependent-lib=lib", ".a");
+}
+
+void toolchains::PS4CPU::addSanitizerArgs(const ArgList &Args,
+ ArgStringList &CmdArgs,
+ const char *Prefix,
+ const char *Suffix) const {
+ auto arg = [&](const char *Name) -> const char * {
+ return makeArgString(Args, Prefix, Name, Suffix);
+ };
+ const SanitizerArgs &SanArgs = getSanitizerArgs(Args);
+ if (SanArgs.needsUbsanRt())
+ CmdArgs.push_back(arg("SceDbgUBSanitizer_stub_weak"));
+ if (SanArgs.needsAsanRt())
+ CmdArgs.push_back(arg("SceDbgAddressSanitizer_stub_weak"));
}
-void tools::PS4cpu::addSanitizerArgs(const ToolChain &TC, const ArgList &Args,
- ArgStringList &CmdArgs) {
- const SanitizerArgs &SanArgs = TC.getSanitizerArgs(Args);
+void toolchains::PS5CPU::addSanitizerArgs(const ArgList &Args,
+ ArgStringList &CmdArgs,
+ const char *Prefix,
+ const char *Suffix) const {
+ auto arg = [&](const char *Name) -> const char * {
+ return makeArgString(Args, Prefix, Name, Suffix);
+ };
+ const SanitizerArgs &SanArgs = getSanitizerArgs(Args);
if (SanArgs.needsUbsanRt())
- CmdArgs.push_back("--dependent-lib=libSceDbgUBSanitizer_stub_weak.a");
+ CmdArgs.push_back(arg("SceUBSanitizer_nosubmission_stub_weak"));
if (SanArgs.needsAsanRt())
- CmdArgs.push_back("--dependent-lib=libSceDbgAddressSanitizer_stub_weak.a");
+ CmdArgs.push_back(arg("SceAddressSanitizer_nosubmission_stub_weak"));
+ if (SanArgs.needsTsanRt())
+ CmdArgs.push_back(arg("SceThreadSanitizer_nosubmission_stub_weak"));
}
-void tools::PS4cpu::Link::ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
- const ArgList &Args,
- const char *LinkingOutput) const {
- const toolchains::FreeBSD &ToolChain =
- static_cast<const toolchains::FreeBSD &>(getToolChain());
- const Driver &D = ToolChain.getDriver();
+void tools::PScpu::Linker::ConstructJob(Compilation &C, const JobAction &JA,
+ const InputInfo &Output,
+ const InputInfoList &Inputs,
+ const ArgList &Args,
+ const char *LinkingOutput) const {
+ auto &TC = static_cast<const toolchains::PS4PS5Base &>(getToolChain());
+ const Driver &D = TC.getDriver();
ArgStringList CmdArgs;
// Silence warning for "clang -g foo.o -o foo"
@@ -116,7 +143,7 @@ void tools::PS4cpu::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_rdynamic))
CmdArgs.push_back("-export-dynamic");
if (Args.hasArg(options::OPT_shared))
- CmdArgs.push_back("--oformat=so");
+ CmdArgs.push_back("--shared");
if (Output.isFilename()) {
CmdArgs.push_back("-o");
@@ -125,8 +152,8 @@ void tools::PS4cpu::Link::ConstructJob(Compilation &C, const JobAction &JA,
assert(Output.isNothing() && "Invalid output.");
}
- if(!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs))
- AddPS4SanitizerArgs(ToolChain, Args, CmdArgs);
+ if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nodefaultlibs))
+ TC.addSanitizerArgs(Args, CmdArgs, "-l", "");
Args.AddAllArgs(CmdArgs, options::OPT_L);
Args.AddAllArgs(CmdArgs, options::OPT_T_Group);
@@ -138,7 +165,7 @@ void tools::PS4cpu::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_Z_Xlinker__no_demangle))
CmdArgs.push_back("--no-demangle");
- AddLinkerInputs(ToolChain, Inputs, Args, CmdArgs, JA);
+ AddLinkerInputs(TC, Inputs, Args, CmdArgs, JA);
if (Args.hasArg(options::OPT_pthread)) {
CmdArgs.push_back("-lpthread");
@@ -146,89 +173,92 @@ void tools::PS4cpu::Link::ConstructJob(Compilation &C, const JobAction &JA,
if (Args.hasArg(options::OPT_fuse_ld_EQ)) {
D.Diag(diag::err_drv_unsupported_opt_for_target)
- << "-fuse-ld" << getToolChain().getTriple().str();
+ << "-fuse-ld" << TC.getTriple().str();
}
- const char *Exec =
- Args.MakeArgString(ToolChain.GetProgramPath("orbis-ld"));
+ std::string LdName = TC.qualifyPSCmdName(TC.getLinkerBaseName());
+ const char *Exec = Args.MakeArgString(TC.GetProgramPath(LdName.c_str()));
C.addCommand(std::make_unique<Command>(JA, *this,
ResponseFileSupport::AtFileUTF8(),
Exec, CmdArgs, Inputs, Output));
}
-toolchains::PS4CPU::PS4CPU(const Driver &D, const llvm::Triple &Triple,
- const ArgList &Args)
+toolchains::PS4PS5Base::PS4PS5Base(const Driver &D, const llvm::Triple &Triple,
+ const ArgList &Args, StringRef Platform,
+ const char *EnvVar)
: Generic_ELF(D, Triple, Args) {
if (Args.hasArg(clang::driver::options::OPT_static))
- D.Diag(clang::diag::err_drv_unsupported_opt_for_target) << "-static"
- << "PS4";
+ D.Diag(clang::diag::err_drv_unsupported_opt_for_target)
+ << "-static" << Platform;
- // Determine where to find the PS4 libraries. We use SCE_ORBIS_SDK_DIR
+ // Determine where to find the PS4/PS5 libraries. We use the EnvVar
// if it exists; otherwise use the driver's installation path, which
// should be <SDK_DIR>/host_tools/bin.
- SmallString<512> PS4SDKDir;
- if (const char *EnvValue = getenv("SCE_ORBIS_SDK_DIR")) {
+ SmallString<512> SDKDir;
+ if (const char *EnvValue = getenv(EnvVar)) {
if (!llvm::sys::fs::exists(EnvValue))
- getDriver().Diag(clang::diag::warn_drv_ps4_sdk_dir) << EnvValue;
- PS4SDKDir = EnvValue;
+ D.Diag(clang::diag::warn_drv_ps_sdk_dir) << EnvVar << EnvValue;
+ SDKDir = EnvValue;
} else {
- PS4SDKDir = getDriver().Dir;
- llvm::sys::path::append(PS4SDKDir, "/../../");
+ SDKDir = D.Dir;
+ llvm::sys::path::append(SDKDir, "/../../");
}
- // By default, the driver won't report a warning if it can't find
- // PS4's include or lib directories. This behavior could be changed if
+ // By default, the driver won't report a warning if it can't find the
+ // SDK include or lib directories. This behavior could be changed if
// -Weverything or -Winvalid-or-nonexistent-directory options are passed.
// If -isysroot was passed, use that as the SDK base path.
std::string PrefixDir;
if (const Arg *A = Args.getLastArg(options::OPT_isysroot)) {
PrefixDir = A->getValue();
if (!llvm::sys::fs::exists(PrefixDir))
- getDriver().Diag(clang::diag::warn_missing_sysroot) << PrefixDir;
+ D.Diag(clang::diag::warn_missing_sysroot) << PrefixDir;
} else
- PrefixDir = std::string(PS4SDKDir.str());
+ PrefixDir = std::string(SDKDir.str());
- SmallString<512> PS4SDKIncludeDir(PrefixDir);
- llvm::sys::path::append(PS4SDKIncludeDir, "target/include");
+ SmallString<512> SDKIncludeDir(PrefixDir);
+ llvm::sys::path::append(SDKIncludeDir, "target/include");
if (!Args.hasArg(options::OPT_nostdinc) &&
!Args.hasArg(options::OPT_nostdlibinc) &&
!Args.hasArg(options::OPT_isysroot) &&
!Args.hasArg(options::OPT__sysroot_EQ) &&
- !llvm::sys::fs::exists(PS4SDKIncludeDir)) {
- getDriver().Diag(clang::diag::warn_drv_unable_to_find_directory_expected)
- << "PS4 system headers" << PS4SDKIncludeDir;
+ !llvm::sys::fs::exists(SDKIncludeDir)) {
+ D.Diag(clang::diag::warn_drv_unable_to_find_directory_expected)
+ << Twine(Platform, " system headers").str() << SDKIncludeDir;
}
- SmallString<512> PS4SDKLibDir(PS4SDKDir);
- llvm::sys::path::append(PS4SDKLibDir, "target/lib");
+ SmallString<512> SDKLibDir(SDKDir);
+ llvm::sys::path::append(SDKLibDir, "target/lib");
if (!Args.hasArg(options::OPT_nostdlib) &&
!Args.hasArg(options::OPT_nodefaultlibs) &&
!Args.hasArg(options::OPT__sysroot_EQ) && !Args.hasArg(options::OPT_E) &&
!Args.hasArg(options::OPT_c) && !Args.hasArg(options::OPT_S) &&
!Args.hasArg(options::OPT_emit_ast) &&
- !llvm::sys::fs::exists(PS4SDKLibDir)) {
- getDriver().Diag(clang::diag::warn_drv_unable_to_find_directory_expected)
- << "PS4 system libraries" << PS4SDKLibDir;
+ !llvm::sys::fs::exists(SDKLibDir)) {
+ D.Diag(clang::diag::warn_drv_unable_to_find_directory_expected)
+ << Twine(Platform, " system libraries").str() << SDKLibDir;
return;
}
- getFilePaths().push_back(std::string(PS4SDKLibDir.str()));
+ getFilePaths().push_back(std::string(SDKLibDir.str()));
}
Tool *toolchains::PS4CPU::buildAssembler() const {
- return new tools::PS4cpu::Assemble(*this);
+ return new tools::PScpu::Assembler(*this);
}
-Tool *toolchains::PS4CPU::buildLinker() const {
- return new tools::PS4cpu::Link(*this);
+Tool *toolchains::PS5CPU::buildAssembler() const {
+ // PS5 does not support an external assembler.
+ getDriver().Diag(clang::diag::err_no_external_assembler);
+ return nullptr;
}
-bool toolchains::PS4CPU::isPICDefault() const { return true; }
-
-bool toolchains::PS4CPU::HasNativeLLVMSupport() const { return true; }
+Tool *toolchains::PS4PS5Base::buildLinker() const {
+ return new tools::PScpu::Linker(*this);
+}
-SanitizerMask toolchains::PS4CPU::getSupportedSanitizers() const {
+SanitizerMask toolchains::PS4PS5Base::getSupportedSanitizers() const {
SanitizerMask Res = ToolChain::getSupportedSanitizers();
Res |= SanitizerKind::Address;
Res |= SanitizerKind::PointerCompare;
@@ -237,10 +267,16 @@ SanitizerMask toolchains::PS4CPU::getSupportedSanitizers() const {
return Res;
}
-void toolchains::PS4CPU::addClangTargetOptions(
+SanitizerMask toolchains::PS5CPU::getSupportedSanitizers() const {
+ SanitizerMask Res = PS4PS5Base::getSupportedSanitizers();
+ Res |= SanitizerKind::Thread;
+ return Res;
+}
+
+void toolchains::PS4PS5Base::addClangTargetOptions(
const ArgList &DriverArgs, ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadingKind) const {
- // PS4 does not use init arrays.
+ // PS4/PS5 do not use init arrays.
if (DriverArgs.hasArg(options::OPT_fuse_init_array)) {
Arg *A = DriverArgs.getLastArg(options::OPT_fuse_init_array);
getDriver().Diag(clang::diag::err_drv_unsupported_opt_for_target)
@@ -281,3 +317,13 @@ void toolchains::PS4CPU::addClangTargetOptions(
CC1Args.push_back("-fvisibility-externs-nodllstorageclass=default");
}
}
+
+// PS4 toolchain.
+toolchains::PS4CPU::PS4CPU(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args)
+ : PS4PS5Base(D, Triple, Args, "PS4", "SCE_ORBIS_SDK_DIR") {}
+
+// PS5 toolchain.
+toolchains::PS5CPU::PS5CPU(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args)
+ : PS4PS5Base(D, Triple, Args, "PS5", "SCE_PROSPERO_SDK_DIR") {}
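
This rewrite folds the PS4-only toolchain into a shared PS4PS5Base, reducing the per-platform differences to a handful of virtuals: the SDK environment variable, the tool-name prefix, the sanitizer stub names, and the profile runtime. A minimal standalone sketch of the name-qualification scheme (see qualifyPSCmdName in the header diff below):

    #include <iostream>
    #include <string>

    // Each platform prepends its own prefix to a generic command name.
    static std::string qualify(const std::string &Prefix, const std::string &Cmd) {
      return Prefix + Cmd;
    }

    int main() {
      std::cout << qualify("orbis-", "ld") << '\n';     // PS4: orbis-ld
      std::cout << qualify("prospero-", "lld") << '\n'; // PS5: prospero-lld
      // PS5 additionally diagnoses any attempt to run an external assembler
      // (err_no_external_assembler in the hunk above).
    }
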
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.h
index 4bedabaf267c..954e7d8d8d68 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/PS4CPU.h
@@ -18,7 +18,8 @@ namespace clang {
namespace driver {
namespace tools {
-namespace PS4cpu {
+namespace PScpu {
+// Functions/classes in this namespace support both PS4 and PS5.
void addProfileRTArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs);
@@ -26,75 +27,69 @@ void addProfileRTArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
void addSanitizerArgs(const ToolChain &TC, const llvm::opt::ArgList &Args,
llvm::opt::ArgStringList &CmdArgs);
-class LLVM_LIBRARY_VISIBILITY Assemble : public Tool {
+class LLVM_LIBRARY_VISIBILITY Assembler : public Tool {
public:
- Assemble(const ToolChain &TC) : Tool("PS4cpu::Assemble", "assembler", TC) {}
+ Assembler(const ToolChain &TC) : Tool("PScpu::Assembler", "assembler", TC) {}
bool hasIntegratedCPP() const override { return false; }
void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
+ const InputInfo &Output, const InputInfoList &Inputs,
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
-class LLVM_LIBRARY_VISIBILITY Link : public Tool {
+class LLVM_LIBRARY_VISIBILITY Linker : public Tool {
public:
- Link(const ToolChain &TC) : Tool("PS4cpu::Link", "linker", TC) {}
+ Linker(const ToolChain &TC) : Tool("PScpu::Linker", "linker", TC) {}
bool hasIntegratedCPP() const override { return false; }
bool isLinkJob() const override { return true; }
void ConstructJob(Compilation &C, const JobAction &JA,
- const InputInfo &Output,
- const InputInfoList &Inputs,
+ const InputInfo &Output, const InputInfoList &Inputs,
const llvm::opt::ArgList &TCArgs,
const char *LinkingOutput) const override;
};
-} // end namespace PS4cpu
+} // namespace PScpu
} // namespace tools
namespace toolchains {
-class LLVM_LIBRARY_VISIBILITY PS4CPU : public Generic_ELF {
+// Common Toolchain base class for PS4 and PS5.
+class LLVM_LIBRARY_VISIBILITY PS4PS5Base : public Generic_ELF {
public:
- PS4CPU(const Driver &D, const llvm::Triple &Triple,
- const llvm::opt::ArgList &Args);
+ PS4PS5Base(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args, StringRef Platform,
+ const char *EnvVar);
// No support for finding a C++ standard library yet.
- void addLibCxxIncludePaths(
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const override {}
- void addLibStdCxxIncludePaths(
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args) const override {}
+ void addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override {
+ }
+ void
+ addLibStdCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const override {}
bool IsMathErrnoDefault() const override { return false; }
bool IsObjCNonFragileABIDefault() const override { return true; }
- bool HasNativeLLVMSupport() const override;
- bool isPICDefault() const override;
+ bool HasNativeLLVMSupport() const override { return true; }
+ bool isPICDefault() const override { return true; }
LangOptions::StackProtectorMode
GetDefaultStackProtectorLevel(bool KernelOrKext) const override {
return LangOptions::SSPStrong;
}
- unsigned GetDefaultDwarfVersion() const override { return 4; }
llvm::DebuggerKind getDefaultDebuggerTuning() const override {
return llvm::DebuggerKind::SCE;
}
SanitizerMask getSupportedSanitizers() const override;
- // PS4 toolchain uses legacy thin LTO API, which is not
- // capable of unit splitting.
- bool canSplitThinLTOUnit() const override { return false; }
-
void addClangTargetOptions(
- const llvm::opt::ArgList &DriverArgs,
- llvm::opt::ArgStringList &CC1Args,
- Action::OffloadKind DeviceOffloadingKind) const override;
+ const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
+ Action::OffloadKind DeviceOffloadingKind) const override;
llvm::DenormalMode getDefaultDenormalModeForType(
const llvm::opt::ArgList &DriverArgs, const JobAction &JA,
@@ -105,11 +100,71 @@ public:
bool useRelaxRelocations() const override { return true; }
+ // Helper methods for PS4/PS5.
+ virtual const char *getLinkerBaseName() const = 0;
+ virtual std::string qualifyPSCmdName(StringRef CmdName) const = 0;
+ virtual void addSanitizerArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs,
+ const char *Prefix,
+ const char *Suffix) const = 0;
+ virtual const char *getProfileRTLibName() const = 0;
+
protected:
- Tool *buildAssembler() const override;
Tool *buildLinker() const override;
};
+// PS4-specific Toolchain class.
+class LLVM_LIBRARY_VISIBILITY PS4CPU : public PS4PS5Base {
+public:
+ PS4CPU(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ unsigned GetDefaultDwarfVersion() const override { return 4; }
+
+ // PS4 toolchain uses legacy thin LTO API, which is not
+ // capable of unit splitting.
+ bool canSplitThinLTOUnit() const override { return false; }
+
+ const char *getLinkerBaseName() const override { return "ld"; }
+ std::string qualifyPSCmdName(StringRef CmdName) const override {
+ return Twine("orbis-", CmdName).str();
+ }
+ void addSanitizerArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs, const char *Prefix,
+ const char *Suffix) const override;
+ const char *getProfileRTLibName() const override {
+ return "libclang_rt.profile-x86_64.a";
+ }
+
+protected:
+ Tool *buildAssembler() const override;
+};
+
+// PS5-specific Toolchain class.
+class LLVM_LIBRARY_VISIBILITY PS5CPU : public PS4PS5Base {
+public:
+ PS5CPU(const Driver &D, const llvm::Triple &Triple,
+ const llvm::opt::ArgList &Args);
+
+ unsigned GetDefaultDwarfVersion() const override { return 5; }
+
+ SanitizerMask getSupportedSanitizers() const override;
+
+ const char *getLinkerBaseName() const override { return "lld"; }
+ std::string qualifyPSCmdName(StringRef CmdName) const override {
+ return Twine("prospero-", CmdName).str();
+ }
+ void addSanitizerArgs(const llvm::opt::ArgList &Args,
+ llvm::opt::ArgStringList &CmdArgs, const char *Prefix,
+ const char *Suffix) const override;
+ const char *getProfileRTLibName() const override {
+ return "libclang_rt.profile-x86_64_nosubmission.a";
+ }
+
+protected:
+ Tool *buildAssembler() const override;
+};
+
} // end namespace toolchains
} // end namespace driver
} // end namespace clang
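
The Prefix/Suffix parameters threaded through addSanitizerArgs above let one list of sanitizer stub names serve two spellings: the link step passes "-l"/"" while the compile step's dependent-library form passes "--dependent-lib=lib"/".a". A standalone sketch of the composition:

    #include <iostream>
    #include <string>

    static std::string sanitizerArg(const std::string &Prefix,
                                    const std::string &Name,
                                    const std::string &Suffix) {
      return Prefix + Name + Suffix;
    }

    int main() {
      const std::string Name = "SceDbgUBSanitizer_stub_weak"; // PS4 UBSan stub
      std::cout << sanitizerArg("-l", Name, "") << '\n';
      // -lSceDbgUBSanitizer_stub_weak
      std::cout << sanitizerArg("--dependent-lib=lib", Name, ".a") << '\n';
      // --dependent-lib=libSceDbgUBSanitizer_stub_weak.a
    }
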
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
index 714325a2db39..a048765bc6d3 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.cpp
@@ -98,6 +98,12 @@ void RISCVToolChain::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
if (DriverArgs.hasArg(options::OPT_nostdinc))
return;
+ if (!DriverArgs.hasArg(options::OPT_nobuiltininc)) {
+ SmallString<128> Dir(getDriver().ResourceDir);
+ llvm::sys::path::append(Dir, "include");
+ addSystemInclude(DriverArgs, CC1Args, Dir.str());
+ }
+
if (!DriverArgs.hasArg(options::OPT_nostdlibinc)) {
SmallString<128> Dir(computeSysRoot());
llvm::sys::path::append(Dir, "include");
@@ -157,6 +163,7 @@ void RISCV::Linker::ConstructJob(Compilation &C, const JobAction &JA,
} else {
CmdArgs.push_back("elf32lriscv");
}
+ CmdArgs.push_back("-X");
std::string Linker = getToolChain().GetLinkerPath();
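
Two independent RISC-V changes above: the bare-metal toolchain now adds the clang resource directory's include/ (the builtin headers) ahead of the sysroot headers unless -nobuiltininc is given, and the linker is invoked with -X. In GNU ld and LLD, -X is --discard-locals, dropping temporary local symbols such as assembler-generated .L labels; the usual rationale on RISC-V, and presumably here, is that linker relaxation can otherwise leave such symbols with stale values, matching what GCC does on this target. A sketch of the resulting header search order (paths illustrative):

    #include <iostream>
    #include <string>
    #include <vector>

    int main() {
      bool NoBuiltinInc = false, NoStdlibInc = false;
      std::vector<std::string> SearchOrder;
      if (!NoBuiltinInc)
        SearchOrder.push_back("<ResourceDir>/include"); // builtin headers (new)
      if (!NoStdlibInc)
        SearchOrder.push_back("<SysRoot>/include");     // sysroot headers
      for (const auto &Dir : SearchOrder)
        std::cout << Dir << '\n';
    }
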
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.h
index 62099bee0404..46b94bdb54e0 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/RISCVToolchain.h
@@ -22,7 +22,6 @@ public:
const llvm::opt::ArgList &Args);
static bool hasGCCToolchain(const Driver &D, const llvm::opt::ArgList &Args);
- bool IsIntegratedAssemblerDefault() const override { return true; }
void addClangTargetOptions(const llvm::opt::ArgList &DriverArgs,
llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind) const override;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/ROCm.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/ROCm.h
index bb482be68260..33baaa887043 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/ROCm.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/ROCm.h
@@ -22,6 +22,26 @@
namespace clang {
namespace driver {
+/// ABI version of device library.
+struct DeviceLibABIVersion {
+ unsigned ABIVersion = 0;
+ DeviceLibABIVersion(unsigned V) : ABIVersion(V) {}
+ static DeviceLibABIVersion fromCodeObjectVersion(unsigned CodeObjectVersion) {
+ if (CodeObjectVersion < 4)
+ CodeObjectVersion = 4;
+ return DeviceLibABIVersion(CodeObjectVersion * 100);
+ }
+ /// Whether ABI version bc file is requested.
+ /// ABIVersion is code object version multiplied by 100. Code object v4
+ /// and below works with ROCm 5.0 and below which does not have
+ /// abi_version_*.bc. Code object v5 requires abi_version_500.bc.
+ bool requiresLibrary() { return ABIVersion >= 500; }
+ std::string toString() {
+ assert(ABIVersion % 100 == 0 && "Not supported");
+ return Twine(ABIVersion / 100).str();
+ }
+};
+
/// A class to find a viable ROCM installation
/// TODO: Generalize to handle libclc.
class RocmInstallationDetector {
@@ -107,6 +127,10 @@ private:
ConditionalLibrary DenormalsAreZero;
ConditionalLibrary CorrectlyRoundedSqrt;
+ // Maps ABI version to library path. The version number is in the format of
+ // three digits as used in the ABI version library name.
+ std::map<unsigned, std::string> ABIVersionMap;
+
// Cache ROCm installation search paths.
SmallVector<Candidate, 4> ROCmSearchDirs;
bool PrintROCmSearchDirs;
@@ -142,7 +166,12 @@ public:
getCommonBitcodeLibs(const llvm::opt::ArgList &DriverArgs,
StringRef LibDeviceFile, bool Wave64, bool DAZ,
bool FiniteOnly, bool UnsafeMathOpt,
- bool FastRelaxedMath, bool CorrectSqrt) const;
+ bool FastRelaxedMath, bool CorrectSqrt,
+ DeviceLibABIVersion ABIVer, bool isOpenMP) const;
+ /// Check file paths of default bitcode libraries common to AMDGPU based
+ /// toolchains. \returns false if there are invalid or missing files.
+ bool checkCommonBitcodeLibs(StringRef GPUArch, StringRef LibDeviceFile,
+ DeviceLibABIVersion ABIVer) const;
/// Check whether we detected a valid HIP runtime.
bool hasHIPRuntime() const { return HasHIPRuntime; }
@@ -214,6 +243,13 @@ public:
return CorrectlyRoundedSqrt.get(Enabled);
}
+ StringRef getABIVersionPath(DeviceLibABIVersion ABIVer) const {
+ auto Loc = ABIVersionMap.find(ABIVer.ABIVersion);
+ if (Loc == ABIVersionMap.end())
+ return StringRef();
+ return Loc->second;
+ }
+
/// Get libdevice file for given architecture
std::string getLibDeviceFile(StringRef Gpu) const {
return LibDeviceMap.lookup(Gpu);
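
The new DeviceLibABIVersion wrapper encodes the mapping between AMDGPU code-object versions and the abi_version_*.bc device libraries: versions below 4 are clamped up to 4, the ABI number is the code-object version times 100, and only ABI 500 and above (code object v5) requires an abi_version bitcode file. A standalone restatement covering the boundary cases:

    #include <cassert>

    // Clamp code-object versions below 4 up to 4, then scale by 100.
    static unsigned abiVersionFor(unsigned CodeObjectVersion) {
      if (CodeObjectVersion < 4)
        CodeObjectVersion = 4;
      return CodeObjectVersion * 100;
    }

    static bool requiresLibrary(unsigned ABIVersion) { return ABIVersion >= 500; }

    int main() {
      assert(abiVersionFor(3) == 400 && !requiresLibrary(400)); // ROCm 5.0 and older
      assert(abiVersionFor(5) == 500 && requiresLibrary(500));  // abi_version_500.bc
    }
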
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.h
index a16ae3ca51fa..bb2904f76128 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/SPIRV.h
@@ -64,8 +64,9 @@ public:
: ToolChain(D, Triple, Args) {}
bool useIntegratedAs() const override { return true; }
- bool useIntegratedBackend() const override { return false; }
+ bool IsIntegratedBackendDefault() const override { return false; }
+ bool IsNonIntegratedBackendSupported() const override { return true; }
bool IsMathErrnoDefault() const override { return false; }
bool isCrossCompiling() const override { return true; }
bool isPICDefault() const override { return false; }
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.cpp
index cb3898575d3a..c75375ac95f7 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/Solaris.cpp
@@ -14,6 +14,8 @@
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
+#include "clang/Driver/SanitizerArgs.h"
+#include "clang/Driver/ToolChain.h"
#include "llvm/Option/ArgList.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
@@ -147,8 +149,18 @@ void solaris::Linker::ConstructJob(Compilation &C, const JobAction &JA,
CmdArgs.push_back("-lgcc");
CmdArgs.push_back("-lm");
}
- if (NeedsSanitizerDeps)
+ if (NeedsSanitizerDeps) {
linkSanitizerRuntimeDeps(getToolChain(), CmdArgs);
+
+ // Work around Solaris/amd64 ld bug when calling __tls_get_addr directly.
+ // However, ld -z relax=transtls is available since Solaris 11.2, but not
+ // in Illumos.
+ const SanitizerArgs &SA = getToolChain().getSanitizerArgs(Args);
+ if (getToolChain().getTriple().getArch() == llvm::Triple::x86_64 &&
+ (SA.needsAsanRt() || SA.needsStatsRt() ||
+ (SA.needsUbsanRt() && !SA.requiresMinimalRuntime())))
+ CmdArgs.push_back("-zrelax=transtls");
+ }
}
if (!Args.hasArg(options::OPT_nostdlib, options::OPT_nostartfiles,
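
The Solaris hunk above works around an amd64 ld bug with direct __tls_get_addr calls by passing -zrelax=transtls whenever a sanitizer runtime that makes such calls is linked (the comment notes the option exists since Solaris 11.2 but not on Illumos). A standalone restatement of the trigger condition:

    #include <cassert>

    // The workaround applies on x86_64 for ASan, the stats runtime, or a full
    // (non-minimal) UBSan runtime.
    static bool needsTranstls(bool IsX86_64, bool Asan, bool Stats, bool Ubsan,
                              bool MinimalRuntime) {
      return IsX86_64 && (Asan || Stats || (Ubsan && !MinimalRuntime));
    }

    int main() {
      assert(needsTranstls(true, true, false, false, false));  // ASan link
      assert(!needsTranstls(true, false, false, true, true));  // minimal UBSan: no
    }
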
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.h
index 964b0d0dd8d4..8b9ccaa7fada 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/VEToolchain.h
@@ -26,7 +26,6 @@ protected:
Tool *buildLinker() const override;
public:
- bool IsIntegratedAssemblerDefault() const override { return true; }
bool isPICDefault() const override;
bool isPIEDefault(const llvm::opt::ArgList &Args) const override;
bool isPICDefaultForced() const override;
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp b/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp
index 3614272a5f74..c5e4d569793c 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.cpp
@@ -8,15 +8,17 @@
#include "WebAssembly.h"
#include "CommonArgs.h"
+#include "Gnu.h"
#include "clang/Basic/Version.h"
#include "clang/Config/config.h"
#include "clang/Driver/Compilation.h"
#include "clang/Driver/Driver.h"
#include "clang/Driver/DriverDiagnostic.h"
#include "clang/Driver/Options.h"
+#include "llvm/Option/ArgList.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/Path.h"
-#include "llvm/Option/ArgList.h"
+#include "llvm/Support/VirtualFileSystem.h"
using namespace clang::driver;
using namespace clang::driver::tools;
@@ -371,7 +373,11 @@ ToolChain::CXXStdlibType
WebAssembly::GetCXXStdlibType(const ArgList &Args) const {
if (Arg *A = Args.getLastArg(options::OPT_stdlib_EQ)) {
StringRef Value = A->getValue();
- if (Value != "libc++")
+ if (Value == "libc++")
+ return ToolChain::CST_Libcxx;
+ else if (Value == "libstdc++")
+ return ToolChain::CST_Libstdcxx;
+ else
getDriver().Diag(diag::err_drv_invalid_stdlib_name)
<< A->getAsString(Args);
}
@@ -417,17 +423,18 @@ void WebAssembly::AddClangSystemIncludeArgs(const ArgList &DriverArgs,
void WebAssembly::AddClangCXXStdlibIncludeArgs(const ArgList &DriverArgs,
ArgStringList &CC1Args) const {
- if (!DriverArgs.hasArg(options::OPT_nostdlibinc) &&
- !DriverArgs.hasArg(options::OPT_nostdincxx)) {
- if (getTriple().getOS() != llvm::Triple::UnknownOS) {
- const std::string MultiarchTriple =
- getMultiarchTriple(getDriver(), getTriple(), getDriver().SysRoot);
- addSystemInclude(DriverArgs, CC1Args,
- getDriver().SysRoot + "/include/" + MultiarchTriple +
- "/c++/v1");
- }
- addSystemInclude(DriverArgs, CC1Args,
- getDriver().SysRoot + "/include/c++/v1");
+
+ if (DriverArgs.hasArg(options::OPT_nostdlibinc) ||
+ DriverArgs.hasArg(options::OPT_nostdincxx))
+ return;
+
+ switch (GetCXXStdlibType(DriverArgs)) {
+ case ToolChain::CST_Libcxx:
+ addLibCxxIncludePaths(DriverArgs, CC1Args);
+ break;
+ case ToolChain::CST_Libstdcxx:
+ addLibStdCXXIncludePaths(DriverArgs, CC1Args);
+ break;
}
}
@@ -440,7 +447,8 @@ void WebAssembly::AddCXXStdlibLibArgs(const llvm::opt::ArgList &Args,
CmdArgs.push_back("-lc++abi");
break;
case ToolChain::CST_Libstdcxx:
- llvm_unreachable("invalid stdlib name");
+ CmdArgs.push_back("-lstdc++");
+ break;
}
}
@@ -455,3 +463,77 @@ SanitizerMask WebAssembly::getSupportedSanitizers() const {
Tool *WebAssembly::buildLinker() const {
return new tools::wasm::Linker(*this);
}
+
+void WebAssembly::addLibCxxIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ const Driver &D = getDriver();
+ std::string SysRoot = computeSysRoot();
+ std::string LibPath = SysRoot + "/include";
+ const std::string MultiarchTriple =
+ getMultiarchTriple(D, getTriple(), SysRoot);
+ bool IsKnownOs = (getTriple().getOS() != llvm::Triple::UnknownOS);
+
+ std::string Version = detectLibcxxVersion(LibPath);
+ if (Version.empty())
+ return;
+
+ // First add the per-target include path if the OS is known.
+ if (IsKnownOs) {
+ std::string TargetDir = LibPath + "/" + MultiarchTriple + "/c++/" + Version;
+ addSystemInclude(DriverArgs, CC1Args, TargetDir);
+ }
+
+ // Second add the generic one.
+ addSystemInclude(DriverArgs, CC1Args, LibPath + "/c++/" + Version);
+}
+
+void WebAssembly::addLibStdCXXIncludePaths(
+ const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const {
+ // We cannot use GCCInstallationDetector here as the sysroot usually does
+ // not contain a full GCC installation.
+ // Instead, we search the given sysroot for /usr/include/xx, similar
+ // to how we do it for libc++.
+ const Driver &D = getDriver();
+ std::string SysRoot = computeSysRoot();
+ std::string LibPath = SysRoot + "/include";
+ const std::string MultiarchTriple =
+ getMultiarchTriple(D, getTriple(), SysRoot);
+ bool IsKnownOs = (getTriple().getOS() != llvm::Triple::UnknownOS);
+
+ // This is similar to detectLibcxxVersion()
+ std::string Version;
+ {
+ std::error_code EC;
+ Generic_GCC::GCCVersion MaxVersion =
+ Generic_GCC::GCCVersion::Parse("0.0.0");
+ SmallString<128> Path(LibPath);
+ llvm::sys::path::append(Path, "c++");
+ for (llvm::vfs::directory_iterator LI = getVFS().dir_begin(Path, EC), LE;
+ !EC && LI != LE; LI = LI.increment(EC)) {
+ StringRef VersionText = llvm::sys::path::filename(LI->path());
+ if (VersionText[0] != 'v') {
+ auto Version = Generic_GCC::GCCVersion::Parse(VersionText);
+ if (Version > MaxVersion)
+ MaxVersion = Version;
+ }
+ }
+ if (MaxVersion.Major > 0)
+ Version = MaxVersion.Text;
+ }
+
+ if (Version.empty())
+ return;
+
+ // First add the per-target include path if the OS is known.
+ if (IsKnownOs) {
+ std::string TargetDir = LibPath + "/c++/" + Version + "/" + MultiarchTriple;
+ addSystemInclude(DriverArgs, CC1Args, TargetDir);
+ }
+
+ // Second add the generic one.
+ addSystemInclude(DriverArgs, CC1Args, LibPath + "/c++/" + Version);
+ // Third the backward one.
+ addSystemInclude(DriverArgs, CC1Args, LibPath + "/c++/" + Version + "/backward");
+}
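
With libstdc++ now accepted via -stdlib=libstdc++, addLibStdCXXIncludePaths above must discover the installed version by scanning <sysroot>/include/c++, skipping libc++-style "v1" entries and keeping the highest GCC-style version. A standalone sketch of that selection (parseVersion is a simplified stand-in for Generic_GCC::GCCVersion::Parse):

    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    static std::vector<int> parseVersion(const std::string &Text) {
      std::vector<int> Parts;
      std::istringstream In(Text);
      std::string Piece;
      while (std::getline(In, Piece, '.'))
        Parts.push_back(std::stoi(Piece));
      return Parts;
    }

    static std::string pickLibstdcxxDir(const std::vector<std::string> &Entries) {
      std::string BestText;
      std::vector<int> Best;
      for (const auto &E : Entries) {
        if (E.empty() || E[0] == 'v') // skip libc++'s "v1"
          continue;
        auto V = parseVersion(E);
        if (V > Best) { // std::vector compares lexicographically per component
          Best = V;
          BestText = E;
        }
      }
      return BestText;
    }

    int main() {
      std::cout << pickLibstdcxxDir({"v1", "9.3.0", "12.1.0"}) << '\n'; // 12.1.0
    }
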
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.h
index b4c3082a089a..5b9b8a0fe4e6 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/WebAssembly.h
@@ -70,11 +70,20 @@ private:
const char *getDefaultLinker() const override { return "wasm-ld"; }
+ CXXStdlibType GetDefaultCXXStdlibType() const override {
+ return ToolChain::CST_Libcxx;
+ }
+
Tool *buildLinker() const override;
std::string getMultiarchTriple(const Driver &D,
const llvm::Triple &TargetTriple,
StringRef SysRoot) const override;
+
+ void addLibCxxIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const;
+ void addLibStdCXXIncludePaths(const llvm::opt::ArgList &DriverArgs,
+ llvm::opt::ArgStringList &CC1Args) const;
};
} // end namespace toolchains
diff --git a/contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.h b/contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.h
index 50bff0993561..53138306fd41 100644
--- a/contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.h
+++ b/contrib/llvm-project/clang/lib/Driver/ToolChains/ZOS.h
@@ -30,6 +30,8 @@ public:
bool IsIntegratedAssemblerDefault() const override { return true; }
+ unsigned GetDefaultDwarfVersion() const override { return 4; }
+
void addClangTargetOptions(
const llvm::opt::ArgList &DriverArgs, llvm::opt::ArgStringList &CC1Args,
Action::OffloadKind DeviceOffloadingKind) const override;
diff --git a/contrib/llvm-project/clang/lib/Driver/Types.cpp b/contrib/llvm-project/clang/lib/Driver/Types.cpp
index 8f6adc6c2ad1..66da6fe97059 100644
--- a/contrib/llvm-project/clang/lib/Driver/Types.cpp
+++ b/contrib/llvm-project/clang/lib/Driver/Types.cpp
@@ -65,9 +65,16 @@ static bool isPreprocessedModuleType(ID Id) {
return Id == TY_CXXModule || Id == TY_PP_CXXModule;
}
+static bool isPreprocessedHeaderUnitType(ID Id) {
+ return Id == TY_CXXSHeader || Id == TY_CXXUHeader || Id == TY_CXXHUHeader ||
+ Id == TY_PP_CXXHeaderUnit;
+}
+
types::ID types::getPrecompiledType(ID Id) {
if (isPreprocessedModuleType(Id))
return TY_ModuleFile;
+ if (isPreprocessedHeaderUnitType(Id))
+ return TY_HeaderUnit;
if (onlyPrecompileType(Id))
return TY_PCH;
return TY_INVALID;
@@ -139,6 +146,10 @@ bool types::isAcceptedByClang(ID Id) {
case TY_CLHeader:
case TY_ObjCHeader: case TY_PP_ObjCHeader:
case TY_CXXHeader: case TY_PP_CXXHeader:
+ case TY_CXXSHeader:
+ case TY_CXXUHeader:
+ case TY_CXXHUHeader:
+ case TY_PP_CXXHeaderUnit:
case TY_ObjCXXHeader: case TY_PP_ObjCXXHeader:
case TY_CXXModule: case TY_PP_CXXModule:
case TY_AST: case TY_ModuleFile: case TY_PCH:
@@ -148,6 +159,20 @@ bool types::isAcceptedByClang(ID Id) {
}
}
+bool types::isAcceptedByFlang(ID Id) {
+ switch (Id) {
+ default:
+ return false;
+
+ case TY_Fortran:
+ case TY_PP_Fortran:
+ return true;
+ case TY_LLVM_IR:
+ case TY_LLVM_BC:
+ return true;
+ }
+}
+
bool types::isDerivedFromC(ID Id) {
switch (Id) {
default:
@@ -210,6 +235,10 @@ bool types::isCXX(ID Id) {
case TY_CXX: case TY_PP_CXX:
case TY_ObjCXX: case TY_PP_ObjCXX: case TY_PP_ObjCXX_Alias:
case TY_CXXHeader: case TY_PP_CXXHeader:
+ case TY_CXXSHeader:
+ case TY_CXXUHeader:
+ case TY_CXXHUHeader:
+ case TY_PP_CXXHeaderUnit:
case TY_ObjCXXHeader: case TY_PP_ObjCXXHeader:
case TY_CXXModule: case TY_PP_CXXModule:
case TY_CUDA: case TY_PP_CUDA: case TY_CUDA_DEVICE:
@@ -257,16 +286,6 @@ bool types::isHIP(ID Id) {
}
}
-bool types::isFortran(ID Id) {
- switch (Id) {
- default:
- return false;
-
- case TY_Fortran: case TY_PP_Fortran:
- return true;
- }
-}
-
bool types::isSrcFile(ID Id) {
return Id != TY_Object && getPreprocessedType(Id) != TY_INVALID;
}
@@ -323,6 +342,7 @@ types::ID types::lookupTypeForExtension(llvm::StringRef Ext) {
.Case("hpp", TY_CXXHeader)
.Case("hxx", TY_CXXHeader)
.Case("iim", TY_PP_CXXModule)
+ .Case("iih", TY_PP_CXXHeaderUnit)
.Case("lib", TY_Object)
.Case("mii", TY_PP_ObjCXX)
.Case("obj", TY_Object)
@@ -332,6 +352,7 @@ types::ID types::lookupTypeForExtension(llvm::StringRef Ext) {
.Case("c++m", TY_CXXModule)
.Case("cppm", TY_CXXModule)
.Case("cxxm", TY_CXXModule)
+ .Case("hlsl", TY_HLSL)
.Default(TY_INVALID);
}
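
These Types.cpp changes wire C++20 header units through the driver: the three header-unit input kinds plus their preprocessed form are now accepted by clang, count as C++, and precompile to TY_HeaderUnit rather than TY_PCH, while the old isFortran predicate is replaced by a broader isAcceptedByFlang that also admits LLVM IR and bitcode. A standalone sketch of the precompile dispatch (enum stand-ins for types::ID):

    #include <cassert>

    enum ID { CXXModule, PP_CXXModule, CXXUHeader, PP_CXXHeaderUnit,
              CXXHeader, ModuleFile, HeaderUnit, PCH, Invalid };

    static ID getPrecompiledType(ID Id) {
      if (Id == CXXModule || Id == PP_CXXModule)
        return ModuleFile;
      if (Id == CXXUHeader || Id == PP_CXXHeaderUnit) // header units (new)
        return HeaderUnit;
      if (Id == CXXHeader) // stands in for onlyPrecompileType(Id)
        return PCH;
      return Invalid;
    }

    int main() {
      assert(getPrecompiledType(CXXUHeader) == HeaderUnit);
      assert(getPrecompiledType(CXXHeader) == PCH);
    }
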
diff --git a/contrib/llvm-project/clang/lib/Edit/RewriteObjCFoundationAPI.cpp b/contrib/llvm-project/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
index 589bf8d216ed..1ca041f3ed6d 100644
--- a/contrib/llvm-project/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
+++ b/contrib/llvm-project/clang/lib/Edit/RewriteObjCFoundationAPI.cpp
@@ -725,11 +725,11 @@ static bool getLiteralInfo(SourceRange literalRange,
break;
}
- if (!UpperU.hasValue() && !UpperL.hasValue())
+ if (!UpperU && !UpperL)
UpperU = UpperL = true;
- else if (UpperU.hasValue() && !UpperL.hasValue())
+ else if (UpperU && !UpperL)
UpperL = UpperU;
- else if (UpperL.hasValue() && !UpperU.hasValue())
+ else if (UpperL && !UpperU)
UpperU = UpperL;
Info.U = *UpperU ? "U" : "u";
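
The Edit hunk is part of the LLVM-wide migration away from Optional::hasValue() toward the explicit bool conversion. The same idiom, shown with std::optional:

    #include <cassert>
    #include <optional>

    int main() {
      std::optional<bool> UpperU, UpperL;
      if (!UpperU && !UpperL) // previously: !UpperU.hasValue() && !UpperL.hasValue()
        UpperU = UpperL = true;
      assert(*UpperU && *UpperL);
    }
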
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/API.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/API.cpp
new file mode 100644
index 000000000000..073e36f320d3
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/API.cpp
@@ -0,0 +1,233 @@
+//===- ExtractAPI/API.cpp ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the APIRecord and derived record structs,
+/// and the APISet class.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/ExtractAPI/API.h"
+#include "clang/AST/CommentCommandTraits.h"
+#include "clang/AST/CommentLexer.h"
+#include "clang/AST/RawCommentList.h"
+#include "clang/Index/USRGeneration.h"
+#include "llvm/ADT/STLFunctionalExtras.h"
+#include "llvm/ADT/StringRef.h"
+#include <memory>
+
+using namespace clang::extractapi;
+using namespace llvm;
+
+namespace {
+
+template <typename RecordTy, typename... CtorArgsTy>
+RecordTy *addTopLevelRecord(APISet::RecordMap<RecordTy> &RecordMap,
+ StringRef USR, CtorArgsTy &&...CtorArgs) {
+ auto Result = RecordMap.insert({USR, nullptr});
+
+ // Create the record if it does not already exist
+ if (Result.second)
+ Result.first->second =
+ std::make_unique<RecordTy>(USR, std::forward<CtorArgsTy>(CtorArgs)...);
+
+ return Result.first->second.get();
+}
+
+} // namespace
+
+GlobalVariableRecord *
+APISet::addGlobalVar(StringRef Name, StringRef USR, PresumedLoc Loc,
+ const AvailabilityInfo &Availability, LinkageInfo Linkage,
+ const DocComment &Comment, DeclarationFragments Fragments,
+ DeclarationFragments SubHeading) {
+ return addTopLevelRecord(GlobalVariables, USR, Name, Loc, Availability,
+ Linkage, Comment, Fragments, SubHeading);
+}
+
+GlobalFunctionRecord *APISet::addGlobalFunction(
+ StringRef Name, StringRef USR, PresumedLoc Loc,
+ const AvailabilityInfo &Availability, LinkageInfo Linkage,
+ const DocComment &Comment, DeclarationFragments Fragments,
+ DeclarationFragments SubHeading, FunctionSignature Signature) {
+ return addTopLevelRecord(GlobalFunctions, USR, Name, Loc, Availability,
+ Linkage, Comment, Fragments, SubHeading, Signature);
+}
+
+EnumConstantRecord *APISet::addEnumConstant(
+ EnumRecord *Enum, StringRef Name, StringRef USR, PresumedLoc Loc,
+ const AvailabilityInfo &Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading) {
+ auto Record = std::make_unique<EnumConstantRecord>(
+ USR, Name, Loc, Availability, Comment, Declaration, SubHeading);
+ return Enum->Constants.emplace_back(std::move(Record)).get();
+}
+
+EnumRecord *APISet::addEnum(StringRef Name, StringRef USR, PresumedLoc Loc,
+ const AvailabilityInfo &Availability,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading) {
+ return addTopLevelRecord(Enums, USR, Name, Loc, Availability, Comment,
+ Declaration, SubHeading);
+}
+
+StructFieldRecord *APISet::addStructField(StructRecord *Struct, StringRef Name,
+ StringRef USR, PresumedLoc Loc,
+ const AvailabilityInfo &Availability,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading) {
+ auto Record = std::make_unique<StructFieldRecord>(
+ USR, Name, Loc, Availability, Comment, Declaration, SubHeading);
+ return Struct->Fields.emplace_back(std::move(Record)).get();
+}
+
+StructRecord *APISet::addStruct(StringRef Name, StringRef USR, PresumedLoc Loc,
+ const AvailabilityInfo &Availability,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading) {
+ return addTopLevelRecord(Structs, USR, Name, Loc, Availability, Comment,
+ Declaration, SubHeading);
+}
+
+ObjCCategoryRecord *APISet::addObjCCategory(
+ StringRef Name, StringRef USR, PresumedLoc Loc,
+ const AvailabilityInfo &Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading,
+ SymbolReference Interface) {
+ // Create the category record.
+ auto *Record = addTopLevelRecord(ObjCCategories, USR, Name, Loc, Availability,
+ Comment, Declaration, SubHeading, Interface);
+
+ // If this category is extending a known interface, associate it with the
+ // ObjCInterfaceRecord.
+ auto It = ObjCInterfaces.find(Interface.USR);
+ if (It != ObjCInterfaces.end())
+ It->second->Categories.push_back(Record);
+
+ return Record;
+}
+
+ObjCInterfaceRecord *APISet::addObjCInterface(
+ StringRef Name, StringRef USR, PresumedLoc Loc,
+ const AvailabilityInfo &Availability, LinkageInfo Linkage,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, SymbolReference SuperClass) {
+ return addTopLevelRecord(ObjCInterfaces, USR, Name, Loc, Availability,
+ Linkage, Comment, Declaration, SubHeading,
+ SuperClass);
+}
+
+ObjCMethodRecord *APISet::addObjCMethod(
+ ObjCContainerRecord *Container, StringRef Name, StringRef USR,
+ PresumedLoc Loc, const AvailabilityInfo &Availability,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading, FunctionSignature Signature,
+ bool IsInstanceMethod) {
+ auto Record = std::make_unique<ObjCMethodRecord>(
+ USR, Name, Loc, Availability, Comment, Declaration, SubHeading, Signature,
+ IsInstanceMethod);
+ return Container->Methods.emplace_back(std::move(Record)).get();
+}
+
+ObjCPropertyRecord *APISet::addObjCProperty(
+ ObjCContainerRecord *Container, StringRef Name, StringRef USR,
+ PresumedLoc Loc, const AvailabilityInfo &Availability,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ ObjCPropertyRecord::AttributeKind Attributes, StringRef GetterName,
+ StringRef SetterName, bool IsOptional) {
+ auto Record = std::make_unique<ObjCPropertyRecord>(
+ USR, Name, Loc, Availability, Comment, Declaration, SubHeading,
+ Attributes, GetterName, SetterName, IsOptional);
+ return Container->Properties.emplace_back(std::move(Record)).get();
+}
+
+ObjCInstanceVariableRecord *APISet::addObjCInstanceVariable(
+ ObjCContainerRecord *Container, StringRef Name, StringRef USR,
+ PresumedLoc Loc, const AvailabilityInfo &Availability,
+ const DocComment &Comment, DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ ObjCInstanceVariableRecord::AccessControl Access) {
+ auto Record = std::make_unique<ObjCInstanceVariableRecord>(
+ USR, Name, Loc, Availability, Comment, Declaration, SubHeading, Access);
+ return Container->Ivars.emplace_back(std::move(Record)).get();
+}
+
+ObjCProtocolRecord *APISet::addObjCProtocol(
+ StringRef Name, StringRef USR, PresumedLoc Loc,
+ const AvailabilityInfo &Availability, const DocComment &Comment,
+ DeclarationFragments Declaration, DeclarationFragments SubHeading) {
+ return addTopLevelRecord(ObjCProtocols, USR, Name, Loc, Availability, Comment,
+ Declaration, SubHeading);
+}
+
+MacroDefinitionRecord *
+APISet::addMacroDefinition(StringRef Name, StringRef USR, PresumedLoc Loc,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading) {
+ return addTopLevelRecord(Macros, USR, Name, Loc, Declaration, SubHeading);
+}
+
+TypedefRecord *APISet::addTypedef(StringRef Name, StringRef USR,
+ PresumedLoc Loc,
+ const AvailabilityInfo &Availability,
+ const DocComment &Comment,
+ DeclarationFragments Declaration,
+ DeclarationFragments SubHeading,
+ SymbolReference UnderlyingType) {
+ return addTopLevelRecord(Typedefs, USR, Name, Loc, Availability, Comment,
+ Declaration, SubHeading, UnderlyingType);
+}
+
+StringRef APISet::recordUSR(const Decl *D) {
+ SmallString<128> USR;
+ index::generateUSRForDecl(D, USR);
+ return copyString(USR);
+}
+
+StringRef APISet::recordUSRForMacro(StringRef Name, SourceLocation SL,
+ const SourceManager &SM) {
+ SmallString<128> USR;
+ index::generateUSRForMacro(Name, SL, SM, USR);
+ return copyString(USR);
+}
+
+StringRef APISet::copyString(StringRef String) {
+ if (String.empty())
+ return {};
+
+ // No need to allocate memory and copy if the string has already been stored.
+ if (StringAllocator.identifyObject(String.data()))
+ return String;
+
+ void *Ptr = StringAllocator.Allocate(String.size(), 1);
+ memcpy(Ptr, String.data(), String.size());
+ return StringRef(reinterpret_cast<const char *>(Ptr), String.size());
+}
+
+APIRecord::~APIRecord() {}
+
+ObjCContainerRecord::~ObjCContainerRecord() {}
+
+void GlobalFunctionRecord::anchor() {}
+void GlobalVariableRecord::anchor() {}
+void EnumConstantRecord::anchor() {}
+void EnumRecord::anchor() {}
+void StructFieldRecord::anchor() {}
+void StructRecord::anchor() {}
+void ObjCPropertyRecord::anchor() {}
+void ObjCInstanceVariableRecord::anchor() {}
+void ObjCMethodRecord::anchor() {}
+void ObjCCategoryRecord::anchor() {}
+void ObjCInterfaceRecord::anchor() {}
+void ObjCProtocolRecord::anchor() {}
+void MacroDefinitionRecord::anchor() {}
+void TypedefRecord::anchor() {}
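
The new API.cpp funnels every top-level record through addTopLevelRecord, an insert-if-absent keyed on the symbol's USR, so re-adding a declaration returns the existing record instead of creating a duplicate; copyString likewise avoids re-allocating strings the allocator already owns. A standalone sketch of the insert-if-absent pattern (std::map stands in for APISet::RecordMap):

    #include <cassert>
    #include <map>
    #include <memory>
    #include <string>

    struct Record {
      std::string USR, Name;
    };

    template <typename RecordTy, typename... ArgsTy>
    RecordTy *addTopLevel(std::map<std::string, std::unique_ptr<RecordTy>> &Map,
                          const std::string &USR, ArgsTy &&...Args) {
      auto Result = Map.insert({USR, nullptr});
      if (Result.second) // only construct the record on first insertion
        Result.first->second = std::make_unique<RecordTy>(
            RecordTy{USR, std::forward<ArgsTy>(Args)...});
      return Result.first->second.get();
    }

    int main() {
      std::map<std::string, std::unique_ptr<Record>> Globals;
      Record *A = addTopLevel(Globals, "c:@F@foo", std::string("foo"));
      Record *B = addTopLevel(Globals, "c:@F@foo", std::string("foo"));
      assert(A == B); // the second add is a no-op returning the original
    }
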
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/DeclarationFragments.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/DeclarationFragments.cpp
new file mode 100644
index 000000000000..75d360a3ba16
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/DeclarationFragments.cpp
@@ -0,0 +1,799 @@
+//===- ExtractAPI/DeclarationFragments.cpp ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements Declaration Fragments related classes.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/ExtractAPI/DeclarationFragments.h"
+#include "TypedefUnderlyingTypeResolver.h"
+#include "clang/Index/USRGeneration.h"
+#include "llvm/ADT/StringSwitch.h"
+
+using namespace clang::extractapi;
+using namespace llvm;
+
+DeclarationFragments &DeclarationFragments::appendSpace() {
+ if (!Fragments.empty()) {
+ Fragment &Last = Fragments.back();
+ if (Last.Kind == FragmentKind::Text) {
+ // Merge the extra space into the last fragment if the last fragment is
+ // also text.
+ if (Last.Spelling.back() != ' ') { // avoid extra trailing spaces.
+ Last.Spelling.push_back(' ');
+ }
+ } else {
+ append(" ", FragmentKind::Text);
+ }
+ }
+
+ return *this;
+}
+
+StringRef DeclarationFragments::getFragmentKindString(
+ DeclarationFragments::FragmentKind Kind) {
+ switch (Kind) {
+ case DeclarationFragments::FragmentKind::None:
+ return "none";
+ case DeclarationFragments::FragmentKind::Keyword:
+ return "keyword";
+ case DeclarationFragments::FragmentKind::Attribute:
+ return "attribute";
+ case DeclarationFragments::FragmentKind::NumberLiteral:
+ return "number";
+ case DeclarationFragments::FragmentKind::StringLiteral:
+ return "string";
+ case DeclarationFragments::FragmentKind::Identifier:
+ return "identifier";
+ case DeclarationFragments::FragmentKind::TypeIdentifier:
+ return "typeIdentifier";
+ case DeclarationFragments::FragmentKind::GenericParameter:
+ return "genericParameter";
+ case DeclarationFragments::FragmentKind::ExternalParam:
+ return "externalParam";
+ case DeclarationFragments::FragmentKind::InternalParam:
+ return "internalParam";
+ case DeclarationFragments::FragmentKind::Text:
+ return "text";
+ }
+
+ llvm_unreachable("Unhandled FragmentKind");
+}
+
+DeclarationFragments::FragmentKind
+DeclarationFragments::parseFragmentKindFromString(StringRef S) {
+ return llvm::StringSwitch<FragmentKind>(S)
+ .Case("keyword", DeclarationFragments::FragmentKind::Keyword)
+ .Case("attribute", DeclarationFragments::FragmentKind::Attribute)
+ .Case("number", DeclarationFragments::FragmentKind::NumberLiteral)
+ .Case("string", DeclarationFragments::FragmentKind::StringLiteral)
+ .Case("identifier", DeclarationFragments::FragmentKind::Identifier)
+ .Case("typeIdentifier",
+ DeclarationFragments::FragmentKind::TypeIdentifier)
+ .Case("genericParameter",
+ DeclarationFragments::FragmentKind::GenericParameter)
+ .Case("internalParam", DeclarationFragments::FragmentKind::InternalParam)
+ .Case("externalParam", DeclarationFragments::FragmentKind::ExternalParam)
+ .Case("text", DeclarationFragments::FragmentKind::Text)
+ .Default(DeclarationFragments::FragmentKind::None);
+}
+
+// NNS stores C++ nested name specifiers, which are prefixes to qualified names.
+// Build declaration fragments for NNS recursively so that we have the USR for
+// every part of a qualified name; this also leaves the actual underlying type
+// cleaner for its own fragment.
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForNNS(const NestedNameSpecifier *NNS,
+ ASTContext &Context,
+ DeclarationFragments &After) {
+ DeclarationFragments Fragments;
+ if (NNS->getPrefix())
+ Fragments.append(getFragmentsForNNS(NNS->getPrefix(), Context, After));
+
+ switch (NNS->getKind()) {
+ case NestedNameSpecifier::Identifier:
+ Fragments.append(NNS->getAsIdentifier()->getName(),
+ DeclarationFragments::FragmentKind::Identifier);
+ break;
+
+ case NestedNameSpecifier::Namespace: {
+ const NamespaceDecl *NS = NNS->getAsNamespace();
+ if (NS->isAnonymousNamespace())
+ return Fragments;
+ SmallString<128> USR;
+ index::generateUSRForDecl(NS, USR);
+ Fragments.append(NS->getName(),
+ DeclarationFragments::FragmentKind::Identifier, USR);
+ break;
+ }
+
+ case NestedNameSpecifier::NamespaceAlias: {
+ const NamespaceAliasDecl *Alias = NNS->getAsNamespaceAlias();
+ SmallString<128> USR;
+ index::generateUSRForDecl(Alias, USR);
+ Fragments.append(Alias->getName(),
+ DeclarationFragments::FragmentKind::Identifier, USR);
+ break;
+ }
+
+ case NestedNameSpecifier::Global:
+ // The global specifier `::` at the beginning. No stored value.
+ break;
+
+ case NestedNameSpecifier::Super:
+ // Microsoft's `__super` specifier.
+ Fragments.append("__super", DeclarationFragments::FragmentKind::Keyword);
+ break;
+
+ case NestedNameSpecifier::TypeSpecWithTemplate:
+ // A type prefixed by the `template` keyword.
+ Fragments.append("template", DeclarationFragments::FragmentKind::Keyword);
+ Fragments.appendSpace();
+ // Fallthrough after adding the keyword to handle the actual type.
+ LLVM_FALLTHROUGH;
+
+ case NestedNameSpecifier::TypeSpec: {
+ const Type *T = NNS->getAsType();
+ // FIXME: Handle C++ template specialization type
+ Fragments.append(getFragmentsForType(T, Context, After));
+ break;
+ }
+ }
+
+ // Add the separator text `::` for this segment.
+ return Fragments.append("::", DeclarationFragments::FragmentKind::Text);
+}
+
+// Recursively build the declaration fragments for an underlying `Type` with
+// qualifiers removed.
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForType(
+ const Type *T, ASTContext &Context, DeclarationFragments &After) {
+ assert(T && "invalid type");
+
+ DeclarationFragments Fragments;
+
+  // The declaration fragments of a pointer type are the declaration fragments
+  // of the pointee type followed by a `*`, except for Objective-C `id` and
+  // `Class` pointers, where we do not spell out the `*`.
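+  // For illustration: `int *` becomes the fragments for `int` followed by
+  // text(" *"), while an Objective-C `id` pointer carries no trailing `*`.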
+ if (T->isPointerType() ||
+ (T->isObjCObjectPointerType() &&
+ !T->getAs<ObjCObjectPointerType>()->isObjCIdOrClassType())) {
+ return Fragments
+ .append(getFragmentsForType(T->getPointeeType(), Context, After))
+ .append(" *", DeclarationFragments::FragmentKind::Text);
+ }
+
+  // The declaration fragments of an lvalue reference type are the declaration
+  // fragments of the underlying type followed by a `&`.
+ if (const LValueReferenceType *LRT = dyn_cast<LValueReferenceType>(T))
+ return Fragments
+ .append(
+ getFragmentsForType(LRT->getPointeeTypeAsWritten(), Context, After))
+ .append(" &", DeclarationFragments::FragmentKind::Text);
+
+  // The declaration fragments of an rvalue reference type are the declaration
+  // fragments of the underlying type followed by a `&&`.
+ if (const RValueReferenceType *RRT = dyn_cast<RValueReferenceType>(T))
+ return Fragments
+ .append(
+ getFragmentsForType(RRT->getPointeeTypeAsWritten(), Context, After))
+ .append(" &&", DeclarationFragments::FragmentKind::Text);
+
+ // Declaration fragments of an array-typed variable have two parts:
+ // 1. the element type of the array that appears before the variable name;
+ // 2. array brackets `[(0-9)?]` that appear after the variable name.
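+  // For illustration: for `int nums[5]`, part 1 is the fragments for `int`,
+  // and part 2 accumulates text("["), number("5"), text("]") in `After`.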
+ if (const ArrayType *AT = T->getAsArrayTypeUnsafe()) {
+    // Build the "after" part first because the inner element type might also
+    // be an array type. For example, `int matrix[3][4]` has the type
+    // "(array 3 of (array 4 of int))".
+    // Push the array size part first to make sure the sizes appear in the
+    // right order.
+ After.append("[", DeclarationFragments::FragmentKind::Text);
+
+ switch (AT->getSizeModifier()) {
+ case ArrayType::Normal:
+ break;
+ case ArrayType::Static:
+ Fragments.append("static", DeclarationFragments::FragmentKind::Keyword);
+ break;
+ case ArrayType::Star:
+ Fragments.append("*", DeclarationFragments::FragmentKind::Text);
+ break;
+ }
+
+ if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT)) {
+ // FIXME: right now this would evaluate any expressions/macros written in
+ // the original source to concrete values. For example
+ // `int nums[MAX]` -> `int nums[100]`
+ // `char *str[5 + 1]` -> `char *str[6]`
+ SmallString<128> Size;
+ CAT->getSize().toStringUnsigned(Size);
+ After.append(Size, DeclarationFragments::FragmentKind::NumberLiteral);
+ }
+
+ After.append("]", DeclarationFragments::FragmentKind::Text);
+
+ return Fragments.append(
+ getFragmentsForType(AT->getElementType(), Context, After));
+ }
+
+  // An ElaboratedType is sugar for types that are referred to using an
+  // elaborated keyword, e.g., `struct S`, `enum E`, or (in C++) via a
+  // qualified name, e.g., `N::M::type`, or both.
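+  // For illustration: `struct S` renders as keyword("struct"), a space, and
+  // the fragments for `S`; for `N::M::type`, the `N::M::` qualifier is
+  // rendered by getFragmentsForNNS before the underlying type's fragments.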
+ if (const ElaboratedType *ET = dyn_cast<ElaboratedType>(T)) {
+ ElaboratedTypeKeyword Keyword = ET->getKeyword();
+ if (Keyword != ETK_None) {
+ Fragments
+ .append(ElaboratedType::getKeywordName(Keyword),
+ DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+ }
+
+ if (const NestedNameSpecifier *NNS = ET->getQualifier())
+ Fragments.append(getFragmentsForNNS(NNS, Context, After));
+
+ // After handling the elaborated keyword or qualified name, build
+ // declaration fragments for the desugared underlying type.
+ return Fragments.append(getFragmentsForType(ET->desugar(), Context, After));
+ }
+
+  // Everything we care about has been handled now; reduce to the canonical
+  // unqualified base type.
+ QualType Base = T->getCanonicalTypeUnqualified();
+
+ // Render Objective-C `id`/`instancetype` as keywords.
+ if (T->isObjCIdType())
+ return Fragments.append(Base.getAsString(),
+ DeclarationFragments::FragmentKind::Keyword);
+
+ // If the type is a typedefed type, get the underlying TypedefNameDecl for a
+ // direct reference to the typedef instead of the wrapped type.
+ if (const TypedefType *TypedefTy = dyn_cast<TypedefType>(T)) {
+ const TypedefNameDecl *Decl = TypedefTy->getDecl();
+ std::string USR =
+ TypedefUnderlyingTypeResolver(Context).getUSRForType(QualType(T, 0));
+ return Fragments.append(Decl->getName(),
+ DeclarationFragments::FragmentKind::TypeIdentifier,
+ USR);
+ }
+
+ // If the base type is a TagType (struct/interface/union/class/enum), let's
+ // get the underlying Decl for better names and USRs.
+ if (const TagType *TagTy = dyn_cast<TagType>(Base)) {
+ const TagDecl *Decl = TagTy->getDecl();
+ // Anonymous decl, skip this fragment.
+ if (Decl->getName().empty())
+ return Fragments;
+ SmallString<128> TagUSR;
+ clang::index::generateUSRForDecl(Decl, TagUSR);
+ return Fragments.append(Decl->getName(),
+ DeclarationFragments::FragmentKind::TypeIdentifier,
+ TagUSR);
+ }
+
+ // If the base type is an ObjCInterfaceType, use the underlying
+ // ObjCInterfaceDecl for the true USR.
+ if (const auto *ObjCIT = dyn_cast<ObjCInterfaceType>(Base)) {
+ const auto *Decl = ObjCIT->getDecl();
+ SmallString<128> USR;
+ index::generateUSRForDecl(Decl, USR);
+ return Fragments.append(Decl->getName(),
+ DeclarationFragments::FragmentKind::TypeIdentifier,
+ USR);
+ }
+
+ // Default fragment builder for other kinds of types (BuiltinType etc.)
+ SmallString<128> USR;
+ clang::index::generateUSRForType(Base, Context, USR);
+ Fragments.append(Base.getAsString(),
+ DeclarationFragments::FragmentKind::TypeIdentifier, USR);
+
+ return Fragments;
+}
+
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForQualifiers(const Qualifiers Quals) {
+ DeclarationFragments Fragments;
+ if (Quals.hasConst())
+ Fragments.append("const", DeclarationFragments::FragmentKind::Keyword);
+ if (Quals.hasVolatile())
+ Fragments.append("volatile", DeclarationFragments::FragmentKind::Keyword);
+ if (Quals.hasRestrict())
+ Fragments.append("restrict", DeclarationFragments::FragmentKind::Keyword);
+
+ return Fragments;
+}
+
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForType(
+ const QualType QT, ASTContext &Context, DeclarationFragments &After) {
+ assert(!QT.isNull() && "invalid type");
+
+ if (const ParenType *PT = dyn_cast<ParenType>(QT)) {
+ After.append(")", DeclarationFragments::FragmentKind::Text);
+ return getFragmentsForType(PT->getInnerType(), Context, After)
+ .append("(", DeclarationFragments::FragmentKind::Text);
+ }
+
+ const SplitQualType SQT = QT.split();
+ DeclarationFragments QualsFragments = getFragmentsForQualifiers(SQT.Quals),
+ TypeFragments =
+ getFragmentsForType(SQT.Ty, Context, After);
+ if (QualsFragments.getFragments().empty())
+ return TypeFragments;
+
+ // Use east qualifier for pointer types
+ // For example:
+ // ```
+ // int * const
+ // ^---- ^----
+ // type qualifier
+ // ^-----------------
+ // const pointer to int
+ // ```
+ // should not be reconstructed as
+ // ```
+ // const int *
+ // ^---- ^--
+ // qualifier type
+ // ^---------------- ^
+ // pointer to const int
+ // ```
+ if (SQT.Ty->isAnyPointerType())
+ return TypeFragments.appendSpace().append(std::move(QualsFragments));
+
+ return QualsFragments.appendSpace().append(std::move(TypeFragments));
+}
+
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForVar(const VarDecl *Var) {
+ DeclarationFragments Fragments;
+ StorageClass SC = Var->getStorageClass();
+ if (SC != SC_None)
+ Fragments
+ .append(VarDecl::getStorageClassSpecifierString(SC),
+ DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+ QualType T =
+ Var->getTypeSourceInfo()
+ ? Var->getTypeSourceInfo()->getType()
+ : Var->getASTContext().getUnqualifiedObjCPointerType(Var->getType());
+
+  // Capture potential fragments that need to be placed after the variable name
+ // ```
+ // int nums[5];
+ // char (*ptr_to_array)[6];
+ // ```
+ DeclarationFragments After;
+ return Fragments.append(getFragmentsForType(T, Var->getASTContext(), After))
+ .appendSpace()
+ .append(Var->getName(), DeclarationFragments::FragmentKind::Identifier)
+ .append(std::move(After));
+}
+
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForParam(const ParmVarDecl *Param) {
+ DeclarationFragments Fragments, After;
+
+ QualType T = Param->getTypeSourceInfo()
+ ? Param->getTypeSourceInfo()->getType()
+ : Param->getASTContext().getUnqualifiedObjCPointerType(
+ Param->getType());
+
+ DeclarationFragments TypeFragments =
+ getFragmentsForType(T, Param->getASTContext(), After);
+
+ if (Param->isObjCMethodParameter())
+ Fragments.append("(", DeclarationFragments::FragmentKind::Text)
+ .append(std::move(TypeFragments))
+ .append(") ", DeclarationFragments::FragmentKind::Text);
+ else
+ Fragments.append(std::move(TypeFragments)).appendSpace();
+
+ return Fragments
+ .append(Param->getName(),
+ DeclarationFragments::FragmentKind::InternalParam)
+ .append(std::move(After));
+}
+
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForFunction(const FunctionDecl *Func) {
+ DeclarationFragments Fragments;
+ // FIXME: Handle template specialization
+ switch (Func->getStorageClass()) {
+ case SC_None:
+ case SC_PrivateExtern:
+ break;
+ case SC_Extern:
+ Fragments.append("extern", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+ break;
+ case SC_Static:
+ Fragments.append("static", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+ break;
+ case SC_Auto:
+ case SC_Register:
+ llvm_unreachable("invalid for functions");
+ }
+ // FIXME: Handle C++ function specifiers: constexpr, consteval, explicit, etc.
+
+ // FIXME: Is `after` actually needed here?
+ DeclarationFragments After;
+ Fragments
+ .append(getFragmentsForType(Func->getReturnType(), Func->getASTContext(),
+ After))
+ .appendSpace()
+ .append(Func->getName(), DeclarationFragments::FragmentKind::Identifier)
+ .append(std::move(After));
+
+ Fragments.append("(", DeclarationFragments::FragmentKind::Text);
+ for (unsigned i = 0, end = Func->getNumParams(); i != end; ++i) {
+ if (i)
+ Fragments.append(", ", DeclarationFragments::FragmentKind::Text);
+ Fragments.append(getFragmentsForParam(Func->getParamDecl(i)));
+ }
+ Fragments.append(")", DeclarationFragments::FragmentKind::Text);
+
+ // FIXME: Handle exception specifiers: throw, noexcept
+ return Fragments;
+}
+
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForEnumConstant(
+ const EnumConstantDecl *EnumConstDecl) {
+ DeclarationFragments Fragments;
+ return Fragments.append(EnumConstDecl->getName(),
+ DeclarationFragments::FragmentKind::Identifier);
+}
+
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForEnum(const EnumDecl *EnumDecl) {
+ if (const auto *TypedefNameDecl = EnumDecl->getTypedefNameForAnonDecl())
+ return getFragmentsForTypedef(TypedefNameDecl);
+
+ DeclarationFragments Fragments, After;
+ Fragments.append("enum", DeclarationFragments::FragmentKind::Keyword);
+
+ if (!EnumDecl->getName().empty())
+ Fragments.appendSpace().append(
+ EnumDecl->getName(), DeclarationFragments::FragmentKind::Identifier);
+
+ QualType IntegerType = EnumDecl->getIntegerType();
+ if (!IntegerType.isNull())
+ Fragments.append(": ", DeclarationFragments::FragmentKind::Text)
+ .append(
+ getFragmentsForType(IntegerType, EnumDecl->getASTContext(), After))
+ .append(std::move(After));
+
+ return Fragments;
+}
+
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForField(const FieldDecl *Field) {
+ DeclarationFragments After;
+ return getFragmentsForType(Field->getType(), Field->getASTContext(), After)
+ .appendSpace()
+ .append(Field->getName(), DeclarationFragments::FragmentKind::Identifier)
+ .append(std::move(After));
+}
+
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForStruct(const RecordDecl *Record) {
+ if (const auto *TypedefNameDecl = Record->getTypedefNameForAnonDecl())
+ return getFragmentsForTypedef(TypedefNameDecl);
+
+ DeclarationFragments Fragments;
+ Fragments.append("struct", DeclarationFragments::FragmentKind::Keyword);
+
+ if (!Record->getName().empty())
+ Fragments.appendSpace().append(
+ Record->getName(), DeclarationFragments::FragmentKind::Identifier);
+ return Fragments;
+}
+
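+// For illustration, with a hypothetical function-like macro:
+//   #define MIN(A, B) ((A) < (B) ? (A) : (B))
+// renders as keyword("#define"), a space, identifier("MIN"), text("("),
+// internalParam("A"), text(", "), internalParam("B"), text(")"); the
+// replacement body is deliberately not part of the fragments.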
+DeclarationFragments
+DeclarationFragmentsBuilder::getFragmentsForMacro(StringRef Name,
+ const MacroDirective *MD) {
+ DeclarationFragments Fragments;
+ Fragments.append("#define", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace();
+ Fragments.append(Name, DeclarationFragments::FragmentKind::Identifier);
+
+ auto *MI = MD->getMacroInfo();
+
+ if (MI->isFunctionLike()) {
+ Fragments.append("(", DeclarationFragments::FragmentKind::Text);
+ unsigned numParameters = MI->getNumParams();
+ if (MI->isC99Varargs())
+ --numParameters;
+ for (unsigned i = 0; i < numParameters; ++i) {
+ if (i)
+ Fragments.append(", ", DeclarationFragments::FragmentKind::Text);
+ Fragments.append(MI->params()[i]->getName(),
+ DeclarationFragments::FragmentKind::InternalParam);
+ }
+ if (MI->isVariadic()) {
+ if (numParameters && MI->isC99Varargs())
+ Fragments.append(", ", DeclarationFragments::FragmentKind::Text);
+ Fragments.append("...", DeclarationFragments::FragmentKind::Text);
+ }
+ Fragments.append(")", DeclarationFragments::FragmentKind::Text);
+ }
+ return Fragments;
+}
+
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForObjCCategory(
+ const ObjCCategoryDecl *Category) {
+ DeclarationFragments Fragments;
+
+ SmallString<128> InterfaceUSR;
+ index::generateUSRForDecl(Category->getClassInterface(), InterfaceUSR);
+
+ Fragments.append("@interface", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace()
+ .append(Category->getClassInterface()->getName(),
+ DeclarationFragments::FragmentKind::TypeIdentifier, InterfaceUSR)
+ .append(" (", DeclarationFragments::FragmentKind::Text)
+ .append(Category->getName(),
+ DeclarationFragments::FragmentKind::Identifier)
+ .append(")", DeclarationFragments::FragmentKind::Text);
+
+ return Fragments;
+}
+
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForObjCInterface(
+ const ObjCInterfaceDecl *Interface) {
+ DeclarationFragments Fragments;
+ // Build the base of the Objective-C interface declaration.
+ Fragments.append("@interface", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace()
+ .append(Interface->getName(),
+ DeclarationFragments::FragmentKind::Identifier);
+
+ // Build the inheritance part of the declaration.
+ if (const ObjCInterfaceDecl *SuperClass = Interface->getSuperClass()) {
+ SmallString<128> SuperUSR;
+ index::generateUSRForDecl(SuperClass, SuperUSR);
+ Fragments.append(" : ", DeclarationFragments::FragmentKind::Text)
+ .append(SuperClass->getName(),
+ DeclarationFragments::FragmentKind::TypeIdentifier, SuperUSR);
+ }
+
+ return Fragments;
+}
+
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForObjCMethod(
+ const ObjCMethodDecl *Method) {
+ DeclarationFragments Fragments, After;
+ // Build the instance/class method indicator.
+ if (Method->isClassMethod())
+ Fragments.append("+ ", DeclarationFragments::FragmentKind::Text);
+ else if (Method->isInstanceMethod())
+ Fragments.append("- ", DeclarationFragments::FragmentKind::Text);
+
+ // Build the return type.
+ Fragments.append("(", DeclarationFragments::FragmentKind::Text)
+ .append(getFragmentsForType(Method->getReturnType(),
+ Method->getASTContext(), After))
+ .append(std::move(After))
+ .append(")", DeclarationFragments::FragmentKind::Text);
+
+ // Build the selector part.
+ Selector Selector = Method->getSelector();
+ if (Selector.getNumArgs() == 0)
+ // For Objective-C methods that don't take arguments, the first (and only)
+ // slot of the selector is the method name.
+ Fragments.appendSpace().append(
+ Selector.getNameForSlot(0),
+ DeclarationFragments::FragmentKind::Identifier);
+
+ // For Objective-C methods that take arguments, build the selector slots.
+ for (unsigned i = 0, end = Method->param_size(); i != end; ++i) {
+    // Objective-C method selector parts are considered identifiers rather
+    // than "external parameters" as in Swift, because Objective-C method
+    // symbols are referenced by the entire selector rather than just the
+    // method name.
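+    // For illustration, with a hypothetical method:
+    //   - (void)setValue:(int)value forKey:(NSString *)key;
+    // yields identifier("setValue:"), the fragments "(int) value",
+    // identifier("forKey:"), and the fragments "(NSString *) key".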
+ SmallString<32> ParamID(Selector.getNameForSlot(i));
+ ParamID.append(":");
+ Fragments.appendSpace().append(
+ ParamID, DeclarationFragments::FragmentKind::Identifier);
+
+ // Build the internal parameter.
+ const ParmVarDecl *Param = Method->getParamDecl(i);
+ Fragments.append(getFragmentsForParam(Param));
+ }
+
+ return Fragments.append(";", DeclarationFragments::FragmentKind::Text);
+}
+
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForObjCProperty(
+ const ObjCPropertyDecl *Property) {
+ DeclarationFragments Fragments, After;
+
+ // Build the Objective-C property keyword.
+ Fragments.append("@property", DeclarationFragments::FragmentKind::Keyword);
+
+ const auto Attributes = Property->getPropertyAttributes();
+  // Build the attributes if there are any associated with the property.
+ if (Attributes != ObjCPropertyAttribute::kind_noattr) {
+ // No leading comma for the first attribute.
+ bool First = true;
+ Fragments.append(" (", DeclarationFragments::FragmentKind::Text);
+ // Helper function to render the attribute.
+ auto RenderAttribute =
+ [&](ObjCPropertyAttribute::Kind Kind, StringRef Spelling,
+ StringRef Arg = "",
+ DeclarationFragments::FragmentKind ArgKind =
+ DeclarationFragments::FragmentKind::Identifier) {
+ // Check if the `Kind` attribute is set for this property.
+ if ((Attributes & Kind) && !Spelling.empty()) {
+ // Add a leading comma if this is not the first attribute rendered.
+ if (!First)
+ Fragments.append(", ", DeclarationFragments::FragmentKind::Text);
+ // Render the spelling of this attribute `Kind` as a keyword.
+ Fragments.append(Spelling,
+ DeclarationFragments::FragmentKind::Keyword);
+          // If this attribute takes arguments (e.g. `getter=getterName`),
+ // render the arguments.
+ if (!Arg.empty())
+ Fragments.append("=", DeclarationFragments::FragmentKind::Text)
+ .append(Arg, ArgKind);
+ First = false;
+ }
+ };
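+    // For illustration, with a hypothetical property:
+    //   @property (nonatomic, copy, getter=theTitle) NSString *title;
+    // the attribute list renders as text(" ("), keyword("nonatomic"),
+    // text(", "), keyword("copy"), text(", "), keyword("getter"), text("="),
+    // identifier("theTitle"), text(")").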
+
+ // Go through all possible Objective-C property attributes and render set
+ // ones.
+ RenderAttribute(ObjCPropertyAttribute::kind_class, "class");
+ RenderAttribute(ObjCPropertyAttribute::kind_direct, "direct");
+ RenderAttribute(ObjCPropertyAttribute::kind_nonatomic, "nonatomic");
+ RenderAttribute(ObjCPropertyAttribute::kind_atomic, "atomic");
+ RenderAttribute(ObjCPropertyAttribute::kind_assign, "assign");
+ RenderAttribute(ObjCPropertyAttribute::kind_retain, "retain");
+ RenderAttribute(ObjCPropertyAttribute::kind_strong, "strong");
+ RenderAttribute(ObjCPropertyAttribute::kind_copy, "copy");
+ RenderAttribute(ObjCPropertyAttribute::kind_weak, "weak");
+ RenderAttribute(ObjCPropertyAttribute::kind_unsafe_unretained,
+ "unsafe_unretained");
+ RenderAttribute(ObjCPropertyAttribute::kind_readwrite, "readwrite");
+ RenderAttribute(ObjCPropertyAttribute::kind_readonly, "readonly");
+ RenderAttribute(ObjCPropertyAttribute::kind_getter, "getter",
+ Property->getGetterName().getAsString());
+ RenderAttribute(ObjCPropertyAttribute::kind_setter, "setter",
+ Property->getSetterName().getAsString());
+
+ // Render nullability attributes.
+ if (Attributes & ObjCPropertyAttribute::kind_nullability) {
+ QualType Type = Property->getType();
+ if (const auto Nullability =
+ AttributedType::stripOuterNullability(Type)) {
+ if (!First)
+ Fragments.append(", ", DeclarationFragments::FragmentKind::Text);
+ if (*Nullability == NullabilityKind::Unspecified &&
+ (Attributes & ObjCPropertyAttribute::kind_null_resettable))
+ Fragments.append("null_resettable",
+ DeclarationFragments::FragmentKind::Keyword);
+ else
+ Fragments.append(
+ getNullabilitySpelling(*Nullability, /*isContextSensitive=*/true),
+ DeclarationFragments::FragmentKind::Keyword);
+ First = false;
+ }
+ }
+
+ Fragments.append(")", DeclarationFragments::FragmentKind::Text);
+ }
+
+ // Build the property type and name, and return the completed fragments.
+ return Fragments.appendSpace()
+ .append(getFragmentsForType(Property->getType(),
+ Property->getASTContext(), After))
+ .append(Property->getName(),
+ DeclarationFragments::FragmentKind::Identifier)
+ .append(std::move(After));
+}
+
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForObjCProtocol(
+ const ObjCProtocolDecl *Protocol) {
+ DeclarationFragments Fragments;
+ // Build basic protocol declaration.
+ Fragments.append("@protocol", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace()
+ .append(Protocol->getName(),
+ DeclarationFragments::FragmentKind::Identifier);
+
+ // If this protocol conforms to other protocols, build the conformance list.
+ if (!Protocol->protocols().empty()) {
+ Fragments.append(" <", DeclarationFragments::FragmentKind::Text);
+ for (ObjCProtocolDecl::protocol_iterator It = Protocol->protocol_begin();
+ It != Protocol->protocol_end(); It++) {
+ // Add a leading comma if this is not the first protocol rendered.
+ if (It != Protocol->protocol_begin())
+ Fragments.append(", ", DeclarationFragments::FragmentKind::Text);
+
+ SmallString<128> USR;
+ index::generateUSRForDecl(*It, USR);
+ Fragments.append((*It)->getName(),
+ DeclarationFragments::FragmentKind::TypeIdentifier, USR);
+ }
+ Fragments.append(">", DeclarationFragments::FragmentKind::Text);
+ }
+
+ return Fragments;
+}
+
+DeclarationFragments DeclarationFragmentsBuilder::getFragmentsForTypedef(
+ const TypedefNameDecl *Decl) {
+ DeclarationFragments Fragments, After;
+ Fragments.append("typedef", DeclarationFragments::FragmentKind::Keyword)
+ .appendSpace()
+ .append(getFragmentsForType(Decl->getUnderlyingType(),
+ Decl->getASTContext(), After))
+ .append(std::move(After))
+ .appendSpace()
+ .append(Decl->getName(), DeclarationFragments::FragmentKind::Identifier);
+
+ return Fragments;
+}
+
+template <typename FunctionT>
+FunctionSignature
+DeclarationFragmentsBuilder::getFunctionSignature(const FunctionT *Function) {
+ FunctionSignature Signature;
+
+ DeclarationFragments ReturnType, After;
+ ReturnType
+ .append(getFragmentsForType(Function->getReturnType(),
+ Function->getASTContext(), After))
+ .append(std::move(After));
+ Signature.setReturnType(ReturnType);
+
+ for (const auto *Param : Function->parameters())
+ Signature.addParameter(Param->getName(), getFragmentsForParam(Param));
+
+ return Signature;
+}
+
+// Instantiate template for FunctionDecl.
+template FunctionSignature
+DeclarationFragmentsBuilder::getFunctionSignature(const FunctionDecl *);
+
+// Instantiate template for ObjCMethodDecl.
+template FunctionSignature
+DeclarationFragmentsBuilder::getFunctionSignature(const ObjCMethodDecl *);
+
+// Subheading of a symbol defaults to its name.
+DeclarationFragments
+DeclarationFragmentsBuilder::getSubHeading(const NamedDecl *Decl) {
+ DeclarationFragments Fragments;
+ if (!Decl->getName().empty())
+ Fragments.append(Decl->getName(),
+ DeclarationFragments::FragmentKind::Identifier);
+ return Fragments;
+}
+
+// Subheading of an Objective-C method is a `+` or `-` sign indicating whether
+// it's a class method or an instance method, followed by the selector name.
+DeclarationFragments
+DeclarationFragmentsBuilder::getSubHeading(const ObjCMethodDecl *Method) {
+ DeclarationFragments Fragments;
+ if (Method->isClassMethod())
+ Fragments.append("+ ", DeclarationFragments::FragmentKind::Text);
+ else if (Method->isInstanceMethod())
+ Fragments.append("- ", DeclarationFragments::FragmentKind::Text);
+
+ return Fragments.append(Method->getNameAsString(),
+ DeclarationFragments::FragmentKind::Identifier);
+}
+
+// Subheading of a macro defaults to its name.
+DeclarationFragments
+DeclarationFragmentsBuilder::getSubHeadingForMacro(StringRef Name) {
+ DeclarationFragments Fragments;
+ Fragments.append(Name, DeclarationFragments::FragmentKind::Identifier);
+ return Fragments;
+}
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp
new file mode 100644
index 000000000000..bffa66c2d944
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/ExtractAPIConsumer.cpp
@@ -0,0 +1,974 @@
+//===- ExtractAPI/ExtractAPIConsumer.cpp ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the ExtractAPIAction, and ASTVisitor/Consumer to
+/// collect API information.
+///
+//===----------------------------------------------------------------------===//
+
+#include "TypedefUnderlyingTypeResolver.h"
+#include "clang/AST/ASTConsumer.h"
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ParentMapContext.h"
+#include "clang/AST/RawCommentList.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/ExtractAPI/API.h"
+#include "clang/ExtractAPI/AvailabilityInfo.h"
+#include "clang/ExtractAPI/DeclarationFragments.h"
+#include "clang/ExtractAPI/FrontendActions.h"
+#include "clang/ExtractAPI/Serialization/SymbolGraphSerializer.h"
+#include "clang/Frontend/ASTConsumers.h"
+#include "clang/Frontend/CompilerInstance.h"
+#include "clang/Frontend/FrontendOptions.h"
+#include "clang/Lex/MacroInfo.h"
+#include "clang/Lex/PPCallbacks.h"
+#include "clang/Lex/Preprocessor.h"
+#include "clang/Lex/PreprocessorOptions.h"
+#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallVector.h"
+#include "llvm/Support/FileSystem.h"
+#include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/Regex.h"
+#include "llvm/Support/raw_ostream.h"
+#include <memory>
+#include <utility>
+
+using namespace clang;
+using namespace extractapi;
+
+namespace {
+
+StringRef getTypedefName(const TagDecl *Decl) {
+ if (const auto *TypedefDecl = Decl->getTypedefNameForAnonDecl())
+ return TypedefDecl->getName();
+
+ return {};
+}
+
+Optional<std::string> getRelativeIncludeName(const CompilerInstance &CI,
+ StringRef File,
+ bool *IsQuoted = nullptr) {
+ assert(CI.hasFileManager() &&
+ "CompilerInstance does not have a FileNamager!");
+
+ using namespace llvm::sys;
+ // Matches framework include patterns
+ const llvm::Regex Rule("/(.+)\\.framework/(.+)?Headers/(.+)");
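+  // For illustration, with a hypothetical path: for
+  // "/SDK/Foo.framework/Versions/A/Headers/Foo.h" the framework name "Foo"
+  // and the header "Foo.h" are later reassembled into the include name
+  // "Foo/Foo.h".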
+
+ const auto &FS = CI.getVirtualFileSystem();
+
+ SmallString<128> FilePath(File.begin(), File.end());
+ FS.makeAbsolute(FilePath);
+ path::remove_dots(FilePath, true);
+ FilePath = path::convert_to_slash(FilePath);
+ File = FilePath;
+
+  // Checks whether `Dir` is a strict path prefix of `File`. If so, returns
+  // the prefix length; otherwise returns 0.
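+  // For illustration, with hypothetical paths: Dir = "/usr/include" and
+  // File = "/usr/include/sys/types.h" return a prefix length covering
+  // "/usr/include/", so stripping it later yields "sys/types.h".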
+ auto CheckDir = [&](llvm::StringRef Dir) -> unsigned {
+ llvm::SmallString<32> DirPath(Dir.begin(), Dir.end());
+ FS.makeAbsolute(DirPath);
+ path::remove_dots(DirPath, true);
+ Dir = DirPath;
+ for (auto NI = path::begin(File), NE = path::end(File),
+ DI = path::begin(Dir), DE = path::end(Dir);
+ /*termination condition in loop*/; ++NI, ++DI) {
+ // '.' components in File are ignored.
+ while (NI != NE && *NI == ".")
+ ++NI;
+ if (NI == NE)
+ break;
+
+ // '.' components in Dir are ignored.
+ while (DI != DE && *DI == ".")
+ ++DI;
+
+ // Dir is a prefix of File, up to '.' components and choice of path
+ // separators.
+ if (DI == DE)
+ return NI - path::begin(File);
+
+ // Consider all path separators equal.
+ if (NI->size() == 1 && DI->size() == 1 &&
+ path::is_separator(NI->front()) && path::is_separator(DI->front()))
+ continue;
+
+ // Special case Apple .sdk folders since the search path is typically a
+ // symlink like `iPhoneSimulator14.5.sdk` while the file is instead
+ // located in `iPhoneSimulator.sdk` (the real folder).
+ if (NI->endswith(".sdk") && DI->endswith(".sdk")) {
+ StringRef NBasename = path::stem(*NI);
+ StringRef DBasename = path::stem(*DI);
+ if (DBasename.startswith(NBasename))
+ continue;
+ }
+
+ if (*NI != *DI)
+ break;
+ }
+ return 0;
+ };
+
+ unsigned PrefixLength = 0;
+
+ // Go through the search paths and find the first one that is a prefix of
+ // the header.
+ for (const auto &Entry : CI.getHeaderSearchOpts().UserEntries) {
+ // Note whether the match is found in a quoted entry.
+ if (IsQuoted)
+ *IsQuoted = Entry.Group == frontend::Quoted;
+
+ if (auto EntryFile = CI.getFileManager().getOptionalFileRef(Entry.Path)) {
+ if (auto HMap = HeaderMap::Create(*EntryFile, CI.getFileManager())) {
+ // If this is a headermap entry, try to reverse lookup the full path
+ // for a spelled name before mapping.
+ StringRef SpelledFilename = HMap->reverseLookupFilename(File);
+ if (!SpelledFilename.empty())
+ return SpelledFilename.str();
+
+ // No matching mapping in this headermap, try next search entry.
+ continue;
+ }
+ }
+
+    // Entry is a directory search entry; check whether it's a prefix of File.
+ PrefixLength = CheckDir(Entry.Path);
+ if (PrefixLength > 0) {
+ // The header is found in a framework path, construct the framework-style
+ // include name `<Framework/Header.h>`
+ if (Entry.IsFramework) {
+ SmallVector<StringRef, 4> Matches;
+ Rule.match(File, &Matches);
+ // Returned matches are always in stable order.
+ if (Matches.size() != 4)
+ return None;
+
+ return path::convert_to_slash(
+ (Matches[1].drop_front(Matches[1].rfind('/') + 1) + "/" +
+ Matches[3])
+ .str());
+ }
+
+ // The header is found in a normal search path, strip the search path
+ // prefix to get an include name.
+ return path::convert_to_slash(File.drop_front(PrefixLength));
+ }
+ }
+
+  // Couldn't determine an include name; the caller falls back to the full path.
+ return None;
+}
+
+struct LocationFileChecker {
+ bool isLocationInKnownFile(SourceLocation Loc) {
+ // If the loc refers to a macro expansion we need to first get the file
+ // location of the expansion.
+ auto &SM = CI.getSourceManager();
+ auto FileLoc = SM.getFileLoc(Loc);
+ FileID FID = SM.getFileID(FileLoc);
+ if (FID.isInvalid())
+ return false;
+
+ const auto *File = SM.getFileEntryForID(FID);
+ if (!File)
+ return false;
+
+ if (KnownFileEntries.count(File))
+ return true;
+
+ if (ExternalFileEntries.count(File))
+ return false;
+
+ StringRef FileName = File->tryGetRealPathName().empty()
+ ? File->getName()
+ : File->tryGetRealPathName();
+
+ // Try to reduce the include name the same way we tried to include it.
+ bool IsQuoted = false;
+ if (auto IncludeName = getRelativeIncludeName(CI, FileName, &IsQuoted))
+ if (llvm::any_of(KnownFiles,
+ [&IsQuoted, &IncludeName](const auto &KnownFile) {
+ return KnownFile.first.equals(*IncludeName) &&
+ KnownFile.second == IsQuoted;
+ })) {
+ KnownFileEntries.insert(File);
+ return true;
+ }
+
+ // Record that the file was not found to avoid future reverse lookup for
+ // the same file.
+ ExternalFileEntries.insert(File);
+ return false;
+ }
+
+ LocationFileChecker(const CompilerInstance &CI,
+ SmallVector<std::pair<SmallString<32>, bool>> &KnownFiles)
+ : CI(CI), KnownFiles(KnownFiles), ExternalFileEntries() {
+ for (const auto &KnownFile : KnownFiles)
+ if (auto FileEntry = CI.getFileManager().getFile(KnownFile.first))
+ KnownFileEntries.insert(*FileEntry);
+ }
+
+private:
+ const CompilerInstance &CI;
+ SmallVector<std::pair<SmallString<32>, bool>> &KnownFiles;
+ llvm::DenseSet<const FileEntry *> KnownFileEntries;
+ llvm::DenseSet<const FileEntry *> ExternalFileEntries;
+};
+
+/// The RecursiveASTVisitor to traverse symbol declarations and collect API
+/// information.
+class ExtractAPIVisitor : public RecursiveASTVisitor<ExtractAPIVisitor> {
+public:
+ ExtractAPIVisitor(ASTContext &Context, LocationFileChecker &LCF, APISet &API)
+ : Context(Context), API(API), LCF(LCF) {}
+
+ const APISet &getAPI() const { return API; }
+
+ bool VisitVarDecl(const VarDecl *Decl) {
+ // Skip function parameters.
+ if (isa<ParmVarDecl>(Decl))
+ return true;
+
+ // Skip non-global variables in records (struct/union/class).
+ if (Decl->getDeclContext()->isRecord())
+ return true;
+
+ // Skip local variables inside function or method.
+ if (!Decl->isDefinedOutsideFunctionOrMethod())
+ return true;
+
+ // If this is a template but not specialization or instantiation, skip.
+ if (Decl->getASTContext().getTemplateOrSpecializationInfo(Decl) &&
+ Decl->getTemplateSpecializationKind() == TSK_Undeclared)
+ return true;
+
+ if (!LCF.isLocationInKnownFile(Decl->getLocation()))
+ return true;
+
+ // Collect symbol information.
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ AvailabilityInfo Availability = getAvailability(Decl);
+ LinkageInfo Linkage = Decl->getLinkageAndVisibility();
+ DocComment Comment;
+ if (auto *RawComment = Context.getRawCommentForDeclNoCache(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the variable.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForVar(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+
+ // Add the global variable record to the API set.
+ API.addGlobalVar(Name, USR, Loc, Availability, Linkage, Comment,
+ Declaration, SubHeading);
+ return true;
+ }
+
+ bool VisitFunctionDecl(const FunctionDecl *Decl) {
+ if (const auto *Method = dyn_cast<CXXMethodDecl>(Decl)) {
+      // Skip member functions in class templates.
+ if (Method->getParent()->getDescribedClassTemplate() != nullptr)
+ return true;
+
+ // Skip methods in records.
+ for (auto P : Context.getParents(*Method)) {
+ if (P.get<CXXRecordDecl>())
+ return true;
+ }
+
+ // Skip ConstructorDecl and DestructorDecl.
+ if (isa<CXXConstructorDecl>(Method) || isa<CXXDestructorDecl>(Method))
+ return true;
+ }
+
+ // Skip templated functions.
+ switch (Decl->getTemplatedKind()) {
+ case FunctionDecl::TK_NonTemplate:
+ break;
+ case FunctionDecl::TK_MemberSpecialization:
+ case FunctionDecl::TK_FunctionTemplateSpecialization:
+ if (auto *TemplateInfo = Decl->getTemplateSpecializationInfo()) {
+ if (!TemplateInfo->isExplicitInstantiationOrSpecialization())
+ return true;
+ }
+ break;
+ case FunctionDecl::TK_FunctionTemplate:
+ case FunctionDecl::TK_DependentFunctionTemplateSpecialization:
+ return true;
+ }
+
+ if (!LCF.isLocationInKnownFile(Decl->getLocation()))
+ return true;
+
+ // Collect symbol information.
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ AvailabilityInfo Availability = getAvailability(Decl);
+ LinkageInfo Linkage = Decl->getLinkageAndVisibility();
+ DocComment Comment;
+ if (auto *RawComment = Context.getRawCommentForDeclNoCache(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments, sub-heading, and signature of the function.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForFunction(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+ FunctionSignature Signature =
+ DeclarationFragmentsBuilder::getFunctionSignature(Decl);
+
+ // Add the function record to the API set.
+ API.addGlobalFunction(Name, USR, Loc, Availability, Linkage, Comment,
+ Declaration, SubHeading, Signature);
+ return true;
+ }
+
+ bool VisitEnumDecl(const EnumDecl *Decl) {
+ if (!Decl->isComplete())
+ return true;
+
+ // Skip forward declaration.
+ if (!Decl->isThisDeclarationADefinition())
+ return true;
+
+ if (!LCF.isLocationInKnownFile(Decl->getLocation()))
+ return true;
+
+ // Collect symbol information.
+ std::string NameString = Decl->getQualifiedNameAsString();
+ StringRef Name(NameString);
+ if (Name.empty())
+ Name = getTypedefName(Decl);
+
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ AvailabilityInfo Availability = getAvailability(Decl);
+ DocComment Comment;
+ if (auto *RawComment = Context.getRawCommentForDeclNoCache(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the enum.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForEnum(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+
+ EnumRecord *EnumRecord =
+ API.addEnum(API.copyString(Name), USR, Loc, Availability, Comment,
+ Declaration, SubHeading);
+
+ // Now collect information about the enumerators in this enum.
+ recordEnumConstants(EnumRecord, Decl->enumerators());
+
+ return true;
+ }
+
+ bool VisitRecordDecl(const RecordDecl *Decl) {
+ if (!Decl->isCompleteDefinition())
+ return true;
+
+ // Skip C++ structs/classes/unions
+ // TODO: support C++ records
+ if (isa<CXXRecordDecl>(Decl))
+ return true;
+
+ if (!LCF.isLocationInKnownFile(Decl->getLocation()))
+ return true;
+
+ // Collect symbol information.
+ StringRef Name = Decl->getName();
+ if (Name.empty())
+ Name = getTypedefName(Decl);
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ AvailabilityInfo Availability = getAvailability(Decl);
+ DocComment Comment;
+ if (auto *RawComment = Context.getRawCommentForDeclNoCache(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the struct.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForStruct(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+
+ StructRecord *StructRecord = API.addStruct(
+ Name, USR, Loc, Availability, Comment, Declaration, SubHeading);
+
+ // Now collect information about the fields in this struct.
+ recordStructFields(StructRecord, Decl->fields());
+
+ return true;
+ }
+
+ bool VisitObjCInterfaceDecl(const ObjCInterfaceDecl *Decl) {
+ // Skip forward declaration for classes (@class)
+ if (!Decl->isThisDeclarationADefinition())
+ return true;
+
+ if (!LCF.isLocationInKnownFile(Decl->getLocation()))
+ return true;
+
+ // Collect symbol information.
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ AvailabilityInfo Availability = getAvailability(Decl);
+ LinkageInfo Linkage = Decl->getLinkageAndVisibility();
+ DocComment Comment;
+ if (auto *RawComment = Context.getRawCommentForDeclNoCache(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the interface.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForObjCInterface(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+
+ // Collect super class information.
+ SymbolReference SuperClass;
+ if (const auto *SuperClassDecl = Decl->getSuperClass()) {
+ SuperClass.Name = SuperClassDecl->getObjCRuntimeNameAsString();
+ SuperClass.USR = API.recordUSR(SuperClassDecl);
+ }
+
+ ObjCInterfaceRecord *ObjCInterfaceRecord =
+ API.addObjCInterface(Name, USR, Loc, Availability, Linkage, Comment,
+ Declaration, SubHeading, SuperClass);
+
+ // Record all methods (selectors). This doesn't include automatically
+ // synthesized property methods.
+ recordObjCMethods(ObjCInterfaceRecord, Decl->methods());
+ recordObjCProperties(ObjCInterfaceRecord, Decl->properties());
+ recordObjCInstanceVariables(ObjCInterfaceRecord, Decl->ivars());
+ recordObjCProtocols(ObjCInterfaceRecord, Decl->protocols());
+
+ return true;
+ }
+
+ bool VisitObjCProtocolDecl(const ObjCProtocolDecl *Decl) {
+ // Skip forward declaration for protocols (@protocol).
+ if (!Decl->isThisDeclarationADefinition())
+ return true;
+
+ if (!LCF.isLocationInKnownFile(Decl->getLocation()))
+ return true;
+
+ // Collect symbol information.
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ AvailabilityInfo Availability = getAvailability(Decl);
+ DocComment Comment;
+ if (auto *RawComment = Context.getRawCommentForDeclNoCache(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the protocol.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForObjCProtocol(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+
+ ObjCProtocolRecord *ObjCProtocolRecord = API.addObjCProtocol(
+ Name, USR, Loc, Availability, Comment, Declaration, SubHeading);
+
+ recordObjCMethods(ObjCProtocolRecord, Decl->methods());
+ recordObjCProperties(ObjCProtocolRecord, Decl->properties());
+ recordObjCProtocols(ObjCProtocolRecord, Decl->protocols());
+
+ return true;
+ }
+
+ bool VisitTypedefNameDecl(const TypedefNameDecl *Decl) {
+ // Skip ObjC Type Parameter for now.
+ if (isa<ObjCTypeParamDecl>(Decl))
+ return true;
+
+ if (!Decl->isDefinedOutsideFunctionOrMethod())
+ return true;
+
+ if (!LCF.isLocationInKnownFile(Decl->getLocation()))
+ return true;
+
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ StringRef Name = Decl->getName();
+ AvailabilityInfo Availability = getAvailability(Decl);
+ StringRef USR = API.recordUSR(Decl);
+ DocComment Comment;
+ if (auto *RawComment = Context.getRawCommentForDeclNoCache(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ QualType Type = Decl->getUnderlyingType();
+ SymbolReference SymRef =
+ TypedefUnderlyingTypeResolver(Context).getSymbolReferenceForType(Type,
+ API);
+
+ API.addTypedef(Name, USR, Loc, Availability, Comment,
+ DeclarationFragmentsBuilder::getFragmentsForTypedef(Decl),
+ DeclarationFragmentsBuilder::getSubHeading(Decl), SymRef);
+
+ return true;
+ }
+
+ bool VisitObjCCategoryDecl(const ObjCCategoryDecl *Decl) {
+ // Collect symbol information.
+ StringRef Name = Decl->getName();
+ StringRef USR = API.recordUSR(Decl);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Decl->getLocation());
+ AvailabilityInfo Availability = getAvailability(Decl);
+ DocComment Comment;
+ if (auto *RawComment = Context.getRawCommentForDeclNoCache(Decl))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+ // Build declaration fragments and sub-heading for the category.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForObjCCategory(Decl);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Decl);
+
+ const ObjCInterfaceDecl *InterfaceDecl = Decl->getClassInterface();
+ SymbolReference Interface(InterfaceDecl->getName(),
+ API.recordUSR(InterfaceDecl));
+
+ ObjCCategoryRecord *ObjCCategoryRecord =
+ API.addObjCCategory(Name, USR, Loc, Availability, Comment, Declaration,
+ SubHeading, Interface);
+
+ recordObjCMethods(ObjCCategoryRecord, Decl->methods());
+ recordObjCProperties(ObjCCategoryRecord, Decl->properties());
+ recordObjCInstanceVariables(ObjCCategoryRecord, Decl->ivars());
+ recordObjCProtocols(ObjCCategoryRecord, Decl->protocols());
+
+ return true;
+ }
+
+private:
+ /// Get availability information of the declaration \p D.
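+  /// For illustration, a declaration carrying the hypothetical attribute
+  ///   __attribute__((availability(macos, introduced=10.15, obsoleted=13.0)))
+  /// contributes its introduced/obsoleted versions when the target platform
+  /// is "macos".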
+ AvailabilityInfo getAvailability(const Decl *D) const {
+ StringRef PlatformName = Context.getTargetInfo().getPlatformName();
+
+ AvailabilityInfo Availability;
+ // Collect availability attributes from all redeclarations.
+ for (const auto *RD : D->redecls()) {
+ for (const auto *A : RD->specific_attrs<AvailabilityAttr>()) {
+ if (A->getPlatform()->getName() != PlatformName)
+ continue;
+ Availability = AvailabilityInfo(A->getIntroduced(), A->getDeprecated(),
+ A->getObsoleted(), A->getUnavailable(),
+ /* UnconditionallyDeprecated */ false,
+ /* UnconditionallyUnavailable */ false);
+ break;
+ }
+
+ if (const auto *A = RD->getAttr<UnavailableAttr>())
+ if (!A->isImplicit()) {
+ Availability.Unavailable = true;
+ Availability.UnconditionallyUnavailable = true;
+ }
+
+ if (const auto *A = RD->getAttr<DeprecatedAttr>())
+ if (!A->isImplicit())
+ Availability.UnconditionallyDeprecated = true;
+ }
+
+ return Availability;
+ }
+
+ /// Collect API information for the enum constants and associate with the
+ /// parent enum.
+ void recordEnumConstants(EnumRecord *EnumRecord,
+ const EnumDecl::enumerator_range Constants) {
+ for (const auto *Constant : Constants) {
+ // Collect symbol information.
+ StringRef Name = Constant->getName();
+ StringRef USR = API.recordUSR(Constant);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Constant->getLocation());
+ AvailabilityInfo Availability = getAvailability(Constant);
+ DocComment Comment;
+ if (auto *RawComment = Context.getRawCommentForDeclNoCache(Constant))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the enum constant.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForEnumConstant(Constant);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Constant);
+
+ API.addEnumConstant(EnumRecord, Name, USR, Loc, Availability, Comment,
+ Declaration, SubHeading);
+ }
+ }
+
+ /// Collect API information for the struct fields and associate with the
+ /// parent struct.
+ void recordStructFields(StructRecord *StructRecord,
+ const RecordDecl::field_range Fields) {
+ for (const auto *Field : Fields) {
+ // Collect symbol information.
+ StringRef Name = Field->getName();
+ StringRef USR = API.recordUSR(Field);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Field->getLocation());
+ AvailabilityInfo Availability = getAvailability(Field);
+ DocComment Comment;
+ if (auto *RawComment = Context.getRawCommentForDeclNoCache(Field))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the struct field.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForField(Field);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Field);
+
+ API.addStructField(StructRecord, Name, USR, Loc, Availability, Comment,
+ Declaration, SubHeading);
+ }
+ }
+
+ /// Collect API information for the Objective-C methods and associate with the
+ /// parent container.
+ void recordObjCMethods(ObjCContainerRecord *Container,
+ const ObjCContainerDecl::method_range Methods) {
+ for (const auto *Method : Methods) {
+ // Don't record selectors for properties.
+ if (Method->isPropertyAccessor())
+ continue;
+
+ StringRef Name = API.copyString(Method->getSelector().getAsString());
+ StringRef USR = API.recordUSR(Method);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Method->getLocation());
+ AvailabilityInfo Availability = getAvailability(Method);
+ DocComment Comment;
+ if (auto *RawComment = Context.getRawCommentForDeclNoCache(Method))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments, sub-heading, and signature for the method.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForObjCMethod(Method);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Method);
+ FunctionSignature Signature =
+ DeclarationFragmentsBuilder::getFunctionSignature(Method);
+
+ API.addObjCMethod(Container, Name, USR, Loc, Availability, Comment,
+ Declaration, SubHeading, Signature,
+ Method->isInstanceMethod());
+ }
+ }
+
+ void recordObjCProperties(ObjCContainerRecord *Container,
+ const ObjCContainerDecl::prop_range Properties) {
+ for (const auto *Property : Properties) {
+ StringRef Name = Property->getName();
+ StringRef USR = API.recordUSR(Property);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Property->getLocation());
+ AvailabilityInfo Availability = getAvailability(Property);
+ DocComment Comment;
+ if (auto *RawComment = Context.getRawCommentForDeclNoCache(Property))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the property.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForObjCProperty(Property);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Property);
+
+ StringRef GetterName =
+ API.copyString(Property->getGetterName().getAsString());
+ StringRef SetterName =
+ API.copyString(Property->getSetterName().getAsString());
+
+ // Get the attributes for property.
+ unsigned Attributes = ObjCPropertyRecord::NoAttr;
+ if (Property->getPropertyAttributes() &
+ ObjCPropertyAttribute::kind_readonly)
+ Attributes |= ObjCPropertyRecord::ReadOnly;
+ if (Property->getPropertyAttributes() & ObjCPropertyAttribute::kind_class)
+ Attributes |= ObjCPropertyRecord::Class;
+
+ API.addObjCProperty(
+ Container, Name, USR, Loc, Availability, Comment, Declaration,
+ SubHeading,
+ static_cast<ObjCPropertyRecord::AttributeKind>(Attributes),
+ GetterName, SetterName, Property->isOptional());
+ }
+ }
+
+ void recordObjCInstanceVariables(
+ ObjCContainerRecord *Container,
+ const llvm::iterator_range<
+ DeclContext::specific_decl_iterator<ObjCIvarDecl>>
+ Ivars) {
+ for (const auto *Ivar : Ivars) {
+ StringRef Name = Ivar->getName();
+ StringRef USR = API.recordUSR(Ivar);
+ PresumedLoc Loc =
+ Context.getSourceManager().getPresumedLoc(Ivar->getLocation());
+ AvailabilityInfo Availability = getAvailability(Ivar);
+ DocComment Comment;
+ if (auto *RawComment = Context.getRawCommentForDeclNoCache(Ivar))
+ Comment = RawComment->getFormattedLines(Context.getSourceManager(),
+ Context.getDiagnostics());
+
+ // Build declaration fragments and sub-heading for the instance variable.
+ DeclarationFragments Declaration =
+ DeclarationFragmentsBuilder::getFragmentsForField(Ivar);
+ DeclarationFragments SubHeading =
+ DeclarationFragmentsBuilder::getSubHeading(Ivar);
+
+ ObjCInstanceVariableRecord::AccessControl Access =
+ Ivar->getCanonicalAccessControl();
+
+ API.addObjCInstanceVariable(Container, Name, USR, Loc, Availability,
+ Comment, Declaration, SubHeading, Access);
+ }
+ }
+
+ void recordObjCProtocols(ObjCContainerRecord *Container,
+ ObjCInterfaceDecl::protocol_range Protocols) {
+ for (const auto *Protocol : Protocols)
+ Container->Protocols.emplace_back(Protocol->getName(),
+ API.recordUSR(Protocol));
+ }
+
+ ASTContext &Context;
+ APISet &API;
+ LocationFileChecker &LCF;
+};
+
+class ExtractAPIConsumer : public ASTConsumer {
+public:
+ ExtractAPIConsumer(ASTContext &Context,
+ std::unique_ptr<LocationFileChecker> LCF, APISet &API)
+ : Visitor(Context, *LCF, API), LCF(std::move(LCF)) {}
+
+ void HandleTranslationUnit(ASTContext &Context) override {
+ // Use ExtractAPIVisitor to traverse symbol declarations in the context.
+ Visitor.TraverseDecl(Context.getTranslationUnitDecl());
+ }
+
+private:
+ ExtractAPIVisitor Visitor;
+ std::unique_ptr<LocationFileChecker> LCF;
+};
+
+class MacroCallback : public PPCallbacks {
+public:
+ MacroCallback(const SourceManager &SM, LocationFileChecker &LCF, APISet &API,
+ Preprocessor &PP)
+ : SM(SM), LCF(LCF), API(API), PP(PP) {}
+
+ void MacroDefined(const Token &MacroNameToken,
+ const MacroDirective *MD) override {
+ auto *MacroInfo = MD->getMacroInfo();
+
+ if (MacroInfo->isBuiltinMacro())
+ return;
+
+ auto SourceLoc = MacroNameToken.getLocation();
+ if (SM.isWrittenInBuiltinFile(SourceLoc) ||
+ SM.isWrittenInCommandLineFile(SourceLoc))
+ return;
+
+ PendingMacros.emplace_back(MacroNameToken, MD);
+ }
+
+  // If a macro gets undefined at some point during preprocessing of the
+  // inputs, it isn't an exposed API, and we should therefore not add a macro
+  // definition for it.
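+  // For illustration, with a hypothetical macro:
+  //   #define INTERNAL_HELPER(x) ((x) + 1)
+  //   ...
+  //   #undef INTERNAL_HELPER
+  // removes INTERNAL_HELPER from PendingMacros, so it is never serialized.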
+ void MacroUndefined(const Token &MacroNameToken, const MacroDefinition &MD,
+ const MacroDirective *Undef) override {
+ // If this macro wasn't previously defined we don't need to do anything
+ // here.
+ if (!Undef)
+ return;
+
+ llvm::erase_if(PendingMacros, [&MD, this](const PendingMacro &PM) {
+ return MD.getMacroInfo()->isIdenticalTo(*PM.MD->getMacroInfo(), PP,
+ /*Syntactically*/ false);
+ });
+ }
+
+ void EndOfMainFile() override {
+ for (auto &PM : PendingMacros) {
+      // `isUsedForHeaderGuard` is only set when the preprocessor leaves the
+      // file, so check for it here.
+ if (PM.MD->getMacroInfo()->isUsedForHeaderGuard())
+ continue;
+
+ if (!LCF.isLocationInKnownFile(PM.MacroNameToken.getLocation()))
+ continue;
+
+ StringRef Name = PM.MacroNameToken.getIdentifierInfo()->getName();
+ PresumedLoc Loc = SM.getPresumedLoc(PM.MacroNameToken.getLocation());
+ StringRef USR =
+ API.recordUSRForMacro(Name, PM.MacroNameToken.getLocation(), SM);
+
+ API.addMacroDefinition(
+ Name, USR, Loc,
+ DeclarationFragmentsBuilder::getFragmentsForMacro(Name, PM.MD),
+ DeclarationFragmentsBuilder::getSubHeadingForMacro(Name));
+ }
+
+ PendingMacros.clear();
+ }
+
+private:
+ struct PendingMacro {
+ Token MacroNameToken;
+ const MacroDirective *MD;
+
+ PendingMacro(const Token &MacroNameToken, const MacroDirective *MD)
+ : MacroNameToken(MacroNameToken), MD(MD) {}
+ };
+
+ const SourceManager &SM;
+ LocationFileChecker &LCF;
+ APISet &API;
+ Preprocessor &PP;
+ llvm::SmallVector<PendingMacro> PendingMacros;
+};
+
+} // namespace
+
+std::unique_ptr<ASTConsumer>
+ExtractAPIAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
+ OS = CreateOutputFile(CI, InFile);
+ if (!OS)
+ return nullptr;
+
+ ProductName = CI.getFrontendOpts().ProductName;
+
+ // Now that we have enough information about the language options and the
+ // target triple, let's create the APISet before anyone uses it.
+ API = std::make_unique<APISet>(
+ CI.getTarget().getTriple(),
+ CI.getFrontendOpts().Inputs.back().getKind().getLanguage());
+
+ auto LCF = std::make_unique<LocationFileChecker>(CI, KnownInputFiles);
+
+ CI.getPreprocessor().addPPCallbacks(std::make_unique<MacroCallback>(
+ CI.getSourceManager(), *LCF, *API, CI.getPreprocessor()));
+
+ return std::make_unique<ExtractAPIConsumer>(CI.getASTContext(),
+ std::move(LCF), *API);
+}
+
+bool ExtractAPIAction::PrepareToExecuteAction(CompilerInstance &CI) {
+ auto &Inputs = CI.getFrontendOpts().Inputs;
+ if (Inputs.empty())
+ return true;
+
+ if (!CI.hasFileManager())
+ if (!CI.createFileManager())
+ return false;
+
+ auto Kind = Inputs[0].getKind();
+
+ // Convert the header file inputs into a single input buffer.
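+  // For illustration, with hypothetical inputs: two C headers /proj/inc/a.h
+  // and /proj/inc/b.h found under the angled search path /proj/inc produce:
+  //   #include <a.h>
+  //   #include <b.h>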
+ SmallString<256> HeaderContents;
+ bool IsQuoted = false;
+ for (const FrontendInputFile &FIF : Inputs) {
+ if (Kind.isObjectiveC())
+ HeaderContents += "#import";
+ else
+ HeaderContents += "#include";
+
+ StringRef FilePath = FIF.getFile();
+ if (auto RelativeName = getRelativeIncludeName(CI, FilePath, &IsQuoted)) {
+ if (IsQuoted)
+ HeaderContents += " \"";
+ else
+ HeaderContents += " <";
+
+ HeaderContents += *RelativeName;
+
+ if (IsQuoted)
+ HeaderContents += "\"\n";
+ else
+ HeaderContents += ">\n";
+ KnownInputFiles.emplace_back(static_cast<SmallString<32>>(*RelativeName),
+ IsQuoted);
+ } else {
+ HeaderContents += " \"";
+ HeaderContents += FilePath;
+ HeaderContents += "\"\n";
+ KnownInputFiles.emplace_back(FilePath, true);
+ }
+ }
+
+ if (CI.getHeaderSearchOpts().Verbose)
+ CI.getVerboseOutputStream() << getInputBufferName() << ":\n"
+ << HeaderContents << "\n";
+
+ Buffer = llvm::MemoryBuffer::getMemBufferCopy(HeaderContents,
+ getInputBufferName());
+
+ // Set that buffer up as our "real" input in the CompilerInstance.
+ Inputs.clear();
+ Inputs.emplace_back(Buffer->getMemBufferRef(), Kind, /*IsSystem*/ false);
+
+ return true;
+}
+
+void ExtractAPIAction::EndSourceFileAction() {
+ if (!OS)
+ return;
+
+ // Setup a SymbolGraphSerializer to write out collected API information in
+ // the Symbol Graph format.
+ // FIXME: Make the kind of APISerializer configurable.
+ SymbolGraphSerializer SGSerializer(*API, ProductName);
+ SGSerializer.serialize(*OS);
+ OS.reset();
+}
+
+std::unique_ptr<raw_pwrite_stream>
+ExtractAPIAction::CreateOutputFile(CompilerInstance &CI, StringRef InFile) {
+ std::unique_ptr<raw_pwrite_stream> OS =
+ CI.createDefaultOutputFile(/*Binary=*/false, InFile, /*Extension=*/"json",
+ /*RemoveFileOnSignal=*/false);
+ if (!OS)
+ return nullptr;
+ return OS;
+}
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SerializerBase.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SerializerBase.cpp
new file mode 100644
index 000000000000..71fd25b2b2ab
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SerializerBase.cpp
@@ -0,0 +1,19 @@
+//===- ExtractAPI/Serialization/SerializerBase.cpp --------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the APISerializer interface.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/ExtractAPI/Serialization/SerializerBase.h"
+#include "llvm/Support/raw_ostream.h"
+
+using namespace clang::extractapi;
+
+void APISerializer::serialize(llvm::raw_ostream &os) {}
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp
new file mode 100644
index 000000000000..c4797cea333f
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/Serialization/SymbolGraphSerializer.cpp
@@ -0,0 +1,708 @@
+//===- ExtractAPI/Serialization/SymbolGraphSerializer.cpp -------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the SymbolGraphSerializer.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/ExtractAPI/Serialization/SymbolGraphSerializer.h"
+#include "clang/Basic/Version.h"
+#include "clang/ExtractAPI/API.h"
+#include "clang/ExtractAPI/DeclarationFragments.h"
+#include "llvm/Support/JSON.h"
+#include "llvm/Support/Path.h"
+#include "llvm/Support/VersionTuple.h"
+#include <type_traits>
+
+using namespace clang;
+using namespace clang::extractapi;
+using namespace llvm;
+using namespace llvm::json;
+
+namespace {
+
+/// Helper function to inject a JSON object \p Obj into another object \p Paren
+/// at position \p Key.
+void serializeObject(Object &Paren, StringRef Key, Optional<Object> Obj) {
+ if (Obj)
+ Paren[Key] = std::move(Obj.getValue());
+}
+
+/// Helper function to inject a JSON array \p Array into object \p Paren at
+/// position \p Key.
+void serializeArray(Object &Paren, StringRef Key, Optional<Array> Array) {
+ if (Array)
+ Paren[Key] = std::move(Array.getValue());
+}
+
+/// Serialize a \c VersionTuple \p V with the Symbol Graph semantic version
+/// format.
+///
+/// A semantic version object contains three numeric fields, representing the
+/// \c major, \c minor, and \c patch parts of the version tuple.
+/// For example, version tuple 1.0.3 is serialized as:
+/// \code
+/// {
+/// "major" : 1,
+/// "minor" : 0,
+/// "patch" : 3
+/// }
+/// \endcode
+///
+/// \returns \c None if the version \p V is empty, or an \c Object containing
+/// the semantic version representation of \p V.
+Optional<Object> serializeSemanticVersion(const VersionTuple &V) {
+ if (V.empty())
+ return None;
+
+ Object Version;
+ Version["major"] = V.getMajor();
+ Version["minor"] = V.getMinor().value_or(0);
+ Version["patch"] = V.getSubminor().value_or(0);
+ return Version;
+}
+
+/// Serialize the OS information in the Symbol Graph platform property.
+///
+/// The OS information in Symbol Graph contains the \c name of the OS, and an
+/// optional \c minimumVersion semantic version field.
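+///
+/// For example, a macOS target might produce (values illustrative):
+/// \code
+///   {
+///     "name" : "macosx",
+///     "minimumVersion" : { "major" : 11, "minor" : 0, "patch" : 0 }
+///   }
+/// \endcode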
+Object serializeOperatingSystem(const Triple &T) {
+ Object OS;
+ OS["name"] = T.getOSTypeName(T.getOS());
+ serializeObject(OS, "minimumVersion",
+ serializeSemanticVersion(T.getMinimumSupportedOSVersion()));
+ return OS;
+}
+
+/// Serialize the platform information in the Symbol Graph module section.
+///
+/// The platform object describes a target platform triple in three
+/// corresponding fields: \c architecture, \c vendor, and \c operatingSystem.
+Object serializePlatform(const Triple &T) {
+ Object Platform;
+ Platform["architecture"] = T.getArchName();
+ Platform["vendor"] = T.getVendorName();
+ Platform["operatingSystem"] = serializeOperatingSystem(T);
+ return Platform;
+}
+
+/// Serialize a source position.
+Object serializeSourcePosition(const PresumedLoc &Loc) {
+ assert(Loc.isValid() && "invalid source position");
+
+ Object SourcePosition;
+ SourcePosition["line"] = Loc.getLine();
+ SourcePosition["character"] = Loc.getColumn();
+
+ return SourcePosition;
+}
+
+/// Serialize a source location in file.
+///
+/// \param Loc The presumed location to serialize.
+/// \param IncludeFileURI If true, include the file path of \p Loc as a URI.
+/// Defaults to false.
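+///
+/// For example, with \p IncludeFileURI set, a location might serialize as
+/// (illustrative values):
+/// \code
+///   {
+///     "position" : { "line" : 12, "character" : 4 },
+///     "uri" : "file:///usr/include/foo.h"
+///   }
+/// \endcode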
+Object serializeSourceLocation(const PresumedLoc &Loc,
+ bool IncludeFileURI = false) {
+ Object SourceLocation;
+ serializeObject(SourceLocation, "position", serializeSourcePosition(Loc));
+
+ if (IncludeFileURI) {
+ std::string FileURI = "file://";
+ // Normalize file path to use forward slashes for the URI.
+ FileURI += sys::path::convert_to_slash(Loc.getFilename());
+ SourceLocation["uri"] = FileURI;
+ }
+
+ return SourceLocation;
+}
+
+/// Serialize a source range with begin and end locations.
+Object serializeSourceRange(const PresumedLoc &BeginLoc,
+ const PresumedLoc &EndLoc) {
+ Object SourceRange;
+ serializeObject(SourceRange, "start", serializeSourcePosition(BeginLoc));
+ serializeObject(SourceRange, "end", serializeSourcePosition(EndLoc));
+ return SourceRange;
+}
+
+/// Serialize the availability attributes of a symbol.
+///
+/// Availability information contains the introduced, deprecated, and obsoleted
+/// versions of the symbol as semantic versions, if not default.
+/// Availability information also contains flags to indicate if the symbol is
+/// unconditionally unavailable or deprecated,
+/// i.e. \c __attribute__((unavailable)) and \c __attribute__((deprecated)).
+///
+/// \returns \c None if the symbol has default availability attributes, or
+/// an \c Object containing the formatted availability information.
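+///
+/// For example, a symbol introduced in 1.0 and deprecated in 2.0 would be
+/// serialized as (illustrative):
+/// \code
+///   {
+///     "introducedVersion" : { "major" : 1, "minor" : 0, "patch" : 0 },
+///     "deprecatedVersion" : { "major" : 2, "minor" : 0, "patch" : 0 }
+///   }
+/// \endcode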
+Optional<Object> serializeAvailability(const AvailabilityInfo &Avail) {
+ if (Avail.isDefault())
+ return None;
+
+  Object Availability;
+  serializeObject(Availability, "introducedVersion",
+                  serializeSemanticVersion(Avail.Introduced));
+  serializeObject(Availability, "deprecatedVersion",
+                  serializeSemanticVersion(Avail.Deprecated));
+  serializeObject(Availability, "obsoletedVersion",
+                  serializeSemanticVersion(Avail.Obsoleted));
+  if (Avail.isUnavailable())
+    Availability["isUnconditionallyUnavailable"] = true;
+  if (Avail.isUnconditionallyDeprecated())
+    Availability["isUnconditionallyDeprecated"] = true;
+
+  return Availability;
+}
+
+/// Get the language name string for interface language references.
+StringRef getLanguageName(Language Lang) {
+ switch (Lang) {
+ case Language::C:
+ return "c";
+ case Language::ObjC:
+ return "objective-c";
+
+  // Currently unsupported languages.
+ case Language::CXX:
+ case Language::ObjCXX:
+ case Language::OpenCL:
+ case Language::OpenCLCXX:
+ case Language::CUDA:
+ case Language::RenderScript:
+ case Language::HIP:
+ case Language::HLSL:
+
+ // Languages that the frontend cannot parse and compile
+ case Language::Unknown:
+ case Language::Asm:
+ case Language::LLVM_IR:
+ llvm_unreachable("Unsupported language kind");
+ }
+
+ llvm_unreachable("Unhandled language kind");
+}
+
+/// Serialize the identifier object as specified by the Symbol Graph format.
+///
+/// The identifier property of a symbol contains the USR for precise and unique
+/// references, and the interface language name.
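+///
+/// For example, a C function \c main would be serialized as (the USR shown
+/// here is illustrative):
+/// \code
+///   {
+///     "precise" : "c:@F@main",
+///     "interfaceLanguage" : "c"
+///   }
+/// \endcode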
+Object serializeIdentifier(const APIRecord &Record, Language Lang) {
+ Object Identifier;
+ Identifier["precise"] = Record.USR;
+ Identifier["interfaceLanguage"] = getLanguageName(Lang);
+
+ return Identifier;
+}
+
+/// Serialize the documentation comments attached to a symbol, as specified by
+/// the Symbol Graph format.
+///
+/// The Symbol Graph \c docComment object contains an array of lines. Each line
+/// represents one line of stripped documentation comment, with source range
+/// information.
+/// e.g.
+/// \code
+/// /// This is a documentation comment
+/// ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~' First line.
+/// /// with multiple lines.
+/// ^~~~~~~~~~~~~~~~~~~~~~~' Second line.
+/// \endcode
+///
+/// \returns \c None if \p Comment is empty, or an \c Object containing the
+/// formatted lines.
+Optional<Object> serializeDocComment(const DocComment &Comment) {
+ if (Comment.empty())
+ return None;
+
+ Object DocComment;
+ Array LinesArray;
+ for (const auto &CommentLine : Comment) {
+ Object Line;
+ Line["text"] = CommentLine.Text;
+ serializeObject(Line, "range",
+ serializeSourceRange(CommentLine.Begin, CommentLine.End));
+ LinesArray.emplace_back(std::move(Line));
+ }
+ serializeArray(DocComment, "lines", LinesArray);
+
+ return DocComment;
+}
+
+/// Serialize the declaration fragments of a symbol.
+///
+/// The Symbol Graph declaration fragments field is an array of tagged
+/// important parts of a symbol's declaration. The fragment sequence can be
+/// joined to form spans of declaration text, with attached information
+/// useful for purposes like syntax highlighting. For example:
+/// \code
+/// const int pi; -> "declarationFragments" : [
+/// {
+/// "kind" : "keyword",
+/// "spelling" : "const"
+/// },
+/// {
+/// "kind" : "text",
+/// "spelling" : " "
+/// },
+/// {
+/// "kind" : "typeIdentifier",
+/// "preciseIdentifier" : "c:I",
+/// "spelling" : "int"
+/// },
+/// {
+/// "kind" : "text",
+/// "spelling" : " "
+/// },
+/// {
+/// "kind" : "identifier",
+/// "spelling" : "pi"
+/// }
+/// ]
+/// \endcode
+///
+/// \returns \c None if \p DF is empty, or an \c Array containing the formatted
+/// declaration fragments array.
+Optional<Array> serializeDeclarationFragments(const DeclarationFragments &DF) {
+ if (DF.getFragments().empty())
+ return None;
+
+ Array Fragments;
+ for (const auto &F : DF.getFragments()) {
+ Object Fragment;
+ Fragment["spelling"] = F.Spelling;
+ Fragment["kind"] = DeclarationFragments::getFragmentKindString(F.Kind);
+ if (!F.PreciseIdentifier.empty())
+ Fragment["preciseIdentifier"] = F.PreciseIdentifier;
+ Fragments.emplace_back(std::move(Fragment));
+ }
+
+ return Fragments;
+}
+
+/// Serialize the \c names field of a symbol as specified by the Symbol Graph
+/// format.
+///
+/// The Symbol Graph names field contains multiple representations of a symbol
+/// that can be used for different applications:
+/// - \c title : The simple declared name of the symbol;
+/// - \c subHeading : An array of declaration fragments that provides tags,
+/// and potentially more tokens (for example the \c +/- symbol for
+/// Objective-C methods). Can be used as sub-headings for documentation.
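+///
+/// For example, a global variable \c pi would get names like the following
+/// (illustrative; the \c subHeading fragments are elided):
+/// \code
+///   {
+///     "title" : "pi",
+///     "navigator" : [ { "kind" : "identifier", "spelling" : "pi" } ],
+///     "subHeading" : [ ... ]
+///   }
+/// \endcode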
+Object serializeNames(const APIRecord &Record) {
+ Object Names;
+ Names["title"] = Record.Name;
+ serializeArray(Names, "subHeading",
+ serializeDeclarationFragments(Record.SubHeading));
+ DeclarationFragments NavigatorFragments;
+ NavigatorFragments.append(Record.Name,
+ DeclarationFragments::FragmentKind::Identifier,
+ /*PreciseIdentifier*/ "");
+ serializeArray(Names, "navigator",
+ serializeDeclarationFragments(NavigatorFragments));
+
+ return Names;
+}
+
+/// Serialize the symbol kind information.
+///
+/// The Symbol Graph symbol kind property contains a shorthand \c identifier
+/// which is prefixed by the source language name, useful for tooling to parse
+/// the kind, and a \c displayName for rendering human-readable names.
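+///
+/// For example, a C global function would get the kind object (illustrative):
+/// \code
+///   { "identifier" : "c.func", "displayName" : "Function" }
+/// \endcode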
+Object serializeSymbolKind(const APIRecord &Record, Language Lang) {
+ auto AddLangPrefix = [&Lang](StringRef S) -> std::string {
+ return (getLanguageName(Lang) + "." + S).str();
+ };
+
+ Object Kind;
+ switch (Record.getKind()) {
+ case APIRecord::RK_GlobalFunction:
+ Kind["identifier"] = AddLangPrefix("func");
+ Kind["displayName"] = "Function";
+ break;
+ case APIRecord::RK_GlobalVariable:
+ Kind["identifier"] = AddLangPrefix("var");
+ Kind["displayName"] = "Global Variable";
+ break;
+ case APIRecord::RK_EnumConstant:
+ Kind["identifier"] = AddLangPrefix("enum.case");
+ Kind["displayName"] = "Enumeration Case";
+ break;
+ case APIRecord::RK_Enum:
+ Kind["identifier"] = AddLangPrefix("enum");
+ Kind["displayName"] = "Enumeration";
+ break;
+ case APIRecord::RK_StructField:
+ Kind["identifier"] = AddLangPrefix("property");
+ Kind["displayName"] = "Instance Property";
+ break;
+ case APIRecord::RK_Struct:
+ Kind["identifier"] = AddLangPrefix("struct");
+ Kind["displayName"] = "Structure";
+ break;
+ case APIRecord::RK_ObjCIvar:
+ Kind["identifier"] = AddLangPrefix("ivar");
+ Kind["displayName"] = "Instance Variable";
+ break;
+ case APIRecord::RK_ObjCMethod:
+    if (cast<ObjCMethodRecord>(&Record)->IsInstanceMethod) {
+ Kind["identifier"] = AddLangPrefix("method");
+ Kind["displayName"] = "Instance Method";
+ } else {
+ Kind["identifier"] = AddLangPrefix("type.method");
+ Kind["displayName"] = "Type Method";
+ }
+ break;
+ case APIRecord::RK_ObjCProperty:
+ Kind["identifier"] = AddLangPrefix("property");
+ Kind["displayName"] = "Instance Property";
+ break;
+ case APIRecord::RK_ObjCInterface:
+ Kind["identifier"] = AddLangPrefix("class");
+ Kind["displayName"] = "Class";
+ break;
+ case APIRecord::RK_ObjCCategory:
+ // We don't serialize out standalone Objective-C category symbols yet.
+ llvm_unreachable("Serializing standalone Objective-C category symbols is "
+ "not supported.");
+ break;
+ case APIRecord::RK_ObjCProtocol:
+ Kind["identifier"] = AddLangPrefix("protocol");
+ Kind["displayName"] = "Protocol";
+ break;
+ case APIRecord::RK_MacroDefinition:
+ Kind["identifier"] = AddLangPrefix("macro");
+ Kind["displayName"] = "Macro";
+ break;
+ case APIRecord::RK_Typedef:
+ Kind["identifier"] = AddLangPrefix("typealias");
+ Kind["displayName"] = "Type Alias";
+ break;
+ }
+
+ return Kind;
+}
+
+template <typename RecordTy>
+Optional<Object> serializeFunctionSignatureMixinImpl(const RecordTy &Record,
+ std::true_type) {
+ const auto &FS = Record.Signature;
+ if (FS.empty())
+ return None;
+
+ Object Signature;
+ serializeArray(Signature, "returns",
+ serializeDeclarationFragments(FS.getReturnType()));
+
+ Array Parameters;
+ for (const auto &P : FS.getParameters()) {
+ Object Parameter;
+ Parameter["name"] = P.Name;
+ serializeArray(Parameter, "declarationFragments",
+ serializeDeclarationFragments(P.Fragments));
+ Parameters.emplace_back(std::move(Parameter));
+ }
+
+ if (!Parameters.empty())
+ Signature["parameters"] = std::move(Parameters);
+
+ return Signature;
+}
+
+template <typename RecordTy>
+Optional<Object> serializeFunctionSignatureMixinImpl(const RecordTy &Record,
+ std::false_type) {
+ return None;
+}
+
+/// Serialize the function signature field, as specified by the
+/// Symbol Graph format.
+///
+/// The Symbol Graph function signature property contains two arrays.
+/// - The \c returns array is the declaration fragments of the return type;
+/// - The \c parameters array contains names and declaration fragments of the
+/// parameters.
+///
+/// If \p Record has no function signature information, nothing is serialized
+/// into \p Paren.
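+///
+/// For example, a function \c int add(int a, int b) would produce a field
+/// like this (fragment arrays elided for brevity; illustrative only):
+/// \code
+///   "functionSignature" : {
+///     "returns" : [ ... ],
+///     "parameters" : [
+///       { "name" : "a", "declarationFragments" : [ ... ] },
+///       { "name" : "b", "declarationFragments" : [ ... ] }
+///     ]
+///   }
+/// \endcode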
+template <typename RecordTy>
+void serializeFunctionSignatureMixin(Object &Paren, const RecordTy &Record) {
+ serializeObject(Paren, "functionSignature",
+ serializeFunctionSignatureMixinImpl(
+ Record, has_function_signature<RecordTy>()));
+}
+
+} // namespace
+
+void SymbolGraphSerializer::anchor() {}
+
+/// Defines the format version emitted by SymbolGraphSerializer.
+const VersionTuple SymbolGraphSerializer::FormatVersion{0, 5, 3};
+
+Object SymbolGraphSerializer::serializeMetadata() const {
+ Object Metadata;
+ serializeObject(Metadata, "formatVersion",
+ serializeSemanticVersion(FormatVersion));
+ Metadata["generator"] = clang::getClangFullVersion();
+ return Metadata;
+}
+
+Object SymbolGraphSerializer::serializeModule() const {
+ Object Module;
+ // The user is expected to always pass `--product-name=` on the command line
+ // to populate this field.
+ Module["name"] = ProductName;
+ serializeObject(Module, "platform", serializePlatform(API.getTarget()));
+ return Module;
+}
+
+bool SymbolGraphSerializer::shouldSkip(const APIRecord &Record) const {
+ // Skip unconditionally unavailable symbols
+ if (Record.Availability.isUnconditionallyUnavailable())
+ return true;
+
+  // Filter out symbols prefixed with an underscore, as they are understood to
+  // be symbols clients should not use.
+ if (Record.Name.startswith("_"))
+ return true;
+
+ return false;
+}
+
+template <typename RecordTy>
+Optional<Object>
+SymbolGraphSerializer::serializeAPIRecord(const RecordTy &Record) const {
+ if (shouldSkip(Record))
+ return None;
+
+ Object Obj;
+ serializeObject(Obj, "identifier",
+ serializeIdentifier(Record, API.getLanguage()));
+ serializeObject(Obj, "kind", serializeSymbolKind(Record, API.getLanguage()));
+ serializeObject(Obj, "names", serializeNames(Record));
+ serializeObject(
+ Obj, "location",
+ serializeSourceLocation(Record.Location, /*IncludeFileURI=*/true));
+ serializeObject(Obj, "availbility",
+ serializeAvailability(Record.Availability));
+ serializeObject(Obj, "docComment", serializeDocComment(Record.Comment));
+ serializeArray(Obj, "declarationFragments",
+ serializeDeclarationFragments(Record.Declaration));
+  // TODO: Once we keep track of symbol access information, serialize it
+  // correctly here.
+ Obj["accessLevel"] = "public";
+ serializeArray(Obj, "pathComponents", Array(PathComponents));
+
+ serializeFunctionSignatureMixin(Obj, Record);
+
+ return Obj;
+}
+
+template <typename MemberTy>
+void SymbolGraphSerializer::serializeMembers(
+ const APIRecord &Record,
+ const SmallVector<std::unique_ptr<MemberTy>> &Members) {
+ for (const auto &Member : Members) {
+ auto MemberPathComponentGuard = makePathComponentGuard(Member->Name);
+ auto MemberRecord = serializeAPIRecord(*Member);
+ if (!MemberRecord)
+ continue;
+
+ Symbols.emplace_back(std::move(*MemberRecord));
+ serializeRelationship(RelationshipKind::MemberOf, *Member, Record);
+ }
+}
+
+StringRef SymbolGraphSerializer::getRelationshipString(RelationshipKind Kind) {
+ switch (Kind) {
+ case RelationshipKind::MemberOf:
+ return "memberOf";
+ case RelationshipKind::InheritsFrom:
+ return "inheritsFrom";
+ case RelationshipKind::ConformsTo:
+ return "conformsTo";
+ }
+ llvm_unreachable("Unhandled relationship kind");
+}
+
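+/// Serialize a single relationship between \p Source and \p Target into the
+/// serializer's relationships array. An entry has the rough shape below; the
+/// USRs are illustrative, not taken from real output:
+/// \code
+///   {
+///     "source" : "c:@S@Foo@FI@bar",
+///     "target" : "c:@S@Foo",
+///     "kind" : "memberOf"
+///   }
+/// \endcode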
+void SymbolGraphSerializer::serializeRelationship(RelationshipKind Kind,
+ SymbolReference Source,
+ SymbolReference Target) {
+ Object Relationship;
+ Relationship["source"] = Source.USR;
+ Relationship["target"] = Target.USR;
+ Relationship["kind"] = getRelationshipString(Kind);
+
+ Relationships.emplace_back(std::move(Relationship));
+}
+
+void SymbolGraphSerializer::serializeGlobalFunctionRecord(
+ const GlobalFunctionRecord &Record) {
+ auto GlobalPathComponentGuard = makePathComponentGuard(Record.Name);
+
+ auto Obj = serializeAPIRecord(Record);
+ if (!Obj)
+ return;
+
+ Symbols.emplace_back(std::move(*Obj));
+}
+
+void SymbolGraphSerializer::serializeGlobalVariableRecord(
+ const GlobalVariableRecord &Record) {
+ auto GlobalPathComponentGuard = makePathComponentGuard(Record.Name);
+
+ auto Obj = serializeAPIRecord(Record);
+ if (!Obj)
+ return;
+
+ Symbols.emplace_back(std::move(*Obj));
+}
+
+void SymbolGraphSerializer::serializeEnumRecord(const EnumRecord &Record) {
+ auto EnumPathComponentGuard = makePathComponentGuard(Record.Name);
+ auto Enum = serializeAPIRecord(Record);
+ if (!Enum)
+ return;
+
+ Symbols.emplace_back(std::move(*Enum));
+ serializeMembers(Record, Record.Constants);
+}
+
+void SymbolGraphSerializer::serializeStructRecord(const StructRecord &Record) {
+ auto StructPathComponentGuard = makePathComponentGuard(Record.Name);
+ auto Struct = serializeAPIRecord(Record);
+ if (!Struct)
+ return;
+
+ Symbols.emplace_back(std::move(*Struct));
+ serializeMembers(Record, Record.Fields);
+}
+
+void SymbolGraphSerializer::serializeObjCContainerRecord(
+ const ObjCContainerRecord &Record) {
+ auto ObjCContainerPathComponentGuard = makePathComponentGuard(Record.Name);
+ auto ObjCContainer = serializeAPIRecord(Record);
+ if (!ObjCContainer)
+ return;
+
+ Symbols.emplace_back(std::move(*ObjCContainer));
+
+ serializeMembers(Record, Record.Ivars);
+ serializeMembers(Record, Record.Methods);
+ serializeMembers(Record, Record.Properties);
+
+ for (const auto &Protocol : Record.Protocols)
+ // Record that Record conforms to Protocol.
+ serializeRelationship(RelationshipKind::ConformsTo, Record, Protocol);
+
+ if (auto *ObjCInterface = dyn_cast<ObjCInterfaceRecord>(&Record)) {
+ if (!ObjCInterface->SuperClass.empty())
+ // If Record is an Objective-C interface record and it has a super class,
+ // record that Record is inherited from SuperClass.
+ serializeRelationship(RelationshipKind::InheritsFrom, Record,
+ ObjCInterface->SuperClass);
+
+ // Members of categories extending an interface are serialized as members of
+ // the interface.
+ for (const auto *Category : ObjCInterface->Categories) {
+ serializeMembers(Record, Category->Ivars);
+ serializeMembers(Record, Category->Methods);
+ serializeMembers(Record, Category->Properties);
+
+      // Surface the protocols of the category to the interface.
+ for (const auto &Protocol : Category->Protocols)
+ serializeRelationship(RelationshipKind::ConformsTo, Record, Protocol);
+ }
+ }
+}
+
+void SymbolGraphSerializer::serializeMacroDefinitionRecord(
+ const MacroDefinitionRecord &Record) {
+ auto MacroPathComponentGuard = makePathComponentGuard(Record.Name);
+ auto Macro = serializeAPIRecord(Record);
+
+ if (!Macro)
+ return;
+
+ Symbols.emplace_back(std::move(*Macro));
+}
+
+void SymbolGraphSerializer::serializeTypedefRecord(
+ const TypedefRecord &Record) {
+ // Typedefs of anonymous types have their entries unified with the underlying
+ // type.
+ bool ShouldDrop = Record.UnderlyingType.Name.empty();
+  // Enums declared with `NS_OPTIONS` have a named enum and a named typedef
+  // with the same name.
+ ShouldDrop |= (Record.UnderlyingType.Name == Record.Name);
+ if (ShouldDrop)
+ return;
+
+ auto TypedefPathComponentGuard = makePathComponentGuard(Record.Name);
+ auto Typedef = serializeAPIRecord(Record);
+ if (!Typedef)
+ return;
+
+ (*Typedef)["type"] = Record.UnderlyingType.USR;
+
+ Symbols.emplace_back(std::move(*Typedef));
+}
+
+SymbolGraphSerializer::PathComponentGuard
+SymbolGraphSerializer::makePathComponentGuard(StringRef Component) {
+ return PathComponentGuard(PathComponents, Component);
+}
+
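+/// Assemble the complete symbol graph from the API set. The resulting root
+/// object has the following top-level shape (illustrative sketch):
+/// \code
+///   {
+///     "metadata" : { ... },
+///     "module" : { ... },
+///     "symbols" : [ ... ],
+///     "relationships" : [ ... ]
+///   }
+/// \endcode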
+Object SymbolGraphSerializer::serialize() {
+ Object Root;
+ serializeObject(Root, "metadata", serializeMetadata());
+ serializeObject(Root, "module", serializeModule());
+
+ // Serialize global variables in the API set.
+ for (const auto &GlobalVar : API.getGlobalVariables())
+ serializeGlobalVariableRecord(*GlobalVar.second);
+
+ for (const auto &GlobalFunction : API.getGlobalFunctions())
+ serializeGlobalFunctionRecord(*GlobalFunction.second);
+
+ // Serialize enum records in the API set.
+ for (const auto &Enum : API.getEnums())
+ serializeEnumRecord(*Enum.second);
+
+ // Serialize struct records in the API set.
+ for (const auto &Struct : API.getStructs())
+ serializeStructRecord(*Struct.second);
+
+ // Serialize Objective-C interface records in the API set.
+ for (const auto &ObjCInterface : API.getObjCInterfaces())
+ serializeObjCContainerRecord(*ObjCInterface.second);
+
+ // Serialize Objective-C protocol records in the API set.
+ for (const auto &ObjCProtocol : API.getObjCProtocols())
+ serializeObjCContainerRecord(*ObjCProtocol.second);
+
+ for (const auto &Macro : API.getMacros())
+ serializeMacroDefinitionRecord(*Macro.second);
+
+ for (const auto &Typedef : API.getTypedefs())
+ serializeTypedefRecord(*Typedef.second);
+
+ Root["symbols"] = std::move(Symbols);
+ Root["relationships"] = std::move(Relationships);
+
+ return Root;
+}
+
+void SymbolGraphSerializer::serialize(raw_ostream &os) {
+ Object root = serialize();
+ if (Options.Compact)
+ os << formatv("{0}", Value(std::move(root))) << "\n";
+ else
+ os << formatv("{0:2}", Value(std::move(root))) << "\n";
+}
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp b/contrib/llvm-project/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp
new file mode 100644
index 000000000000..9c165e693e0e
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.cpp
@@ -0,0 +1,79 @@
+//===- ExtractAPI/TypedefUnderlyingTypeResolver.cpp -------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file implements the TypedefUnderlyingTypeResolver.
+///
+//===----------------------------------------------------------------------===//
+
+#include "TypedefUnderlyingTypeResolver.h"
+#include "clang/Index/USRGeneration.h"
+
+using namespace clang;
+using namespace extractapi;
+
+namespace {
+
+const NamedDecl *getUnderlyingTypeDecl(QualType Type) {
+ const NamedDecl *TypeDecl = nullptr;
+
+ const TypedefType *TypedefTy = Type->getAs<TypedefType>();
+ if (TypedefTy)
+ TypeDecl = TypedefTy->getDecl();
+ if (const TagType *TagTy = Type->getAs<TagType>()) {
+ TypeDecl = TagTy->getDecl();
+ } else if (const ObjCInterfaceType *ObjCITy =
+ Type->getAs<ObjCInterfaceType>()) {
+ TypeDecl = ObjCITy->getDecl();
+ }
+
+ if (TypeDecl && TypedefTy) {
+    // If this is a typedef to another typedef, use the typedef's decl for the
+    // USR; this will actually be in the output, unlike a typedef to an
+    // anonymous decl.
+ const TypedefNameDecl *TypedefDecl = TypedefTy->getDecl();
+ if (TypedefDecl->getUnderlyingType()->isTypedefNameType())
+ TypeDecl = TypedefDecl;
+ }
+
+ return TypeDecl;
+}
+
+} // namespace
+
+SymbolReference
+TypedefUnderlyingTypeResolver::getSymbolReferenceForType(QualType Type,
+ APISet &API) const {
+ std::string TypeName = Type.getAsString();
+ SmallString<128> TypeUSR;
+ const NamedDecl *TypeDecl = getUnderlyingTypeDecl(Type);
+ const TypedefType *TypedefTy = Type->getAs<TypedefType>();
+
+ if (TypeDecl) {
+ if (!TypedefTy)
+ TypeName = TypeDecl->getName().str();
+
+ clang::index::generateUSRForDecl(TypeDecl, TypeUSR);
+ } else {
+ clang::index::generateUSRForType(Type, Context, TypeUSR);
+ }
+
+ return {API.copyString(TypeName), API.copyString(TypeUSR)};
+}
+
+std::string TypedefUnderlyingTypeResolver::getUSRForType(QualType Type) const {
+ SmallString<128> TypeUSR;
+ const NamedDecl *TypeDecl = getUnderlyingTypeDecl(Type);
+
+ if (TypeDecl)
+ clang::index::generateUSRForDecl(TypeDecl, TypeUSR);
+ else
+ clang::index::generateUSRForType(Type, Context, TypeUSR);
+
+ return std::string(TypeUSR);
+}
diff --git a/contrib/llvm-project/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.h b/contrib/llvm-project/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.h
new file mode 100644
index 000000000000..0096ff235914
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/ExtractAPI/TypedefUnderlyingTypeResolver.h
@@ -0,0 +1,46 @@
+//===- ExtractAPI/TypedefUnderlyingTypeResolver.h ---------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file defines the TypedefUnderlyingTypeResolver, which is a helper
+/// type for resolving the underlying type for a given QualType and exposing
+/// that information in various forms.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_UNDERLYING_TYPE_RESOLVER_H
+#define LLVM_CLANG_UNDERLYING_TYPE_RESOLVER_H
+
+#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
+#include "clang/ExtractAPI/API.h"
+
+#include <string>
+
+namespace clang {
+namespace extractapi {
+
+struct TypedefUnderlyingTypeResolver {
+
+ /// Get a SymbolReference for the given type.
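+  ///
+  /// A minimal usage sketch (the local names are illustrative):
+  /// \code
+  ///   TypedefUnderlyingTypeResolver Resolver(Context);
+  ///   SymbolReference Ref = Resolver.getSymbolReferenceForType(QT, API);
+  /// \endcode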
+ SymbolReference getSymbolReferenceForType(QualType Type, APISet &API) const;
+
+ /// Get a USR for the given type.
+ std::string getUSRForType(QualType Type) const;
+
+ explicit TypedefUnderlyingTypeResolver(ASTContext &Context)
+ : Context(Context) {}
+
+private:
+ ASTContext &Context;
+};
+
+} // namespace extractapi
+} // namespace clang
+
+#endif // LLVM_CLANG_UNDERLYING_TYPE_RESOLVER_H
diff --git a/contrib/llvm-project/clang/lib/Format/AffectedRangeManager.cpp b/contrib/llvm-project/clang/lib/Format/AffectedRangeManager.cpp
index f69f65c5ddf1..e65457437146 100644
--- a/contrib/llvm-project/clang/lib/Format/AffectedRangeManager.cpp
+++ b/contrib/llvm-project/clang/lib/Format/AffectedRangeManager.cpp
@@ -60,10 +60,12 @@ bool AffectedRangeManager::computeAffectedLines(
bool AffectedRangeManager::affectsCharSourceRange(
const CharSourceRange &Range) {
- for (const CharSourceRange &R : Ranges)
+ for (const CharSourceRange &R : Ranges) {
if (!SourceMgr.isBeforeInTranslationUnit(Range.getEnd(), R.getBegin()) &&
- !SourceMgr.isBeforeInTranslationUnit(R.getEnd(), Range.getBegin()))
+ !SourceMgr.isBeforeInTranslationUnit(R.getEnd(), Range.getBegin())) {
return true;
+ }
+ }
return false;
}
diff --git a/contrib/llvm-project/clang/lib/Format/BreakableToken.cpp b/contrib/llvm-project/clang/lib/Format/BreakableToken.cpp
index 1ffaa7f6f45b..db82018a4c83 100644
--- a/contrib/llvm-project/clang/lib/Format/BreakableToken.cpp
+++ b/contrib/llvm-project/clang/lib/Format/BreakableToken.cpp
@@ -142,9 +142,10 @@ getCommentSplit(StringRef Text, unsigned ContentStartColumn,
// Make sure that we don't break at leading whitespace that
// reaches past MaxSplit.
StringRef::size_type FirstNonWhitespace = Text.find_first_not_of(Blanks);
- if (FirstNonWhitespace == StringRef::npos)
+ if (FirstNonWhitespace == StringRef::npos) {
// If the comment is only whitespace, we cannot split.
return BreakableToken::Split(StringRef::npos, 0);
+ }
SpaceOffset = Text.find_first_of(
Blanks, std::max<unsigned>(MaxSplitBytes, FirstNonWhitespace));
}
@@ -410,14 +411,17 @@ BreakableBlockComment::BreakableBlockComment(
// now we just wrap them without stars.
Decoration = "";
}
- for (size_t i = 1, e = Lines.size(); i < e && !Decoration.empty(); ++i) {
- // If the last line is empty, the closing "*/" will have a star.
- if (i + 1 == e && Content[i].empty())
- break;
- if (!Content[i].empty() && i + 1 != e && Decoration.startswith(Content[i]))
+ for (size_t i = 1, e = Content.size(); i < e && !Decoration.empty(); ++i) {
+ const StringRef &Text = Content[i];
+ if (i + 1 == e) {
+ // If the last line is empty, the closing "*/" will have a star.
+ if (Text.empty())
+ break;
+ } else if (!Text.empty() && Decoration.startswith(Text)) {
continue;
- while (!Content[i].startswith(Decoration))
- Decoration = Decoration.substr(0, Decoration.size() - 1);
+ }
+ while (!Text.startswith(Decoration))
+ Decoration = Decoration.drop_back(1);
}
LastLineNeedsDecoration = true;
@@ -430,9 +434,8 @@ BreakableBlockComment::BreakableBlockComment(
// correctly indented.
LastLineNeedsDecoration = false;
// Align the star in the last '*/' with the stars on the previous lines.
- if (e >= 2 && !Decoration.empty()) {
+ if (e >= 2 && !Decoration.empty())
ContentColumn[i] = DecorationColumn;
- }
} else if (Decoration.empty()) {
// For all other lines, set the start column to 0 if they're empty, so
// we do not insert trailing whitespace anywhere.
@@ -448,13 +451,13 @@ BreakableBlockComment::BreakableBlockComment(
unsigned DecorationSize = Decoration.startswith(Content[i])
? Content[i].size()
: Decoration.size();
- if (DecorationSize) {
+ if (DecorationSize)
ContentColumn[i] = DecorationColumn + DecorationSize;
- }
Content[i] = Content[i].substr(DecorationSize);
- if (!Decoration.startswith(Content[i]))
+ if (!Decoration.startswith(Content[i])) {
IndentAtLineBreak =
std::min<int>(IndentAtLineBreak, std::max(0, ContentColumn[i]));
+ }
}
IndentAtLineBreak = std::max<unsigned>(IndentAtLineBreak, Decoration.size());
@@ -583,14 +586,14 @@ unsigned BreakableBlockComment::getContentIndent(unsigned LineIndex) const {
// /** line 0 */
// is "* line 0", so we need to skip over the decoration in that case.
StringRef ContentWithNoDecoration = Content[LineIndex];
- if (LineIndex == 0 && ContentWithNoDecoration.startswith("*")) {
+ if (LineIndex == 0 && ContentWithNoDecoration.startswith("*"))
ContentWithNoDecoration = ContentWithNoDecoration.substr(1).ltrim(Blanks);
- }
StringRef FirstWord = ContentWithNoDecoration.substr(
0, ContentWithNoDecoration.find_first_of(Blanks));
if (ContentIndentingJavadocAnnotations.find(FirstWord) !=
- ContentIndentingJavadocAnnotations.end())
+ ContentIndentingJavadocAnnotations.end()) {
return Style.ContinuationIndentWidth;
+ }
return 0;
}
@@ -636,8 +639,9 @@ BreakableToken::Split BreakableBlockComment::getReflowSplit(
if (LineIndex) {
unsigned PreviousContentIndent = getContentIndent(LineIndex - 1);
if (PreviousContentIndent && Trimmed != StringRef::npos &&
- Trimmed != PreviousContentIndent)
+ Trimmed != PreviousContentIndent) {
return Split(StringRef::npos, 0);
+ }
}
return Split(0, Trimmed != StringRef::npos ? Trimmed : 0);
@@ -679,9 +683,10 @@ void BreakableBlockComment::adaptStartOfLine(
// Note: this works because getCommentSplit is careful never to split at
// the beginning of a line.
size_t BreakLength = Lines[0].substr(1).find_first_not_of(Blanks);
- if (BreakLength != StringRef::npos)
+ if (BreakLength != StringRef::npos) {
insertBreak(LineIndex, 0, Split(1, BreakLength), /*ContentIndent=*/0,
Whitespaces);
+ }
}
return;
}
@@ -700,11 +705,9 @@ void BreakableBlockComment::adaptStartOfLine(
// contain a trailing whitespace.
Prefix = Prefix.substr(0, 1);
}
- } else {
- if (ContentColumn[LineIndex] == 1) {
- // This line starts immediately after the decorating *.
- Prefix = Prefix.substr(0, 1);
- }
+ } else if (ContentColumn[LineIndex] == 1) {
+ // This line starts immediately after the decorating *.
+ Prefix = Prefix.substr(0, 1);
}
// This is the offset of the end of the last line relative to the start of the
// token text in the token.
@@ -738,9 +741,8 @@ bool BreakableBlockComment::mayReflow(
// Content[LineIndex] may exclude the indent after the '*' decoration. In that
// case, we compute the start of the comment pragma manually.
StringRef IndentContent = Content[LineIndex];
- if (Lines[LineIndex].ltrim(Blanks).startswith("*")) {
+ if (Lines[LineIndex].ltrim(Blanks).startswith("*"))
IndentContent = Lines[LineIndex].ltrim(Blanks).substr(1);
- }
return LineIndex > 0 && !CommentPragmasRegex.match(IndentContent) &&
mayReflowContent(Content[LineIndex]) && !Tok.Finalized &&
!switchesFormatting(tokenAt(LineIndex));
@@ -778,6 +780,33 @@ BreakableLineCommentSection::BreakableLineCommentSection(
OriginalPrefix[i] = IndentPrefix;
const int SpacesInPrefix = llvm::count(IndentPrefix, ' ');
+  // This lambda also considers multibyte characters that are not handled by
+  // functions like isPunctuation provided by CharInfo.
+ const auto NoSpaceBeforeFirstCommentChar = [&]() {
+ assert(Lines[i].size() > IndentPrefix.size());
+ const char FirstCommentChar = Lines[i][IndentPrefix.size()];
+ const unsigned FirstCharByteSize =
+ encoding::getCodePointNumBytes(FirstCommentChar, Encoding);
+ if (encoding::columnWidth(
+ Lines[i].substr(IndentPrefix.size(), FirstCharByteSize),
+ Encoding) != 1) {
+ return false;
+ }
+    // In C-like comments, add a space before #. For example, this is useful
+ // to preserve the relative indentation when commenting out code with
+ // #includes.
+ //
+ // In languages using # as the comment leader such as proto, don't
+ // add a space to support patterns like:
+ // #########
+ // # section
+ // #########
+ if (FirstCommentChar == '#' && !TokenText.startswith("#"))
+ return false;
+ return FirstCommentChar == '\\' || isPunctuation(FirstCommentChar) ||
+ isHorizontalWhitespace(FirstCommentChar);
+ };
+
// On the first line of the comment section we calculate how many spaces
// are to be added or removed, all lines after that just get only the
// change and we will not look at the maximum anymore. Additionally to the
@@ -786,7 +815,7 @@ BreakableLineCommentSection::BreakableLineCommentSection(
if (i == 0 || OriginalPrefix[i].rtrim(Blanks) !=
OriginalPrefix[i - 1].rtrim(Blanks)) {
if (SpacesInPrefix < Minimum && Lines[i].size() > IndentPrefix.size() &&
- isAlphanumeric(Lines[i][IndentPrefix.size()])) {
+ !NoSpaceBeforeFirstCommentChar()) {
FirstLineSpaceChange = Minimum - SpacesInPrefix;
} else if (static_cast<unsigned>(SpacesInPrefix) >
Style.SpacesInLineCommentPrefix.Maximum) {
@@ -807,10 +836,13 @@ BreakableLineCommentSection::BreakableLineCommentSection(
assert(Lines[i].size() > IndentPrefix.size());
const auto FirstNonSpace = Lines[i][IndentPrefix.size()];
- const auto AllowsSpaceChange =
- SpacesInPrefix != 0 ||
- (isAlphanumeric(FirstNonSpace) ||
- (FirstNonSpace == '}' && FirstLineSpaceChange != 0));
+ const bool IsFormatComment = LineTok && switchesFormatting(*LineTok);
+ const bool LineRequiresLeadingSpace =
+ !NoSpaceBeforeFirstCommentChar() ||
+ (FirstNonSpace == '}' && FirstLineSpaceChange != 0);
+ const bool AllowsSpaceChange =
+ !IsFormatComment &&
+ (SpacesInPrefix != 0 || LineRequiresLeadingSpace);
if (PrefixSpaceChange[i] > 0 && AllowsSpaceChange) {
Prefix[i] = IndentPrefix.str();
@@ -999,9 +1031,8 @@ void BreakableLineCommentSection::adaptStartOfLine(
}
void BreakableLineCommentSection::updateNextToken(LineState &State) const {
- if (LastLineTok) {
+ if (LastLineTok)
State.NextToken = LastLineTok->Next;
- }
}
bool BreakableLineCommentSection::mayReflow(
@@ -1009,9 +1040,8 @@ bool BreakableLineCommentSection::mayReflow(
// Line comments have the indent as part of the prefix, so we need to
// recompute the start of the line.
StringRef IndentContent = Content[LineIndex];
- if (Lines[LineIndex].startswith("//")) {
+ if (Lines[LineIndex].startswith("//"))
IndentContent = Lines[LineIndex].substr(2);
- }
// FIXME: Decide whether we want to reflow non-regular indents:
// Currently, we only reflow when the OriginalPrefix[LineIndex] matches the
// OriginalPrefix[LineIndex-1]. That means we don't reflow
diff --git a/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp b/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp
index 45a4d23557f7..2cb985cdc4e5 100644
--- a/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp
+++ b/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.cpp
@@ -123,8 +123,9 @@ static bool startsNextParameter(const FormatToken &Current,
const FormatStyle &Style) {
const FormatToken &Previous = *Current.Previous;
if (Current.is(TT_CtorInitializerComma) &&
- Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma)
+ Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma) {
return true;
+ }
if (Style.Language == FormatStyle::LK_Proto && Current.is(TT_SelectorName))
return true;
return Previous.is(tok::comma) && !Current.isTrailingComment() &&
@@ -150,8 +151,9 @@ static bool opensProtoMessageField(const FormatToken &LessTok,
// For example, the delimiter of R"deli(cont)deli" is deli.
static llvm::Optional<StringRef> getRawStringDelimiter(StringRef TokenText) {
if (TokenText.size() < 5 // The smallest raw string possible is 'R"()"'.
- || !TokenText.startswith("R\"") || !TokenText.endswith("\""))
+ || !TokenText.startswith("R\"") || !TokenText.endswith("\"")) {
return None;
+ }
// A raw string starts with 'R"<delimiter>(' and delimiter is ascii and has
// size at most 16 by the standard, so the first '(' must be among the first
@@ -175,10 +177,9 @@ static llvm::Optional<StringRef> getRawStringDelimiter(StringRef TokenText) {
static StringRef
getCanonicalRawStringDelimiter(const FormatStyle &Style,
FormatStyle::LanguageKind Language) {
- for (const auto &Format : Style.RawStringFormats) {
+ for (const auto &Format : Style.RawStringFormats)
if (Format.Language == Language)
return StringRef(Format.CanonicalDelimiter);
- }
return "";
}
@@ -197,12 +198,10 @@ RawStringFormatStyleManager::RawStringFormatStyleManager(
LanguageStyle = PredefinedStyle;
}
LanguageStyle->ColumnLimit = CodeStyle.ColumnLimit;
- for (StringRef Delimiter : RawStringFormat.Delimiters) {
+ for (StringRef Delimiter : RawStringFormat.Delimiters)
DelimiterStyle.insert({Delimiter, *LanguageStyle});
- }
- for (StringRef EnclosingFunction : RawStringFormat.EnclosingFunctions) {
+ for (StringRef EnclosingFunction : RawStringFormat.EnclosingFunctions)
EnclosingFunctionStyle.insert({EnclosingFunction, *LanguageStyle});
- }
}
}
@@ -249,14 +248,14 @@ LineState ContinuationIndenter::getInitialState(unsigned FirstIndent,
// preprocessor indent.
if (Style.IndentPPDirectives == FormatStyle::PPDIS_AfterHash &&
(Line->Type == LT_PreprocessorDirective ||
- Line->Type == LT_ImportStatement))
+ Line->Type == LT_ImportStatement)) {
State.Column = 0;
+ }
State.Line = Line;
State.NextToken = Line->First;
State.Stack.push_back(ParenState(/*Tok=*/nullptr, FirstIndent, FirstIndent,
/*AvoidBinPacking=*/false,
/*NoLineBreak=*/false));
- State.LineContainsContinuedForLoopSection = false;
State.NoContinuation = false;
State.StartOfStringLiteral = 0;
State.StartOfLineLevel = 0;
@@ -266,9 +265,10 @@ LineState ContinuationIndenter::getInitialState(unsigned FirstIndent,
if (Style.Language == FormatStyle::LK_TextProto) {
// We need this in order to deal with the bin packing of text fields at
// global scope.
- State.Stack.back().AvoidBinPacking = true;
- State.Stack.back().BreakBeforeParameter = true;
- State.Stack.back().AlignColons = false;
+ auto &CurrentState = State.Stack.back();
+ CurrentState.AvoidBinPacking = true;
+ CurrentState.BreakBeforeParameter = true;
+ CurrentState.AlignColons = false;
}
// The first token has already been indented and thus consumed.
@@ -279,17 +279,20 @@ LineState ContinuationIndenter::getInitialState(unsigned FirstIndent,
bool ContinuationIndenter::canBreak(const LineState &State) {
const FormatToken &Current = *State.NextToken;
const FormatToken &Previous = *Current.Previous;
+ const auto &CurrentState = State.Stack.back();
assert(&Previous == Current.Previous);
- if (!Current.CanBreakBefore && !(State.Stack.back().BreakBeforeClosingBrace &&
- Current.closesBlockOrBlockTypeList(Style)))
+ if (!Current.CanBreakBefore && !(CurrentState.BreakBeforeClosingBrace &&
+ Current.closesBlockOrBlockTypeList(Style))) {
return false;
+ }
// The opening "{" of a braced list has to be on the same line as the first
// element if it is nested in another braced init list or function call.
if (!Current.MustBreakBefore && Previous.is(tok::l_brace) &&
Previous.isNot(TT_DictLiteral) && Previous.is(BK_BracedInit) &&
Previous.Previous &&
- Previous.Previous->isOneOf(tok::l_brace, tok::l_paren, tok::comma))
+ Previous.Previous->isOneOf(tok::l_brace, tok::l_paren, tok::comma)) {
return false;
+ }
// This prevents breaks like:
// ...
// SomeParameter, OtherParameter).DoSomething(
@@ -297,17 +300,19 @@ bool ContinuationIndenter::canBreak(const LineState &State) {
// As they hide "DoSomething" and are generally bad for readability.
if (Previous.opensScope() && Previous.isNot(tok::l_brace) &&
State.LowestLevelOnLine < State.StartOfLineLevel &&
- State.LowestLevelOnLine < Current.NestingLevel)
+ State.LowestLevelOnLine < Current.NestingLevel) {
return false;
- if (Current.isMemberAccess() && State.Stack.back().ContainsUnwrappedBuilder)
+ }
+ if (Current.isMemberAccess() && CurrentState.ContainsUnwrappedBuilder)
return false;
// Don't create a 'hanging' indent if there are multiple blocks in a single
// statement.
if (Previous.is(tok::l_brace) && State.Stack.size() > 1 &&
State.Stack[State.Stack.size() - 2].NestedBlockInlined &&
- State.Stack[State.Stack.size() - 2].HasMultipleNestedBlocks)
+ State.Stack[State.Stack.size() - 2].HasMultipleNestedBlocks) {
return false;
+ }
// Don't break after very short return types (e.g. "void") as that is often
// unexpected.
@@ -319,31 +324,32 @@ bool ContinuationIndenter::canBreak(const LineState &State) {
// If binary operators are moved to the next line (including commas for some
// styles of constructor initializers), that's always ok.
if (!Current.isOneOf(TT_BinaryOperator, tok::comma) &&
- State.Stack.back().NoLineBreakInOperand)
+ CurrentState.NoLineBreakInOperand) {
return false;
+ }
if (Previous.is(tok::l_square) && Previous.is(TT_ObjCMethodExpr))
return false;
- return !State.Stack.back().NoLineBreak;
+ return !CurrentState.NoLineBreak;
}
bool ContinuationIndenter::mustBreak(const LineState &State) {
const FormatToken &Current = *State.NextToken;
const FormatToken &Previous = *Current.Previous;
+ const auto &CurrentState = State.Stack.back();
if (Style.BraceWrapping.BeforeLambdaBody && Current.CanBreakBefore &&
Current.is(TT_LambdaLBrace) && Previous.isNot(TT_LineComment)) {
auto LambdaBodyLength = getLengthToMatchingParen(Current, State.Stack);
- return (LambdaBodyLength > getColumnLimit(State));
+ return LambdaBodyLength > getColumnLimit(State);
}
if (Current.MustBreakBefore || Current.is(TT_InlineASMColon))
return true;
- if (State.Stack.back().BreakBeforeClosingBrace &&
- Current.closesBlockOrBlockTypeList(Style))
- return true;
- if (State.Stack.back().BreakBeforeClosingParen && Current.is(tok::r_paren))
+ if (CurrentState.BreakBeforeClosingBrace &&
+ Current.closesBlockOrBlockTypeList(Style)) {
return true;
- if (Previous.is(tok::semi) && State.LineContainsContinuedForLoopSection)
+ }
+ if (CurrentState.BreakBeforeClosingParen && Current.is(tok::r_paren))
return true;
if (Style.Language == FormatStyle::LK_ObjC &&
Style.ObjCBreakBeforeNestedBlockParam &&
@@ -353,9 +359,10 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
}
// Avoid producing inconsistent states by requiring breaks where they are not
// permitted for C# generic type constraints.
- if (State.Stack.back().IsCSharpGenericTypeConstraint &&
- Previous.isNot(TT_CSharpGenericTypeConstraintComma))
+ if (CurrentState.IsCSharpGenericTypeConstraint &&
+ Previous.isNot(TT_CSharpGenericTypeConstraintComma)) {
return false;
+ }
if ((startsNextParameter(Current, Style) || Previous.is(tok::semi) ||
(Previous.is(TT_TemplateCloser) && Current.is(TT_StartOfName) &&
Style.isCpp() &&
@@ -368,23 +375,26 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
Previous.isNot(tok::question)) ||
(!Style.BreakBeforeTernaryOperators &&
Previous.is(TT_ConditionalExpr))) &&
- State.Stack.back().BreakBeforeParameter && !Current.isTrailingComment() &&
- !Current.isOneOf(tok::r_paren, tok::r_brace))
+ CurrentState.BreakBeforeParameter && !Current.isTrailingComment() &&
+ !Current.isOneOf(tok::r_paren, tok::r_brace)) {
return true;
- if (State.Stack.back().IsChainedConditional &&
+ }
+ if (CurrentState.IsChainedConditional &&
((Style.BreakBeforeTernaryOperators && Current.is(TT_ConditionalExpr) &&
Current.is(tok::colon)) ||
(!Style.BreakBeforeTernaryOperators && Previous.is(TT_ConditionalExpr) &&
- Previous.is(tok::colon))))
+ Previous.is(tok::colon)))) {
return true;
+ }
if (((Previous.is(TT_DictLiteral) && Previous.is(tok::l_brace)) ||
(Previous.is(TT_ArrayInitializerLSquare) &&
Previous.ParameterCount > 1) ||
opensProtoMessageField(Previous, Style)) &&
Style.ColumnLimit > 0 &&
getLengthToMatchingParen(Previous, State.Stack) + State.Column - 1 >
- getColumnLimit(State))
+ getColumnLimit(State)) {
return true;
+ }
const FormatToken &BreakConstructorInitializersToken =
Style.BreakConstructorInitializers == FormatStyle::BCIS_AfterColon
@@ -393,46 +403,51 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
if (BreakConstructorInitializersToken.is(TT_CtorInitializerColon) &&
(State.Column + State.Line->Last->TotalLength - Previous.TotalLength >
getColumnLimit(State) ||
- State.Stack.back().BreakBeforeParameter) &&
+ CurrentState.BreakBeforeParameter) &&
(Style.AllowShortFunctionsOnASingleLine != FormatStyle::SFS_All ||
Style.BreakConstructorInitializers != FormatStyle::BCIS_BeforeColon ||
- Style.ColumnLimit != 0))
+ Style.ColumnLimit != 0)) {
return true;
+ }
if (Current.is(TT_ObjCMethodExpr) && !Previous.is(TT_SelectorName) &&
- State.Line->startsWith(TT_ObjCMethodSpecifier))
+ State.Line->startsWith(TT_ObjCMethodSpecifier)) {
return true;
+ }
if (Current.is(TT_SelectorName) && !Previous.is(tok::at) &&
- State.Stack.back().ObjCSelectorNameFound &&
- State.Stack.back().BreakBeforeParameter &&
+ CurrentState.ObjCSelectorNameFound && CurrentState.BreakBeforeParameter &&
(Style.ObjCBreakBeforeNestedBlockParam ||
- !Current.startsSequence(TT_SelectorName, tok::colon, tok::caret)))
+ !Current.startsSequence(TT_SelectorName, tok::colon, tok::caret))) {
return true;
+ }
unsigned NewLineColumn = getNewLineColumn(State);
if (Current.isMemberAccess() && Style.ColumnLimit != 0 &&
State.Column + getLengthToNextOperator(Current) > Style.ColumnLimit &&
(State.Column > NewLineColumn ||
- Current.NestingLevel < State.StartOfLineLevel))
+ Current.NestingLevel < State.StartOfLineLevel)) {
return true;
+ }
if (startsSegmentOfBuilderTypeCall(Current) &&
- (State.Stack.back().CallContinuation != 0 ||
- State.Stack.back().BreakBeforeParameter) &&
+ (CurrentState.CallContinuation != 0 ||
+ CurrentState.BreakBeforeParameter) &&
+      // JavaScript is treated differently here as there is a frequent pattern:
// SomeFunction(function() {
// ...
// }.bind(...));
// FIXME: We should find a more generic solution to this problem.
!(State.Column <= NewLineColumn && Style.isJavaScript()) &&
- !(Previous.closesScopeAfterBlock() && State.Column <= NewLineColumn))
+ !(Previous.closesScopeAfterBlock() && State.Column <= NewLineColumn)) {
return true;
+ }
// If the template declaration spans multiple lines, force wrap before the
// function/class declaration
- if (Previous.ClosesTemplateDeclaration &&
- State.Stack.back().BreakBeforeParameter && Current.CanBreakBefore)
+ if (Previous.ClosesTemplateDeclaration && CurrentState.BreakBeforeParameter &&
+ Current.CanBreakBefore) {
return true;
+ }
if (!State.Line->First->is(tok::kw_enum) && State.Column <= NewLineColumn)
return false;
@@ -443,56 +458,85 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
!Previous.isOneOf(tok::kw_return, tok::lessless, tok::at,
Keywords.kw_dollar) &&
!Previous.isOneOf(TT_InlineASMColon, TT_ConditionalExpr) &&
- nextIsMultilineString(State))
+ nextIsMultilineString(State)) {
return true;
+ }
// Using CanBreakBefore here and below takes care of the decision whether the
// current style uses wrapping before or after operators for the given
// operator.
if (Previous.is(TT_BinaryOperator) && Current.CanBreakBefore) {
- // If we need to break somewhere inside the LHS of a binary expression, we
- // should also break after the operator. Otherwise, the formatting would
- // hide the operator precedence, e.g. in:
- // if (aaaaaaaaaaaaaa ==
- // bbbbbbbbbbbbbb && c) {..
- // For comparisons, we only apply this rule, if the LHS is a binary
- // expression itself as otherwise, the line breaks seem superfluous.
- // We need special cases for ">>" which we have split into two ">" while
- // lexing in order to make template parsing easier.
- bool IsComparison = (Previous.getPrecedence() == prec::Relational ||
- Previous.getPrecedence() == prec::Equality ||
- Previous.getPrecedence() == prec::Spaceship) &&
- Previous.Previous &&
- Previous.Previous->isNot(TT_BinaryOperator); // For >>.
- bool LHSIsBinaryExpr =
- Previous.Previous && Previous.Previous->EndsBinaryExpression;
- if ((!IsComparison || LHSIsBinaryExpr) && !Current.isTrailingComment() &&
- Previous.getPrecedence() != prec::Assignment &&
- State.Stack.back().BreakBeforeParameter)
- return true;
+ const auto PreviousPrecedence = Previous.getPrecedence();
+ if (PreviousPrecedence != prec::Assignment &&
+ CurrentState.BreakBeforeParameter && !Current.isTrailingComment()) {
+ const bool LHSIsBinaryExpr =
+ Previous.Previous && Previous.Previous->EndsBinaryExpression;
+ if (LHSIsBinaryExpr)
+ return true;
+ // If we need to break somewhere inside the LHS of a binary expression, we
+ // should also break after the operator. Otherwise, the formatting would
+ // hide the operator precedence, e.g. in:
+ // if (aaaaaaaaaaaaaa ==
+ // bbbbbbbbbbbbbb && c) {..
+ // For comparisons, we only apply this rule, if the LHS is a binary
+ // expression itself as otherwise, the line breaks seem superfluous.
+ // We need special cases for ">>" which we have split into two ">" while
+ // lexing in order to make template parsing easier.
+ const bool IsComparison =
+ (PreviousPrecedence == prec::Relational ||
+ PreviousPrecedence == prec::Equality ||
+ PreviousPrecedence == prec::Spaceship) &&
+ Previous.Previous &&
+ Previous.Previous->isNot(TT_BinaryOperator); // For >>.
+ if (!IsComparison)
+ return true;
+ }
} else if (Current.is(TT_BinaryOperator) && Current.CanBreakBefore &&
- State.Stack.back().BreakBeforeParameter) {
+ CurrentState.BreakBeforeParameter) {
return true;
}
// Same as above, but for the first "<<" operator.
if (Current.is(tok::lessless) && Current.isNot(TT_OverloadedOperator) &&
- State.Stack.back().BreakBeforeParameter &&
- State.Stack.back().FirstLessLess == 0)
+ CurrentState.BreakBeforeParameter && CurrentState.FirstLessLess == 0) {
return true;
+ }
if (Current.NestingLevel == 0 && !Current.isTrailingComment()) {
- // Always break after "template <...>" and leading annotations. This is only
- // for cases where the entire line does not fit on a single line as a
+ // Always break after "template <...>"(*) and leading annotations. This is
+ // only for cases where the entire line does not fit on a single line as a
// different LineFormatter would be used otherwise.
- if (Previous.ClosesTemplateDeclaration)
+ // *: Except when another option interferes with that, like concepts.
+ if (Previous.ClosesTemplateDeclaration) {
+ if (Current.is(tok::kw_concept)) {
+ switch (Style.BreakBeforeConceptDeclarations) {
+ case FormatStyle::BBCDS_Allowed:
+ break;
+ case FormatStyle::BBCDS_Always:
+ return true;
+ case FormatStyle::BBCDS_Never:
+ return false;
+ }
+ }
+ if (Current.is(TT_RequiresClause)) {
+ switch (Style.RequiresClausePosition) {
+ case FormatStyle::RCPS_SingleLine:
+ case FormatStyle::RCPS_WithPreceding:
+ return false;
+ default:
+ return true;
+ }
+ }
return Style.AlwaysBreakTemplateDeclarations != FormatStyle::BTDS_No;
+ }
if (Previous.is(TT_FunctionAnnotationRParen) &&
- State.Line->Type != LT_PreprocessorDirective)
+ State.Line->Type != LT_PreprocessorDirective) {
return true;
+ }
if (Previous.is(TT_LeadingJavaAnnotation) && Current.isNot(tok::l_paren) &&
- Current.isNot(TT_LeadingJavaAnnotation))
+ Current.isNot(TT_LeadingJavaAnnotation)) {
return true;
+ }
}
if (Style.isJavaScript() && Previous.is(tok::r_paren) &&
@@ -514,21 +558,25 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
// name.
!Style.isJavaScript()) ||
(Current.is(tok::kw_operator) && !Previous.is(tok::coloncolon))) &&
- !Previous.is(tok::kw_template) && State.Stack.back().BreakBeforeParameter)
+ !Previous.is(tok::kw_template) && CurrentState.BreakBeforeParameter) {
return true;
+ }
// The following could be precomputed as they do not depend on the state.
// However, as they should take effect only if the UnwrappedLine does not fit
// into the ColumnLimit, they are checked here in the ContinuationIndenter.
if (Style.ColumnLimit != 0 && Previous.is(BK_Block) &&
- Previous.is(tok::l_brace) && !Current.isOneOf(tok::r_brace, tok::comment))
+ Previous.is(tok::l_brace) &&
+ !Current.isOneOf(tok::r_brace, tok::comment)) {
return true;
+ }
if (Current.is(tok::lessless) &&
((Previous.is(tok::identifier) && Previous.TokenText == "endl") ||
(Previous.Tok.isLiteral() && (Previous.TokenText.endswith("\\n\"") ||
- Previous.TokenText == "\'\\n\'"))))
+ Previous.TokenText == "\'\\n\'")))) {
return true;
+ }
if (Previous.is(TT_BlockComment) && Previous.IsMultiline)
return true;
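The mechanical change running through this whole patch caches the top of the paren-state stack in a reference instead of spelling out State.Stack.back() at each use. A sketch of the pattern and its one pitfall, both visible verbatim in later hunks:

    auto &CurrentState = State.Stack.back();  // alias of the top ParenState
    CurrentState.BreakBeforeParameter = true; // same effect as before
    // Pitfall: the reference is invalidated whenever something pushes to or
    // pops from State.Stack (e.g. moveStatePastFakeLParens), so the code
    // re-reads State.Stack.back() after such calls.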
@@ -583,21 +631,22 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
FormatToken &Current = *State.NextToken;
assert(State.NextToken->Previous);
const FormatToken &Previous = *State.NextToken->Previous;
+ auto &CurrentState = State.Stack.back();
if (Current.is(tok::equal) &&
(State.Line->First->is(tok::kw_for) || Current.NestingLevel == 0) &&
- State.Stack.back().VariablePos == 0) {
- State.Stack.back().VariablePos = State.Column;
+ CurrentState.VariablePos == 0) {
+ CurrentState.VariablePos = State.Column;
// Move over * and & if they are bound to the variable name.
const FormatToken *Tok = &Previous;
- while (Tok && State.Stack.back().VariablePos >= Tok->ColumnWidth) {
- State.Stack.back().VariablePos -= Tok->ColumnWidth;
+ while (Tok && CurrentState.VariablePos >= Tok->ColumnWidth) {
+ CurrentState.VariablePos -= Tok->ColumnWidth;
if (Tok->SpacesRequiredBefore != 0)
break;
Tok = Tok->Previous;
}
if (Previous.PartOfMultiVariableDeclStmt)
- State.Stack.back().LastSpace = State.Stack.back().VariablePos;
+ CurrentState.LastSpace = CurrentState.VariablePos;
}
unsigned Spaces = Current.SpacesRequiredBefore + ExtraSpaces;
@@ -618,31 +667,32 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
PPColumnCorrection = -1;
}
- if (!DryRun)
+ if (!DryRun) {
Whitespaces.replaceWhitespace(Current, /*Newlines=*/0, Spaces,
State.Column + Spaces + PPColumnCorrection);
+ }
// If "BreakBeforeInheritanceComma" mode, don't break within the inheritance
// declaration unless there is multiple inheritance.
if (Style.BreakInheritanceList == FormatStyle::BILS_BeforeComma &&
- Current.is(TT_InheritanceColon))
- State.Stack.back().NoLineBreak = true;
+ Current.is(TT_InheritanceColon)) {
+ CurrentState.NoLineBreak = true;
+ }
if (Style.BreakInheritanceList == FormatStyle::BILS_AfterColon &&
- Previous.is(TT_InheritanceColon))
- State.Stack.back().NoLineBreak = true;
-
- if (Current.is(TT_SelectorName) &&
- !State.Stack.back().ObjCSelectorNameFound) {
- unsigned MinIndent =
- std::max(State.FirstIndent + Style.ContinuationIndentWidth,
- State.Stack.back().Indent);
+ Previous.is(TT_InheritanceColon)) {
+ CurrentState.NoLineBreak = true;
+ }
+
+ if (Current.is(TT_SelectorName) && !CurrentState.ObjCSelectorNameFound) {
+ unsigned MinIndent = std::max(
+ State.FirstIndent + Style.ContinuationIndentWidth, CurrentState.Indent);
unsigned FirstColonPos = State.Column + Spaces + Current.ColumnWidth;
if (Current.LongestObjCSelectorName == 0)
- State.Stack.back().AlignColons = false;
+ CurrentState.AlignColons = false;
else if (MinIndent + Current.LongestObjCSelectorName > FirstColonPos)
- State.Stack.back().ColonPos = MinIndent + Current.LongestObjCSelectorName;
+ CurrentState.ColonPos = MinIndent + Current.LongestObjCSelectorName;
else
- State.Stack.back().ColonPos = FirstColonPos;
+ CurrentState.ColonPos = FirstColonPos;
}
// In "AlwaysBreak" or "BlockIndent" mode, enforce wrapping directly after the
@@ -665,36 +715,39 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
// caaaaaaaaaaaall(
// caaaaaaaaaaaaaaaaaaaaaaall(aaaaaaaaaaaaaa, aaaaaaaaa))));
Current.FakeLParens.size() > 0 &&
- Current.FakeLParens.back() > prec::Unknown)
- State.Stack.back().NoLineBreak = true;
+ Current.FakeLParens.back() > prec::Unknown) {
+ CurrentState.NoLineBreak = true;
+ }
if (Previous.is(TT_TemplateString) && Previous.opensScope())
- State.Stack.back().NoLineBreak = true;
+ CurrentState.NoLineBreak = true;
if (Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign &&
- !State.Stack.back().IsCSharpGenericTypeConstraint &&
- Previous.opensScope() && Previous.isNot(TT_ObjCMethodExpr) &&
+ !CurrentState.IsCSharpGenericTypeConstraint && Previous.opensScope() &&
+ Previous.isNot(TT_ObjCMethodExpr) && Previous.isNot(TT_RequiresClause) &&
(Current.isNot(TT_LineComment) || Previous.is(BK_BracedInit))) {
- State.Stack.back().Indent = State.Column + Spaces;
- State.Stack.back().IsAligned = true;
+ CurrentState.Indent = State.Column + Spaces;
+ CurrentState.IsAligned = true;
}
- if (State.Stack.back().AvoidBinPacking && startsNextParameter(Current, Style))
- State.Stack.back().NoLineBreak = true;
+ if (CurrentState.AvoidBinPacking && startsNextParameter(Current, Style))
+ CurrentState.NoLineBreak = true;
if (startsSegmentOfBuilderTypeCall(Current) &&
- State.Column > getNewLineColumn(State))
- State.Stack.back().ContainsUnwrappedBuilder = true;
+ State.Column > getNewLineColumn(State)) {
+ CurrentState.ContainsUnwrappedBuilder = true;
+ }
if (Current.is(TT_LambdaArrow) && Style.Language == FormatStyle::LK_Java)
- State.Stack.back().NoLineBreak = true;
+ CurrentState.NoLineBreak = true;
if (Current.isMemberAccess() && Previous.is(tok::r_paren) &&
(Previous.MatchingParen &&
- (Previous.TotalLength - Previous.MatchingParen->TotalLength > 10)))
+ (Previous.TotalLength - Previous.MatchingParen->TotalLength > 10))) {
// If there is a function call with long parameters, break before trailing
// calls. This prevents things like:
// EXPECT_CALL(SomeLongParameter).Times(
// 2);
// We don't want to do this for short parameters as they can just be
// indexes.
- State.Stack.back().NoLineBreak = true;
+ CurrentState.NoLineBreak = true;
+ }
// Don't allow the RHS of an operator to be split over multiple lines unless
// there is a line-break right after the operator.
@@ -721,8 +774,9 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
if ((!BreakBeforeOperator &&
!(HasTwoOperands &&
Style.AlignOperands != FormatStyle::OAS_DontAlign)) ||
- (!State.Stack.back().LastOperatorWrapped && BreakBeforeOperator))
- State.Stack.back().NoLineBreakInOperand = true;
+ (!CurrentState.LastOperatorWrapped && BreakBeforeOperator)) {
+ CurrentState.NoLineBreakInOperand = true;
+ }
}
State.Column += Spaces;
@@ -731,18 +785,18 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
(Previous.Previous->is(tok::kw_for) || Previous.Previous->isIf())) {
// Treat the condition inside an if as if it was a second function
// parameter, i.e. let nested calls have a continuation indent.
- State.Stack.back().LastSpace = State.Column;
- State.Stack.back().NestedBlockIndent = State.Column;
+ CurrentState.LastSpace = State.Column;
+ CurrentState.NestedBlockIndent = State.Column;
} else if (!Current.isOneOf(tok::comment, tok::caret) &&
((Previous.is(tok::comma) &&
!Previous.is(TT_OverloadedOperator)) ||
(Previous.is(tok::colon) && Previous.is(TT_ObjCMethodExpr)))) {
- State.Stack.back().LastSpace = State.Column;
+ CurrentState.LastSpace = State.Column;
} else if (Previous.is(TT_CtorInitializerColon) &&
Style.BreakConstructorInitializers ==
FormatStyle::BCIS_AfterColon) {
- State.Stack.back().Indent = State.Column;
- State.Stack.back().LastSpace = State.Column;
+ CurrentState.Indent = State.Column;
+ CurrentState.LastSpace = State.Column;
} else if ((Previous.isOneOf(TT_BinaryOperator, TT_ConditionalExpr,
TT_CtorInitializerColon)) &&
((Previous.getPrecedence() != prec::Assignment &&
@@ -753,26 +807,25 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
// assignment without binary expression on the RHS. Also indent relative to
// unary operators and the colons of constructor initializers.
if (Style.BreakBeforeBinaryOperators == FormatStyle::BOS_None)
- State.Stack.back().LastSpace = State.Column;
+ CurrentState.LastSpace = State.Column;
} else if (Previous.is(TT_InheritanceColon)) {
- State.Stack.back().Indent = State.Column;
- State.Stack.back().LastSpace = State.Column;
+ CurrentState.Indent = State.Column;
+ CurrentState.LastSpace = State.Column;
} else if (Current.is(TT_CSharpGenericTypeConstraintColon)) {
- State.Stack.back().ColonPos = State.Column;
+ CurrentState.ColonPos = State.Column;
} else if (Previous.opensScope()) {
// If a function has a trailing call, indent all parameters from the
// opening parenthesis. This avoids confusing indents like:
// OuterFunction(InnerFunctionCall( // break
// ParameterToInnerFunction)) // break
// .SecondInnerFunctionCall();
- bool HasTrailingCall = false;
if (Previous.MatchingParen) {
const FormatToken *Next = Previous.MatchingParen->getNextNonComment();
- HasTrailingCall = Next && Next->isMemberAccess();
+ if (Next && Next->isMemberAccess() && State.Stack.size() > 1 &&
+ State.Stack[State.Stack.size() - 2].CallContinuation == 0) {
+ CurrentState.LastSpace = State.Column;
+ }
}
- if (HasTrailingCall && State.Stack.size() > 1 &&
- State.Stack[State.Stack.size() - 2].CallContinuation == 0)
- State.Stack.back().LastSpace = State.Column;
}
}
@@ -781,6 +834,7 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
FormatToken &Current = *State.NextToken;
assert(State.NextToken->Previous);
const FormatToken &Previous = *State.NextToken->Previous;
+ auto &CurrentState = State.Stack.back();
// Extra penalty that needs to be added because of the way certain line
// breaks are chosen.
@@ -792,20 +846,20 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
NextNonComment = &Current;
// The first line break on any NestingLevel causes an extra penalty in order
// to prefer similar line breaks.
- if (!State.Stack.back().ContainsLineBreak)
+ if (!CurrentState.ContainsLineBreak)
Penalty += 15;
- State.Stack.back().ContainsLineBreak = true;
+ CurrentState.ContainsLineBreak = true;
Penalty += State.NextToken->SplitPenalty;
// Breaking before the first "<<" is generally not desirable if the LHS is
// short. Also always add the penalty if the LHS is split over multiple lines
// to avoid unnecessary line breaks that just work around this penalty.
- if (NextNonComment->is(tok::lessless) &&
- State.Stack.back().FirstLessLess == 0 &&
+ if (NextNonComment->is(tok::lessless) && CurrentState.FirstLessLess == 0 &&
(State.Column <= Style.ColumnLimit / 3 ||
- State.Stack.back().BreakBeforeParameter))
+ CurrentState.BreakBeforeParameter)) {
Penalty += Style.PenaltyBreakFirstLessLess;
+ }
State.Column = getNewLineColumn(State);
@@ -821,9 +875,10 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
// member(value),
// looooooooooooooooong_member(
// looooooooooong_call(param_1, param_2, param_3))
- if (State.Column > State.FirstIndent)
+ if (State.Column > State.FirstIndent) {
Penalty +=
Style.PenaltyIndentedWhitespace * (State.Column - State.FirstIndent);
+ }
// Indent nested blocks relative to this column, unless in a very specific
// JavaScript special case where:
@@ -838,28 +893,29 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
if (!Current.is(TT_LambdaArrow) &&
(!Style.isJavaScript() || Current.NestingLevel != 0 ||
!PreviousNonComment || !PreviousNonComment->is(tok::equal) ||
- !Current.isOneOf(Keywords.kw_async, Keywords.kw_function)))
- State.Stack.back().NestedBlockIndent = State.Column;
+ !Current.isOneOf(Keywords.kw_async, Keywords.kw_function))) {
+ CurrentState.NestedBlockIndent = State.Column;
+ }
if (NextNonComment->isMemberAccess()) {
- if (State.Stack.back().CallContinuation == 0)
- State.Stack.back().CallContinuation = State.Column;
+ if (CurrentState.CallContinuation == 0)
+ CurrentState.CallContinuation = State.Column;
} else if (NextNonComment->is(TT_SelectorName)) {
- if (!State.Stack.back().ObjCSelectorNameFound) {
+ if (!CurrentState.ObjCSelectorNameFound) {
if (NextNonComment->LongestObjCSelectorName == 0) {
- State.Stack.back().AlignColons = false;
+ CurrentState.AlignColons = false;
} else {
- State.Stack.back().ColonPos =
+ CurrentState.ColonPos =
(shouldIndentWrappedSelectorName(Style, State.Line->Type)
- ? std::max(State.Stack.back().Indent,
+ ? std::max(CurrentState.Indent,
State.FirstIndent + Style.ContinuationIndentWidth)
- : State.Stack.back().Indent) +
+ : CurrentState.Indent) +
std::max(NextNonComment->LongestObjCSelectorName,
NextNonComment->ColumnWidth);
}
- } else if (State.Stack.back().AlignColons &&
- State.Stack.back().ColonPos <= NextNonComment->ColumnWidth) {
- State.Stack.back().ColonPos = State.Column + NextNonComment->ColumnWidth;
+ } else if (CurrentState.AlignColons &&
+ CurrentState.ColonPos <= NextNonComment->ColumnWidth) {
+ CurrentState.ColonPos = State.Column + NextNonComment->ColumnWidth;
}
} else if (PreviousNonComment && PreviousNonComment->is(tok::colon) &&
PreviousNonComment->isOneOf(TT_ObjCMethodExpr, TT_DictLiteral)) {
@@ -872,26 +928,31 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
// }];
// Thus, we set LastSpace of the next higher NestingLevel, to which we move
// when we consume all of the "}"'s FakeRParens at the "{".
- if (State.Stack.size() > 1)
+ if (State.Stack.size() > 1) {
State.Stack[State.Stack.size() - 2].LastSpace =
- std::max(State.Stack.back().LastSpace, State.Stack.back().Indent) +
+ std::max(CurrentState.LastSpace, CurrentState.Indent) +
Style.ContinuationIndentWidth;
+ }
}
if ((PreviousNonComment &&
PreviousNonComment->isOneOf(tok::comma, tok::semi) &&
- !State.Stack.back().AvoidBinPacking) ||
- Previous.is(TT_BinaryOperator))
- State.Stack.back().BreakBeforeParameter = false;
+ !CurrentState.AvoidBinPacking) ||
+ Previous.is(TT_BinaryOperator)) {
+ CurrentState.BreakBeforeParameter = false;
+ }
if (PreviousNonComment &&
- PreviousNonComment->isOneOf(TT_TemplateCloser, TT_JavaAnnotation) &&
- Current.NestingLevel == 0)
- State.Stack.back().BreakBeforeParameter = false;
+ (PreviousNonComment->isOneOf(TT_TemplateCloser, TT_JavaAnnotation) ||
+ PreviousNonComment->ClosesRequiresClause) &&
+ Current.NestingLevel == 0) {
+ CurrentState.BreakBeforeParameter = false;
+ }
if (NextNonComment->is(tok::question) ||
- (PreviousNonComment && PreviousNonComment->is(tok::question)))
- State.Stack.back().BreakBeforeParameter = true;
+ (PreviousNonComment && PreviousNonComment->is(tok::question))) {
+ CurrentState.BreakBeforeParameter = true;
+ }
if (Current.is(TT_BinaryOperator) && Current.CanBreakBefore)
- State.Stack.back().BreakBeforeParameter = false;
+ CurrentState.BreakBeforeParameter = false;
if (!DryRun) {
unsigned MaxEmptyLinesToKeep = Style.MaxEmptyLinesToKeep + 1;
@@ -909,17 +970,17 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
bool ContinuePPDirective =
State.Line->InPPDirective && State.Line->Type != LT_ImportStatement;
Whitespaces.replaceWhitespace(Current, Newlines, State.Column, State.Column,
- State.Stack.back().IsAligned,
- ContinuePPDirective);
+ CurrentState.IsAligned, ContinuePPDirective);
}
if (!Current.isTrailingComment())
- State.Stack.back().LastSpace = State.Column;
- if (Current.is(tok::lessless))
+ CurrentState.LastSpace = State.Column;
+ if (Current.is(tok::lessless)) {
// If we are breaking before a "<<", we always want to indent relative to
// RHS. This is necessary only for "<<", as we special-case it and don't
// always indent relative to the RHS.
- State.Stack.back().LastSpace += 3; // 3 -> width of "<< ".
+ CurrentState.LastSpace += 3; // 3 -> width of "<< ".
+ }
State.StartOfLineLevel = Current.NestingLevel;
State.LowestLevelOnLine = Current.NestingLevel;
@@ -931,32 +992,41 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
State.Stack[State.Stack.size() - 2].NestedBlockInlined) ||
(Style.Language == FormatStyle::LK_ObjC && Current.is(tok::r_brace) &&
State.Stack.size() > 1 && !Style.ObjCBreakBeforeNestedBlockParam);
+ // Do not force parameter break for statements with requires expressions.
+ NestedBlockSpecialCase =
+ NestedBlockSpecialCase ||
+ (Current.MatchingParen &&
+ Current.MatchingParen->is(TT_RequiresExpressionLBrace));
if (!NestedBlockSpecialCase)
- for (unsigned i = 0, e = State.Stack.size() - 1; i != e; ++i)
- State.Stack[i].BreakBeforeParameter = true;
+ for (ParenState &PState : llvm::drop_end(State.Stack))
+ PState.BreakBeforeParameter = true;
if (PreviousNonComment &&
!PreviousNonComment->isOneOf(tok::comma, tok::colon, tok::semi) &&
- (PreviousNonComment->isNot(TT_TemplateCloser) ||
+ ((PreviousNonComment->isNot(TT_TemplateCloser) &&
+ !PreviousNonComment->ClosesRequiresClause) ||
Current.NestingLevel != 0) &&
!PreviousNonComment->isOneOf(
TT_BinaryOperator, TT_FunctionAnnotationRParen, TT_JavaAnnotation,
TT_LeadingJavaAnnotation) &&
- Current.isNot(TT_BinaryOperator) && !PreviousNonComment->opensScope())
- State.Stack.back().BreakBeforeParameter = true;
+ Current.isNot(TT_BinaryOperator) && !PreviousNonComment->opensScope()) {
+ CurrentState.BreakBeforeParameter = true;
+ }
// If we break after { or the [ of an array initializer, we should also break
// before the corresponding } or ].
if (PreviousNonComment &&
(PreviousNonComment->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) ||
- opensProtoMessageField(*PreviousNonComment, Style)))
- State.Stack.back().BreakBeforeClosingBrace = true;
+ opensProtoMessageField(*PreviousNonComment, Style))) {
+ CurrentState.BreakBeforeClosingBrace = true;
+ }
- if (PreviousNonComment && PreviousNonComment->is(tok::l_paren))
- State.Stack.back().BreakBeforeClosingParen =
+ if (PreviousNonComment && PreviousNonComment->is(tok::l_paren)) {
+ CurrentState.BreakBeforeClosingParen =
Style.AlignAfterOpenBracket == FormatStyle::BAS_BlockIndent;
+ }
- if (State.Stack.back().AvoidBinPacking) {
+ if (CurrentState.AvoidBinPacking) {
// If we are breaking after '(', '{', '<', or this is the break after a ':'
// to start a member initializer list in a constructor, this should not
// be considered bin packing unless the relevant AllowAll option is false or
@@ -972,15 +1042,17 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
!State.Line->MustBeDeclaration) ||
(Style.PackConstructorInitializers != FormatStyle::PCIS_NextLine &&
PreviousIsBreakingCtorInitializerColon) ||
- Previous.is(TT_DictLiteral))
- State.Stack.back().BreakBeforeParameter = true;
+ Previous.is(TT_DictLiteral)) {
+ CurrentState.BreakBeforeParameter = true;
+ }
// If we are breaking after a ':' to start a member initializer list,
// and we allow all arguments on the next line, we should not break
// before the next parameter.
if (PreviousIsBreakingCtorInitializerColon &&
- Style.PackConstructorInitializers == FormatStyle::PCIS_NextLine)
- State.Stack.back().BreakBeforeParameter = false;
+ Style.PackConstructorInitializers == FormatStyle::PCIS_NextLine) {
+ CurrentState.BreakBeforeParameter = false;
+ }
}
return Penalty;
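A rough worked example of the penalty arithmetic above, with invented numbers:

    // ColumnLimit = 80, PenaltyBreakFirstLessLess = 120, SplitPenalty = 4,
    // State.Column = 20, FirstLessLess == 0, no prior break on this level.
    // Penalty += 15;   // first line break on this nesting level
    // Penalty += 4;    // the token's SplitPenalty
    // Penalty += 120;  // 20 <= 80 / 3, so the first-"<<" penalty applies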
@@ -991,15 +1063,17 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
return 0;
FormatToken &Current = *State.NextToken;
+ const auto &CurrentState = State.Stack.back();
- if (State.Stack.back().IsCSharpGenericTypeConstraint &&
- Current.isNot(TT_CSharpGenericTypeConstraint))
- return State.Stack.back().ColonPos + 2;
+ if (CurrentState.IsCSharpGenericTypeConstraint &&
+ Current.isNot(TT_CSharpGenericTypeConstraint)) {
+ return CurrentState.ColonPos + 2;
+ }
const FormatToken &Previous = *Current.Previous;
// If we are continuing an expression, we want to use the continuation indent.
unsigned ContinuationIndent =
- std::max(State.Stack.back().LastSpace, State.Stack.back().Indent) +
+ std::max(CurrentState.LastSpace, CurrentState.Indent) +
Style.ContinuationIndentWidth;
const FormatToken *PreviousNonComment = Current.getPreviousNonComment();
const FormatToken *NextNonComment = Previous.getNextNonComment();
@@ -1008,18 +1082,19 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
// Java specific bits.
if (Style.Language == FormatStyle::LK_Java &&
- Current.isOneOf(Keywords.kw_implements, Keywords.kw_extends))
- return std::max(State.Stack.back().LastSpace,
- State.Stack.back().Indent + Style.ContinuationIndentWidth);
+ Current.isOneOf(Keywords.kw_implements, Keywords.kw_extends)) {
+ return std::max(CurrentState.LastSpace,
+ CurrentState.Indent + Style.ContinuationIndentWidth);
+ }
if (Style.BreakBeforeBraces == FormatStyle::BS_Whitesmiths &&
- State.Line->First->is(tok::kw_enum))
+ State.Line->First->is(tok::kw_enum)) {
return (Style.IndentWidth * State.Line->First->IndentLevel) +
Style.IndentWidth;
+ }
if (NextNonComment->is(tok::l_brace) && NextNonComment->is(BK_Block))
- return Current.NestingLevel == 0 ? State.FirstIndent
- : State.Stack.back().Indent;
+ return Current.NestingLevel == 0 ? State.FirstIndent : CurrentState.Indent;
if ((Current.isOneOf(tok::r_brace, tok::r_square) ||
(Current.is(tok::greater) &&
(Style.Language == FormatStyle::LK_Proto ||
@@ -1048,33 +1123,36 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
// }
if (Current.is(tok::r_paren) && State.Stack.size() > 1 &&
(!Current.Next ||
- Current.Next->isOneOf(tok::semi, tok::kw_const, tok::l_brace)))
+ Current.Next->isOneOf(tok::semi, tok::kw_const, tok::l_brace))) {
return State.Stack[State.Stack.size() - 2].LastSpace;
+ }
if (Style.AlignAfterOpenBracket == FormatStyle::BAS_BlockIndent &&
- Current.is(tok::r_paren) && State.Stack.size() > 1)
+ Current.is(tok::r_paren) && State.Stack.size() > 1) {
return State.Stack[State.Stack.size() - 2].LastSpace;
+ }
if (NextNonComment->is(TT_TemplateString) && NextNonComment->closesScope())
return State.Stack[State.Stack.size() - 2].LastSpace;
if (Current.is(tok::identifier) && Current.Next &&
(Current.Next->is(TT_DictLiteral) ||
((Style.Language == FormatStyle::LK_Proto ||
Style.Language == FormatStyle::LK_TextProto) &&
- Current.Next->isOneOf(tok::less, tok::l_brace))))
- return State.Stack.back().Indent;
+ Current.Next->isOneOf(tok::less, tok::l_brace)))) {
+ return CurrentState.Indent;
+ }
if (NextNonComment->is(TT_ObjCStringLiteral) &&
- State.StartOfStringLiteral != 0)
+ State.StartOfStringLiteral != 0) {
return State.StartOfStringLiteral - 1;
+ }
if (NextNonComment->isStringLiteral() && State.StartOfStringLiteral != 0)
return State.StartOfStringLiteral;
- if (NextNonComment->is(tok::lessless) &&
- State.Stack.back().FirstLessLess != 0)
- return State.Stack.back().FirstLessLess;
+ if (NextNonComment->is(tok::lessless) && CurrentState.FirstLessLess != 0)
+ return CurrentState.FirstLessLess;
if (NextNonComment->isMemberAccess()) {
- if (State.Stack.back().CallContinuation == 0)
+ if (CurrentState.CallContinuation == 0)
return ContinuationIndent;
- return State.Stack.back().CallContinuation;
+ return CurrentState.CallContinuation;
}
- if (State.Stack.back().QuestionColumn != 0 &&
+ if (CurrentState.QuestionColumn != 0 &&
((NextNonComment->is(tok::colon) &&
NextNonComment->is(TT_ConditionalExpr)) ||
Previous.is(TT_ConditionalExpr))) {
@@ -1083,38 +1161,50 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
NextNonComment->Next->FakeLParens.back() == prec::Conditional) ||
(Previous.is(tok::colon) && !Current.FakeLParens.empty() &&
Current.FakeLParens.back() == prec::Conditional)) &&
- !State.Stack.back().IsWrappedConditional) {
+ !CurrentState.IsWrappedConditional) {
// NOTE: we may tweak this slightly:
// * not remove the 'lead' ContinuationIndentWidth
// * always un-indent by the operator when
// BreakBeforeTernaryOperators=true
- unsigned Indent = State.Stack.back().Indent;
- if (Style.AlignOperands != FormatStyle::OAS_DontAlign) {
+ unsigned Indent = CurrentState.Indent;
+ if (Style.AlignOperands != FormatStyle::OAS_DontAlign)
Indent -= Style.ContinuationIndentWidth;
- }
- if (Style.BreakBeforeTernaryOperators &&
- State.Stack.back().UnindentOperator)
+ if (Style.BreakBeforeTernaryOperators && CurrentState.UnindentOperator)
Indent -= 2;
return Indent;
}
- return State.Stack.back().QuestionColumn;
+ return CurrentState.QuestionColumn;
+ }
+ if (Previous.is(tok::comma) && CurrentState.VariablePos != 0)
+ return CurrentState.VariablePos;
+ if (Current.is(TT_RequiresClause)) {
+ if (Style.IndentRequiresClause)
+ return CurrentState.Indent + Style.IndentWidth;
+ switch (Style.RequiresClausePosition) {
+ case FormatStyle::RCPS_OwnLine:
+ case FormatStyle::RCPS_WithFollowing:
+ return CurrentState.Indent;
+ default:
+ break;
+ }
}
- if (Previous.is(tok::comma) && State.Stack.back().VariablePos != 0)
- return State.Stack.back().VariablePos;
if ((PreviousNonComment &&
(PreviousNonComment->ClosesTemplateDeclaration ||
+ PreviousNonComment->ClosesRequiresClause ||
PreviousNonComment->isOneOf(
TT_AttributeParen, TT_AttributeSquare, TT_FunctionAnnotationRParen,
TT_JavaAnnotation, TT_LeadingJavaAnnotation))) ||
(!Style.IndentWrappedFunctionNames &&
- NextNonComment->isOneOf(tok::kw_operator, TT_FunctionDeclarationName)))
- return std::max(State.Stack.back().LastSpace, State.Stack.back().Indent);
+ NextNonComment->isOneOf(tok::kw_operator, TT_FunctionDeclarationName))) {
+ return std::max(CurrentState.LastSpace, CurrentState.Indent);
+ }
if (NextNonComment->is(TT_SelectorName)) {
- if (!State.Stack.back().ObjCSelectorNameFound) {
- unsigned MinIndent = State.Stack.back().Indent;
- if (shouldIndentWrappedSelectorName(Style, State.Line->Type))
+ if (!CurrentState.ObjCSelectorNameFound) {
+ unsigned MinIndent = CurrentState.Indent;
+ if (shouldIndentWrappedSelectorName(Style, State.Line->Type)) {
MinIndent = std::max(MinIndent,
State.FirstIndent + Style.ContinuationIndentWidth);
+ }
// If LongestObjCSelectorName is 0, we are indenting the first
// part of an ObjC selector (or a selector component which is
// not colon-aligned due to block formatting).
@@ -1129,64 +1219,75 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
NextNonComment->ColumnWidth) -
NextNonComment->ColumnWidth;
}
- if (!State.Stack.back().AlignColons)
- return State.Stack.back().Indent;
- if (State.Stack.back().ColonPos > NextNonComment->ColumnWidth)
- return State.Stack.back().ColonPos - NextNonComment->ColumnWidth;
- return State.Stack.back().Indent;
+ if (!CurrentState.AlignColons)
+ return CurrentState.Indent;
+ if (CurrentState.ColonPos > NextNonComment->ColumnWidth)
+ return CurrentState.ColonPos - NextNonComment->ColumnWidth;
+ return CurrentState.Indent;
}
if (NextNonComment->is(tok::colon) && NextNonComment->is(TT_ObjCMethodExpr))
- return State.Stack.back().ColonPos;
+ return CurrentState.ColonPos;
if (NextNonComment->is(TT_ArraySubscriptLSquare)) {
- if (State.Stack.back().StartOfArraySubscripts != 0)
- return State.Stack.back().StartOfArraySubscripts;
- else if (Style.isCSharp()) // C# allows `["key"] = value` inside object
- // initializers.
- return State.Stack.back().Indent;
+ if (CurrentState.StartOfArraySubscripts != 0) {
+ return CurrentState.StartOfArraySubscripts;
+ } else if (Style.isCSharp()) { // C# allows `["key"] = value` inside object
+ // initializers.
+ return CurrentState.Indent;
+ }
return ContinuationIndent;
}
// This ensures that we correctly format ObjC method calls without inputs,
// i.e. where the last element isn't a selector, like: [callee method];
if (NextNonComment->is(tok::identifier) && NextNonComment->FakeRParens == 0 &&
- NextNonComment->Next && NextNonComment->Next->is(TT_ObjCMethodExpr))
- return State.Stack.back().Indent;
+ NextNonComment->Next && NextNonComment->Next->is(TT_ObjCMethodExpr)) {
+ return CurrentState.Indent;
+ }
if (NextNonComment->isOneOf(TT_StartOfName, TT_PointerOrReference) ||
- Previous.isOneOf(tok::coloncolon, tok::equal, TT_JsTypeColon))
+ Previous.isOneOf(tok::coloncolon, tok::equal, TT_JsTypeColon)) {
return ContinuationIndent;
+ }
if (PreviousNonComment && PreviousNonComment->is(tok::colon) &&
- PreviousNonComment->isOneOf(TT_ObjCMethodExpr, TT_DictLiteral))
+ PreviousNonComment->isOneOf(TT_ObjCMethodExpr, TT_DictLiteral)) {
return ContinuationIndent;
+ }
if (NextNonComment->is(TT_CtorInitializerComma))
- return State.Stack.back().Indent;
+ return CurrentState.Indent;
if (PreviousNonComment && PreviousNonComment->is(TT_CtorInitializerColon) &&
- Style.BreakConstructorInitializers == FormatStyle::BCIS_AfterColon)
- return State.Stack.back().Indent;
+ Style.BreakConstructorInitializers == FormatStyle::BCIS_AfterColon) {
+ return CurrentState.Indent;
+ }
if (PreviousNonComment && PreviousNonComment->is(TT_InheritanceColon) &&
- Style.BreakInheritanceList == FormatStyle::BILS_AfterColon)
- return State.Stack.back().Indent;
+ Style.BreakInheritanceList == FormatStyle::BILS_AfterColon) {
+ return CurrentState.Indent;
+ }
if (NextNonComment->isOneOf(TT_CtorInitializerColon, TT_InheritanceColon,
- TT_InheritanceComma))
+ TT_InheritanceComma)) {
return State.FirstIndent + Style.ConstructorInitializerIndentWidth;
+ }
if (Previous.is(tok::r_paren) && !Current.isBinaryOperator() &&
- !Current.isOneOf(tok::colon, tok::comment))
+ !Current.isOneOf(tok::colon, tok::comment)) {
return ContinuationIndent;
+ }
if (Current.is(TT_ProtoExtensionLSquare))
- return State.Stack.back().Indent;
- if (Current.isBinaryOperator() && State.Stack.back().UnindentOperator)
- return State.Stack.back().Indent - Current.Tok.getLength() -
+ return CurrentState.Indent;
+ if (Current.isBinaryOperator() && CurrentState.UnindentOperator) {
+ return CurrentState.Indent - Current.Tok.getLength() -
Current.SpacesRequiredBefore;
+ }
if (Current.isOneOf(tok::comment, TT_BlockComment, TT_LineComment) &&
- NextNonComment->isBinaryOperator() && State.Stack.back().UnindentOperator)
- return State.Stack.back().Indent - NextNonComment->Tok.getLength() -
+ NextNonComment->isBinaryOperator() && CurrentState.UnindentOperator) {
+ return CurrentState.Indent - NextNonComment->Tok.getLength() -
NextNonComment->SpacesRequiredBefore;
- if (State.Stack.back().Indent == State.FirstIndent && PreviousNonComment &&
- !PreviousNonComment->isOneOf(tok::r_brace, TT_CtorInitializerComma))
+ }
+ if (CurrentState.Indent == State.FirstIndent && PreviousNonComment &&
+ !PreviousNonComment->isOneOf(tok::r_brace, TT_CtorInitializerComma)) {
// Ensure that we fall back to the continuation indent width instead of
// just flushing continuations left.
- return State.Stack.back().Indent + Style.ContinuationIndentWidth;
- return State.Stack.back().Indent;
+ return CurrentState.Indent + Style.ContinuationIndentWidth;
+ }
+ return CurrentState.Indent;
}
static bool hasNestedBlockInlined(const FormatToken *Previous,
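The requires-clause branch of getNewLineColumn above resolves to layouts like these. A hedged sketch, assuming the LLVM default IndentWidth of 2:

    // IndentRequiresClause: true indents the clause by IndentWidth:
    template <typename T>
      requires std::integral<T>
    void f(T);

    // RequiresClausePosition: OwnLine with IndentRequiresClause: false:
    template <typename T>
    requires std::integral<T>
    void f(T);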
@@ -1198,57 +1299,67 @@ static bool hasNestedBlockInlined(const FormatToken *Previous,
return true;
// Also a nested block if it contains a lambda inside a function with one parameter.
- return (Style.BraceWrapping.BeforeLambdaBody && Current.is(TT_LambdaLSquare));
+ return Style.BraceWrapping.BeforeLambdaBody && Current.is(TT_LambdaLSquare);
}
unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
bool DryRun, bool Newline) {
assert(State.Stack.size());
const FormatToken &Current = *State.NextToken;
+ auto &CurrentState = State.Stack.back();
if (Current.is(TT_CSharpGenericTypeConstraint))
- State.Stack.back().IsCSharpGenericTypeConstraint = true;
+ CurrentState.IsCSharpGenericTypeConstraint = true;
if (Current.isOneOf(tok::comma, TT_BinaryOperator))
- State.Stack.back().NoLineBreakInOperand = false;
+ CurrentState.NoLineBreakInOperand = false;
if (Current.isOneOf(TT_InheritanceColon, TT_CSharpGenericTypeConstraintColon))
- State.Stack.back().AvoidBinPacking = true;
+ CurrentState.AvoidBinPacking = true;
if (Current.is(tok::lessless) && Current.isNot(TT_OverloadedOperator)) {
- if (State.Stack.back().FirstLessLess == 0)
- State.Stack.back().FirstLessLess = State.Column;
+ if (CurrentState.FirstLessLess == 0)
+ CurrentState.FirstLessLess = State.Column;
else
- State.Stack.back().LastOperatorWrapped = Newline;
+ CurrentState.LastOperatorWrapped = Newline;
}
if (Current.is(TT_BinaryOperator) && Current.isNot(tok::lessless))
- State.Stack.back().LastOperatorWrapped = Newline;
+ CurrentState.LastOperatorWrapped = Newline;
if (Current.is(TT_ConditionalExpr) && Current.Previous &&
- !Current.Previous->is(TT_ConditionalExpr))
- State.Stack.back().LastOperatorWrapped = Newline;
+ !Current.Previous->is(TT_ConditionalExpr)) {
+ CurrentState.LastOperatorWrapped = Newline;
+ }
if (Current.is(TT_ArraySubscriptLSquare) &&
- State.Stack.back().StartOfArraySubscripts == 0)
- State.Stack.back().StartOfArraySubscripts = State.Column;
- if (Current.is(TT_ConditionalExpr) && Current.is(tok::question) &&
- ((Current.MustBreakBefore) ||
- (Current.getNextNonComment() &&
- Current.getNextNonComment()->MustBreakBefore)))
- State.Stack.back().IsWrappedConditional = true;
+ CurrentState.StartOfArraySubscripts == 0) {
+ CurrentState.StartOfArraySubscripts = State.Column;
+ }
+
+ auto IsWrappedConditional = [](const FormatToken &Tok) {
+ if (!(Tok.is(TT_ConditionalExpr) && Tok.is(tok::question)))
+ return false;
+ if (Tok.MustBreakBefore)
+ return true;
+
+ const FormatToken *Next = Tok.getNextNonComment();
+ return Next && Next->MustBreakBefore;
+ };
+ if (IsWrappedConditional(Current))
+ CurrentState.IsWrappedConditional = true;
if (Style.BreakBeforeTernaryOperators && Current.is(tok::question))
- State.Stack.back().QuestionColumn = State.Column;
+ CurrentState.QuestionColumn = State.Column;
if (!Style.BreakBeforeTernaryOperators && Current.isNot(tok::colon)) {
const FormatToken *Previous = Current.Previous;
while (Previous && Previous->isTrailingComment())
Previous = Previous->Previous;
if (Previous && Previous->is(tok::question))
- State.Stack.back().QuestionColumn = State.Column;
+ CurrentState.QuestionColumn = State.Column;
}
if (!Current.opensScope() && !Current.closesScope() &&
- !Current.is(TT_PointerOrReference))
+ !Current.is(TT_PointerOrReference)) {
State.LowestLevelOnLine =
std::min(State.LowestLevelOnLine, Current.NestingLevel);
+ }
if (Current.isMemberAccess())
- State.Stack.back().StartOfFunctionCall =
- !Current.NextOperator ? 0 : State.Column;
+ CurrentState.StartOfFunctionCall = !Current.NextOperator ? 0 : State.Column;
if (Current.is(TT_SelectorName))
- State.Stack.back().ObjCSelectorNameFound = true;
+ CurrentState.ObjCSelectorNameFound = true;
if (Current.is(TT_CtorInitializerColon) &&
Style.BreakConstructorInitializers != FormatStyle::BCIS_AfterColon) {
// Indent 2 from the column, so:
@@ -1256,36 +1367,37 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
// : First(...), ...
// Next(...)
// ^ line up here.
- State.Stack.back().Indent =
- State.Column +
- (Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma
- ? 0
- : 2);
- State.Stack.back().NestedBlockIndent = State.Stack.back().Indent;
+ CurrentState.Indent = State.Column + (Style.BreakConstructorInitializers ==
+ FormatStyle::BCIS_BeforeComma
+ ? 0
+ : 2);
+ CurrentState.NestedBlockIndent = CurrentState.Indent;
if (Style.PackConstructorInitializers > FormatStyle::PCIS_BinPack) {
- State.Stack.back().AvoidBinPacking = true;
- State.Stack.back().BreakBeforeParameter =
+ CurrentState.AvoidBinPacking = true;
+ CurrentState.BreakBeforeParameter =
Style.PackConstructorInitializers != FormatStyle::PCIS_NextLine;
} else {
- State.Stack.back().BreakBeforeParameter = false;
+ CurrentState.BreakBeforeParameter = false;
}
}
if (Current.is(TT_CtorInitializerColon) &&
Style.BreakConstructorInitializers == FormatStyle::BCIS_AfterColon) {
- State.Stack.back().Indent =
+ CurrentState.Indent =
State.FirstIndent + Style.ConstructorInitializerIndentWidth;
- State.Stack.back().NestedBlockIndent = State.Stack.back().Indent;
+ CurrentState.NestedBlockIndent = CurrentState.Indent;
if (Style.PackConstructorInitializers > FormatStyle::PCIS_BinPack)
- State.Stack.back().AvoidBinPacking = true;
+ CurrentState.AvoidBinPacking = true;
}
- if (Current.is(TT_InheritanceColon))
- State.Stack.back().Indent =
+ if (Current.is(TT_InheritanceColon)) {
+ CurrentState.Indent =
State.FirstIndent + Style.ConstructorInitializerIndentWidth;
+ }
if (Current.isOneOf(TT_BinaryOperator, TT_ConditionalExpr) && Newline)
- State.Stack.back().NestedBlockIndent =
- State.Column + Current.ColumnWidth + 1;
+ CurrentState.NestedBlockIndent = State.Column + Current.ColumnWidth + 1;
if (Current.isOneOf(TT_LambdaLSquare, TT_LambdaArrow))
- State.Stack.back().LastSpace = State.Column;
+ CurrentState.LastSpace = State.Column;
+ if (Current.is(TT_RequiresExpression))
+ CurrentState.NestedBlockIndent = State.Column;
// Insert scopes created by fake parenthesis.
const FormatToken *Previous = Current.getPreviousNonComment();
@@ -1296,24 +1408,26 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
// foo();
// bar();
// }, a, b, c);
- if (Current.isNot(tok::comment) && Previous &&
- Previous->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) &&
+ if (Current.isNot(tok::comment) && !Current.ClosesRequiresClause &&
+ Previous && Previous->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) &&
!Previous->is(TT_DictLiteral) && State.Stack.size() > 1 &&
- !State.Stack.back().HasMultipleNestedBlocks) {
+ !CurrentState.HasMultipleNestedBlocks) {
if (State.Stack[State.Stack.size() - 2].NestedBlockInlined && Newline)
- for (unsigned i = 0, e = State.Stack.size() - 1; i != e; ++i)
- State.Stack[i].NoLineBreak = true;
+ for (ParenState &PState : llvm::drop_end(State.Stack))
+ PState.NoLineBreak = true;
State.Stack[State.Stack.size() - 2].NestedBlockInlined = false;
}
if (Previous && (Previous->isOneOf(TT_BinaryOperator, TT_ConditionalExpr) ||
(Previous->isOneOf(tok::l_paren, tok::comma, tok::colon) &&
!Previous->isOneOf(TT_DictLiteral, TT_ObjCMethodExpr)))) {
- State.Stack.back().NestedBlockInlined =
+ CurrentState.NestedBlockInlined =
!Newline && hasNestedBlockInlined(Previous, Current, Style);
}
moveStatePastFakeLParens(State, Newline);
moveStatePastScopeCloser(State);
+ // Do not use CurrentState here, since the two calls above may change the
+ // Stack.
bool AllowBreak = !State.Stack.back().NoLineBreak &&
!State.Stack.back().NoLineBreakInOperand;
moveStatePastScopeOpener(State, Newline);
@@ -1321,13 +1435,14 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
if (Current.is(TT_ObjCStringLiteral) && State.StartOfStringLiteral == 0)
State.StartOfStringLiteral = State.Column + 1;
- if (Current.is(TT_CSharpStringLiteral) && State.StartOfStringLiteral == 0)
+ if (Current.is(TT_CSharpStringLiteral) && State.StartOfStringLiteral == 0) {
State.StartOfStringLiteral = State.Column + 1;
- else if (Current.isStringLiteral() && State.StartOfStringLiteral == 0)
+ } else if (Current.isStringLiteral() && State.StartOfStringLiteral == 0) {
State.StartOfStringLiteral = State.Column;
- else if (!Current.isOneOf(tok::comment, tok::identifier, tok::hash) &&
- !Current.isStringLiteral())
+ } else if (!Current.isOneOf(tok::comment, tok::identifier, tok::hash) &&
+ !Current.isStringLiteral()) {
State.StartOfStringLiteral = 0;
+ }
State.Column += Current.ColumnWidth;
State.NextToken = State.NextToken->Next;
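Background for the next hunk: FakeLParens records operator-precedence levels as virtual scopes, and moveStatePastFakeLParens pushes one ParenState per recorded level. Roughly, for an invented expression:

    // "x = a + b * c;" carries fake parens approximately like
    //   x = ((a + (b * c)));
    // one level at prec::Assignment, one at prec::Additive, one at
    // prec::Multiplicative, each getting its own ParenState.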
@@ -1357,16 +1472,18 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
const FormatToken *Previous = Current.getPreviousNonComment();
// Don't add extra indentation for the first fake parenthesis after
- // 'return', assignments or opening <({[. The indentation for these cases
- // is special cased.
+ // 'return', assignments, opening <({[, or requires clauses. The indentation
+ // for these cases is special cased.
bool SkipFirstExtraIndent =
- (Previous && (Previous->opensScope() ||
- Previous->isOneOf(tok::semi, tok::kw_return) ||
- (Previous->getPrecedence() == prec::Assignment &&
- Style.AlignOperands != FormatStyle::OAS_DontAlign) ||
- Previous->is(TT_ObjCMethodExpr)));
+ Previous &&
+ (Previous->opensScope() ||
+ Previous->isOneOf(tok::semi, tok::kw_return, TT_RequiresClause) ||
+ (Previous->getPrecedence() == prec::Assignment &&
+ Style.AlignOperands != FormatStyle::OAS_DontAlign) ||
+ Previous->is(TT_ObjCMethodExpr));
for (const auto &PrecedenceLevel : llvm::reverse(Current.FakeLParens)) {
- ParenState NewParenState = State.Stack.back();
+ const auto &CurrentState = State.Stack.back();
+ ParenState NewParenState = CurrentState;
NewParenState.Tok = nullptr;
NewParenState.ContainsLineBreak = false;
NewParenState.LastOperatorWrapped = true;
@@ -1374,7 +1491,7 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
NewParenState.IsWrappedConditional = false;
NewParenState.UnindentOperator = false;
NewParenState.NoLineBreak =
- NewParenState.NoLineBreak || State.Stack.back().NoLineBreakInOperand;
+ NewParenState.NoLineBreak || CurrentState.NoLineBreakInOperand;
// Don't propagate AvoidBinPacking into subexpressions of arg/param lists.
if (PrecedenceLevel > prec::Comma)
@@ -1390,14 +1507,13 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
(Style.Language != FormatStyle::LK_Java && PrecedenceLevel > 0)) &&
(Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign ||
PrecedenceLevel != prec::Comma || Current.NestingLevel == 0)) {
- NewParenState.Indent =
- std::max(std::max(State.Column, NewParenState.Indent),
- State.Stack.back().LastSpace);
+ NewParenState.Indent = std::max(
+ std::max(State.Column, NewParenState.Indent), CurrentState.LastSpace);
}
if (Previous &&
(Previous->getPrecedence() == prec::Assignment ||
- Previous->is(tok::kw_return) ||
+ Previous->isOneOf(tok::kw_return, TT_RequiresClause) ||
(PrecedenceLevel == prec::Conditional && Previous->is(tok::question) &&
Previous->is(TT_ConditionalExpr))) &&
!Newline) {
@@ -1419,8 +1535,9 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
if (PrecedenceLevel > prec::Unknown)
NewParenState.LastSpace = std::max(NewParenState.LastSpace, State.Column);
if (PrecedenceLevel != prec::Conditional && !Current.is(TT_UnaryOperator) &&
- Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign)
+ Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign) {
NewParenState.StartOfFunctionCall = State.Column;
+ }
// Indent conditional expressions, unless they are chained "else-if"
// conditionals. Never indent expressions where the 'operator' is ',', ';' or
@@ -1430,7 +1547,7 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
if (PrecedenceLevel == prec::Conditional && Previous &&
Previous->is(tok::colon) && Previous->is(TT_ConditionalExpr) &&
&PrecedenceLevel == &Current.FakeLParens.back() &&
- !State.Stack.back().IsWrappedConditional) {
+ !CurrentState.IsWrappedConditional) {
NewParenState.IsChainedConditional = true;
NewParenState.UnindentOperator = State.Stack.back().UnindentOperator;
} else if (PrecedenceLevel == prec::Conditional ||
@@ -1455,6 +1572,12 @@ void ContinuationIndenter::moveStatePastFakeRParens(LineState &State) {
State.Stack.pop_back();
State.Stack.back().VariablePos = VariablePos;
}
+
+ if (State.NextToken->ClosesRequiresClause && Style.IndentRequiresClause) {
+ // Remove the indentation of the requires clauses (which is not in Indent,
+ // but in LastSpace).
+ State.Stack.back().LastSpace -= Style.IndentWidth;
+ }
}
void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
@@ -1463,10 +1586,13 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
if (!Current.opensScope())
return;
+ const auto &CurrentState = State.Stack.back();
+
// Don't allow '<' or '(' in C# generic type constraints to start new scopes.
if (Current.isOneOf(tok::less, tok::l_paren) &&
- State.Stack.back().IsCSharpGenericTypeConstraint)
+ CurrentState.IsCSharpGenericTypeConstraint) {
return;
+ }
if (Current.MatchingParen && Current.is(BK_Block)) {
moveStateToNewBlock(State);
@@ -1474,18 +1600,18 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
}
unsigned NewIndent;
- unsigned LastSpace = State.Stack.back().LastSpace;
+ unsigned LastSpace = CurrentState.LastSpace;
bool AvoidBinPacking;
bool BreakBeforeParameter = false;
- unsigned NestedBlockIndent = std::max(State.Stack.back().StartOfFunctionCall,
- State.Stack.back().NestedBlockIndent);
+ unsigned NestedBlockIndent = std::max(CurrentState.StartOfFunctionCall,
+ CurrentState.NestedBlockIndent);
if (Current.isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) ||
opensProtoMessageField(Current, Style)) {
if (Current.opensBlockOrBlockTypeList(Style)) {
NewIndent = Style.IndentWidth +
- std::min(State.Column, State.Stack.back().NestedBlockIndent);
+ std::min(State.Column, CurrentState.NestedBlockIndent);
} else {
- NewIndent = State.Stack.back().LastSpace + Style.ContinuationIndentWidth;
+ NewIndent = CurrentState.LastSpace + Style.ContinuationIndentWidth;
}
const FormatToken *NextNoComment = Current.getNextNonComment();
bool EndsInComma = Current.MatchingParen &&
@@ -1502,17 +1628,17 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
if (Current.ParameterCount > 1)
NestedBlockIndent = std::max(NestedBlockIndent, State.Column + 1);
} else {
- NewIndent = Style.ContinuationIndentWidth +
- std::max(State.Stack.back().LastSpace,
- State.Stack.back().StartOfFunctionCall);
+ NewIndent =
+ Style.ContinuationIndentWidth +
+ std::max(CurrentState.LastSpace, CurrentState.StartOfFunctionCall);
// Ensure that different brackets force relative alignment, e.g.:
// void SomeFunction(vector< // break
// int> v);
// FIXME: We likely want to do this for more combinations of brackets.
if (Current.is(tok::less) && Current.ParentBracket == tok::l_paren) {
- NewIndent = std::max(NewIndent, State.Stack.back().Indent);
- LastSpace = std::max(LastSpace, State.Stack.back().Indent);
+ NewIndent = std::max(NewIndent, CurrentState.Indent);
+ LastSpace = std::max(LastSpace, CurrentState.Indent);
}
bool EndsInComma =
@@ -1532,7 +1658,7 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
(State.Line->Type == LT_ObjCDecl && ObjCBinPackProtocolList);
AvoidBinPacking =
- (State.Stack.back().IsCSharpGenericTypeConstraint) ||
+ (CurrentState.IsCSharpGenericTypeConstraint) ||
(Style.isJavaScript() && EndsInComma) ||
(State.Line->MustBeDeclaration && !BinPackDeclaration) ||
(!State.Line->MustBeDeclaration && !Style.BinPackArguments) ||
@@ -1546,8 +1672,9 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
// If this '[' opens an ObjC call, determine whether all parameters fit
// into one line and put one per line if they don't.
if (getLengthToMatchingParen(Current, State.Stack) + State.Column >
- getColumnLimit(State))
+ getColumnLimit(State)) {
BreakBeforeParameter = true;
+ }
} else {
// For ColumnLimit = 0, we have to figure out whether there is or has to
// be a line break within this call.
@@ -1571,33 +1698,32 @@ void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
bool NoLineBreak =
Current.Children.empty() &&
!Current.isOneOf(TT_DictLiteral, TT_ArrayInitializerLSquare) &&
- (State.Stack.back().NoLineBreak ||
- State.Stack.back().NoLineBreakInOperand ||
+ (CurrentState.NoLineBreak || CurrentState.NoLineBreakInOperand ||
(Current.is(TT_TemplateOpener) &&
- State.Stack.back().ContainsUnwrappedBuilder));
+ CurrentState.ContainsUnwrappedBuilder));
State.Stack.push_back(
ParenState(&Current, NewIndent, LastSpace, AvoidBinPacking, NoLineBreak));
- State.Stack.back().NestedBlockIndent = NestedBlockIndent;
- State.Stack.back().BreakBeforeParameter = BreakBeforeParameter;
- State.Stack.back().HasMultipleNestedBlocks =
- (Current.BlockParameterCount > 1);
+ auto &NewState = State.Stack.back();
+ NewState.NestedBlockIndent = NestedBlockIndent;
+ NewState.BreakBeforeParameter = BreakBeforeParameter;
+ NewState.HasMultipleNestedBlocks = (Current.BlockParameterCount > 1);
if (Style.BraceWrapping.BeforeLambdaBody && Current.Next != nullptr &&
- Current.Tok.is(tok::l_paren)) {
+ Current.is(tok::l_paren)) {
// Search for any parameter that is a lambda
FormatToken const *next = Current.Next;
while (next != nullptr) {
if (next->is(TT_LambdaLSquare)) {
- State.Stack.back().HasMultipleNestedBlocks = true;
+ NewState.HasMultipleNestedBlocks = true;
break;
}
next = next->Next;
}
}
- State.Stack.back().IsInsideObjCArrayLiteral =
- Current.is(TT_ArrayInitializerLSquare) && Current.Previous &&
- Current.Previous->is(tok::at);
+ NewState.IsInsideObjCArrayLiteral = Current.is(TT_ArrayInitializerLSquare) &&
+ Current.Previous &&
+ Current.Previous->is(tok::at);
}
void ContinuationIndenter::moveStatePastScopeCloser(LineState &State) {
@@ -1611,8 +1737,11 @@ void ContinuationIndenter::moveStatePastScopeCloser(LineState &State) {
(Current.isOneOf(tok::r_paren, tok::r_square, TT_TemplateString) ||
(Current.is(tok::r_brace) && State.NextToken != State.Line->First) ||
State.NextToken->is(TT_TemplateCloser) ||
- (Current.is(tok::greater) && Current.is(TT_DictLiteral))))
+ (Current.is(tok::greater) && Current.is(TT_DictLiteral)))) {
State.Stack.pop_back();
+ }
+
+ auto &CurrentState = State.Stack.back();
// Reevaluate whether ObjC message arguments fit into one line.
// If a receiver spans multiple lines, e.g.:
@@ -1624,7 +1753,7 @@ void ContinuationIndenter::moveStatePastScopeCloser(LineState &State) {
// considering a line break inside a message receiver).
// We check whether arguments fit after receiver scope closer (into the same
// line).
- if (State.Stack.back().BreakBeforeParameter && Current.MatchingParen &&
+ if (CurrentState.BreakBeforeParameter && Current.MatchingParen &&
Current.MatchingParen->Previous) {
const FormatToken &CurrentScopeOpener = *Current.MatchingParen->Previous;
if (CurrentScopeOpener.is(TT_ObjCMethodExpr) &&
@@ -1633,8 +1762,9 @@ void ContinuationIndenter::moveStatePastScopeCloser(LineState &State) {
getLengthToMatchingParen(CurrentScopeOpener, State.Stack) +
CurrentScopeOpener.TotalLength - Current.TotalLength - 1;
if (State.Column + Current.ColumnWidth + NecessarySpaceInLine <=
- Style.ColumnLimit)
- State.Stack.back().BreakBeforeParameter = false;
+ Style.ColumnLimit) {
+ CurrentState.BreakBeforeParameter = false;
+ }
}
}
@@ -1642,7 +1772,7 @@ void ContinuationIndenter::moveStatePastScopeCloser(LineState &State) {
// If this ends the array subscript expr, reset the corresponding value.
const FormatToken *NextNonComment = Current.getNextNonComment();
if (NextNonComment && NextNonComment->isNot(tok::l_square))
- State.Stack.back().StartOfArraySubscripts = 0;
+ CurrentState.StartOfArraySubscripts = 0;
}
}
@@ -1762,9 +1892,8 @@ unsigned ContinuationIndenter::reformatRawStringLiteral(
auto NewCode = applyAllReplacements(RawText, Fixes.first);
tooling::Replacements NoFixes;
- if (!NewCode) {
+ if (!NewCode)
return addMultilineToken(Current, State);
- }
if (!DryRun) {
if (NewDelimiter != OldDelimiter) {
// In 'R"delimiter(...', the delimiter starts 2 characters after the start
@@ -1922,9 +2051,10 @@ ContinuationIndenter::getRawStringStyle(const FormatToken &Current,
if (!Delimiter)
return None;
auto RawStringStyle = RawStringFormats.getDelimiterStyle(*Delimiter);
- if (!RawStringStyle && Delimiter->empty())
+ if (!RawStringStyle && Delimiter->empty()) {
RawStringStyle = RawStringFormats.getEnclosingFunctionStyle(
getEnclosingFunctionName(Current));
+ }
if (!RawStringStyle)
return None;
RawStringStyle->ColumnLimit = getColumnLimit(State);
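The lookup above picks a style by raw string delimiter and, for empty delimiters, falls back to the enclosing function name. A hypothetical example, assuming a RawStringFormats entry that maps the "pb" delimiter to a TextProto style:

    // The literal's contents get reformatted with the mapped style, using
    // the column limit computed by getColumnLimit(State).
    // ParseTextProto is an invented helper for illustration.
    auto Msg = ParseTextProto(R"pb(key: value)pb");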
@@ -1941,8 +2071,9 @@ ContinuationIndenter::createBreakableToken(const FormatToken &Current,
// don't support.
if (Style.Language == FormatStyle::LK_Java || Style.isJavaScript() ||
Style.isCSharp() || Style.isJson() || !Style.BreakStringLiterals ||
- !AllowBreak)
+ !AllowBreak) {
return nullptr;
+ }
// Don't break string literals inside preprocessor directives (except for
// #define directives, as their contents are stored in separate lines and
@@ -1957,9 +2088,8 @@ ContinuationIndenter::createBreakableToken(const FormatToken &Current,
return nullptr;
// Don't break string literals inside Objective-C array literals (doing so
// raises the warning -Wobjc-string-concatenation).
- if (State.Stack.back().IsInsideObjCArrayLiteral) {
+ if (State.Stack.back().IsInsideObjCArrayLiteral)
return nullptr;
- }
StringRef Text = Current.TokenText;
StringRef Prefix;
@@ -2009,8 +2139,9 @@ ContinuationIndenter::createBreakableToken(const FormatToken &Current,
}();
if (!Style.ReflowComments ||
CommentPragmasRegex.match(Current.TokenText.substr(2)) ||
- switchesFormatting(Current) || !RegularComments)
+ switchesFormatting(Current) || !RegularComments) {
return nullptr;
+ }
return std::make_unique<BreakableLineCommentSection>(
Current, StartColumn, /*InPPDirective=*/false, Encoding, Style);
}
@@ -2099,11 +2230,12 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
if (Split.first == StringRef::npos) {
// No break opportunity - update the penalty and continue with the next
// logical line.
- if (LineIndex < EndIndex - 1)
+ if (LineIndex < EndIndex - 1) {
// The last line's penalty is handled in addNextStateToQueue() or when
// calling replaceWhitespaceAfterLastLine below.
Penalty += Style.PenaltyExcessCharacter *
(ContentStartColumn + RemainingTokenColumns - ColumnLimit);
+ }
LLVM_DEBUG(llvm::dbgs() << " No break opportunity.\n");
break;
}
@@ -2179,9 +2311,10 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
// The current line fits after compressing the whitespace - reflow
// the next line into it if possible.
TryReflow = true;
- if (!DryRun)
+ if (!DryRun) {
Token->compressWhitespace(LineIndex, TailOffset, Split,
Whitespaces);
+ }
// When we continue on the same line, leave one space between content.
ContentStartColumn += ToSplitColumns + 1;
Penalty += ExcessCharactersPenalty;
@@ -2225,9 +2358,10 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
LLVM_DEBUG(llvm::dbgs() << " Breaking at: " << TailOffset + Split.first
<< ", " << Split.second << "\n");
- if (!DryRun)
+ if (!DryRun) {
Token->insertBreak(LineIndex, TailOffset, Split, ContentIndent,
Whitespaces);
+ }
Penalty += NewBreakPenalty;
TailOffset += Split.first + Split.second;
@@ -2239,10 +2373,11 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
// line.
if (LineIndex + 1 != EndIndex) {
unsigned NextLineIndex = LineIndex + 1;
- if (NewBreakBefore)
+ if (NewBreakBefore) {
// After breaking a line, try to reflow the next line into the current
// one once RemainingTokenColumns fits.
TryReflow = true;
+ }
if (TryReflow) {
// We decided that we want to try reflowing the next line into the
// current one.
@@ -2305,9 +2440,8 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
unsigned ExcessCharactersPenalty =
(ContentStartColumn + ToSplitColumns - ColumnLimit) *
Style.PenaltyExcessCharacter;
- if (NewBreakPenalty < ExcessCharactersPenalty) {
+ if (NewBreakPenalty < ExcessCharactersPenalty)
Reflow = false;
- }
}
}
}
@@ -2361,9 +2495,10 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
Penalty += Style.PenaltyExcessCharacter *
(ContentStartColumn + RemainingTokenColumns - ColumnLimit);
- if (!DryRun)
+ if (!DryRun) {
Token->replaceWhitespaceAfterLastLine(TailOffset, SplitAfterLastLine,
Whitespaces);
+ }
ContentStartColumn =
Token->getContentStartColumn(Token->getLineCount() - 1, /*Break=*/true);
RemainingTokenColumns = Token->getRemainingLength(
@@ -2379,10 +2514,9 @@ ContinuationIndenter::breakProtrudingToken(const FormatToken &Current,
// If we break the token inside a parameter list, we need to break before
// the next parameter on all levels, so that the next parameter is clearly
// visible. Line comments already introduce a break.
- if (Current.isNot(TT_LineComment)) {
+ if (Current.isNot(TT_LineComment))
for (ParenState &Paren : State.Stack)
Paren.BreakBeforeParameter = true;
- }
if (Current.is(TT_BlockComment))
State.NoContinuation = true;
@@ -2412,12 +2546,14 @@ bool ContinuationIndenter::nextIsMultilineString(const LineState &State) {
if (Current.IsMultiline)
return true;
if (Current.getNextNonComment() &&
- Current.getNextNonComment()->isStringLiteral())
+ Current.getNextNonComment()->isStringLiteral()) {
return true; // Implicit concatenation.
+ }
if (Style.ColumnLimit != 0 && Style.BreakStringLiterals &&
State.Column + Current.ColumnWidth + Current.UnbreakableTailLength >
- Style.ColumnLimit)
+ Style.ColumnLimit) {
return true; // String will be split.
+ }
return false;
}
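nextIsMultilineString above answers true in three cases; a short illustration with invented literals:

    const char *a = R"(spans
    multiple lines)";          // Current.IsMultiline
    const char *b = "first"
                    "second";  // next token is a string: implicit concatenation
    // Third case: the literal would cross ColumnLimit and
    // BreakStringLiterals is on, so it is about to be split.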
diff --git a/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.h b/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.h
index 0eb53cbd0293..494a9727d5ed 100644
--- a/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.h
+++ b/contrib/llvm-project/clang/lib/Format/ContinuationIndenter.h
@@ -419,9 +419,6 @@ struct LineState {
/// The token that needs to be next formatted.
FormatToken *NextToken;
- /// \c true if this line contains a continued for-loop section.
- bool LineContainsContinuedForLoopSection;
-
/// \c true if \p NextToken should not continue this line.
bool NoContinuation;
@@ -468,9 +465,6 @@ struct LineState {
return NextToken < Other.NextToken;
if (Column != Other.Column)
return Column < Other.Column;
- if (LineContainsContinuedForLoopSection !=
- Other.LineContainsContinuedForLoopSection)
- return LineContainsContinuedForLoopSection;
if (NoContinuation != Other.NoContinuation)
return NoContinuation;
if (StartOfLineLevel != Other.StartOfLineLevel)
diff --git a/contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.cpp b/contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.cpp
index cfb019a471dc..f6edcd13ecd6 100644
--- a/contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.cpp
+++ b/contrib/llvm-project/clang/lib/Format/DefinitionBlockSeparator.cpp
@@ -45,8 +45,9 @@ void DefinitionBlockSeparator::separateBlocks(
auto LikelyDefinition = [&](const AnnotatedLine *Line,
bool ExcludeEnum = false) {
if ((Line->MightBeFunctionDecl && Line->mightBeFunctionDefinition()) ||
- Line->startsWithNamespace())
+ Line->startsWithNamespace()) {
return true;
+ }
int BracketLevel = 0;
for (const FormatToken *CurrentToken = Line->First; CurrentToken;
CurrentToken = CurrentToken->Next) {
@@ -54,8 +55,9 @@ void DefinitionBlockSeparator::separateBlocks(
if ((CurrentToken->isOneOf(tok::kw_class, tok::kw_struct,
tok::kw_union) ||
(Style.isJavaScript() &&
- CurrentToken->is(ExtraKeywords.kw_function))))
+ CurrentToken->is(ExtraKeywords.kw_function)))) {
return true;
+ }
if (!ExcludeEnum && CurrentToken->is(tok::kw_enum))
return true;
}
@@ -92,8 +94,9 @@ void DefinitionBlockSeparator::separateBlocks(
return;
if (IsAccessSpecifierToken(TargetToken) ||
(OpeningLineIndex > 0 &&
- IsAccessSpecifierToken(Lines[OpeningLineIndex - 1]->First)))
+ IsAccessSpecifierToken(Lines[OpeningLineIndex - 1]->First))) {
return;
+ }
if (!TargetLine->Affected)
return;
Whitespaces.replaceWhitespace(*TargetToken, NewlineToInsert,
@@ -156,8 +159,9 @@ void DefinitionBlockSeparator::separateBlocks(
if (NextLine->MightBeFunctionDecl &&
NextLine->mightBeFunctionDefinition() &&
NextLine->First->NewlinesBefore == 1 &&
- OperateLine->First->is(TT_FunctionLikeOrFreestandingMacro))
+ OperateLine->First->is(TT_FunctionLikeOrFreestandingMacro)) {
return true;
+ }
}
if ((Style.isCSharp() && OperateLine->First->is(TT_AttributeSquare)))
@@ -183,10 +187,9 @@ void DefinitionBlockSeparator::separateBlocks(
TargetToken = TargetLine->First;
while (TargetToken && !TargetToken->is(tok::r_brace))
TargetToken = TargetToken->Next;
- if (!TargetToken) {
+ if (!TargetToken)
while (I < Lines.size() && !Lines[I]->First->is(tok::r_brace))
++I;
- }
} else if (CurrentLine->First->closesScope()) {
if (OpeningLineIndex > Lines.size())
continue;
@@ -195,8 +198,9 @@ void DefinitionBlockSeparator::separateBlocks(
// misrecognition.
if (OpeningLineIndex > 0 &&
Lines[OpeningLineIndex]->First->is(tok::l_brace) &&
- Lines[OpeningLineIndex - 1]->Last->isNot(tok::l_brace))
+ Lines[OpeningLineIndex - 1]->Last->isNot(tok::l_brace)) {
--OpeningLineIndex;
+ }
OpeningLine = Lines[OpeningLineIndex];
// Closing a function definition.
if (LikelyDefinition(OpeningLine)) {
@@ -210,8 +214,9 @@ void DefinitionBlockSeparator::separateBlocks(
// Avoid duplicated replacement.
if (TargetToken->isNot(tok::l_brace))
InsertReplacement(NewlineCount);
- } else if (IsNeverStyle)
+ } else if (IsNeverStyle) {
InsertReplacement(OpeningLineIndex != 0);
+ }
}
}
@@ -227,8 +232,9 @@ void DefinitionBlockSeparator::separateBlocks(
if (!TargetToken->closesScope() && !IsPPConditional(OpeningLineIndex)) {
// Check whether current line may precede a definition line.
while (OpeningLineIndex + 1 < Lines.size() &&
- MayPrecedeDefinition(/*Direction=*/0))
+ MayPrecedeDefinition(/*Direction=*/0)) {
++OpeningLineIndex;
+ }
TargetLine = Lines[OpeningLineIndex];
if (!LikelyDefinition(TargetLine)) {
OpeningLineIndex = I + 1;
@@ -236,16 +242,18 @@ void DefinitionBlockSeparator::separateBlocks(
TargetToken = TargetLine->First;
InsertReplacement(NewlineCount);
}
- } else if (IsNeverStyle)
+ } else if (IsNeverStyle) {
InsertReplacement(/*NewlineToInsert=*/1);
+ }
}
}
- for (const auto &R : Whitespaces.generateReplacements())
+ for (const auto &R : Whitespaces.generateReplacements()) {
// The add method returns an llvm::Error, which simulates a program exit
// code by overloading the boolean operator; thus false here indicates
// success.
if (Result.add(R))
return;
+ }
}
} // namespace format
} // namespace clang
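On the Result.add(R) convention in the loop above: tooling::Replacements::add returns an llvm::Error whose boolean conversion is true exactly when it holds a failure. A minimal sketch of the same idiom against the public Tooling API (the helper name addAll is invented):

    #include "clang/Tooling/Core/Replacement.h"
    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/Support/Error.h"

    // Returns true when every replacement was added without conflicts.
    static bool addAll(clang::tooling::Replacements &Result,
                       llvm::ArrayRef<clang::tooling::Replacement> Rs) {
      for (const auto &R : Rs) {
        if (llvm::Error Err = Result.add(R)) { // true <=> failure
          llvm::consumeError(std::move(Err));  // a real caller would report it
          return false;
        }
      }
      return true;
    }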
diff --git a/contrib/llvm-project/clang/lib/Format/Format.cpp b/contrib/llvm-project/clang/lib/Format/Format.cpp
index dd4755c2227e..51526dc2a681 100644
--- a/contrib/llvm-project/clang/lib/Format/Format.cpp
+++ b/contrib/llvm-project/clang/lib/Format/Format.cpp
@@ -18,6 +18,7 @@
#include "ContinuationIndenter.h"
#include "DefinitionBlockSeparator.h"
#include "FormatInternal.h"
+#include "FormatToken.h"
#include "FormatTokenLexer.h"
#include "NamespaceEndCommentsFixer.h"
#include "QualifierAlignmentFixer.h"
@@ -34,6 +35,7 @@
#include "clang/Lex/Lexer.h"
#include "clang/Tooling/Inclusions/HeaderIncludes.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/Sequence.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Debug.h"
@@ -44,7 +46,6 @@
#include <algorithm>
#include <memory>
#include <mutex>
-#include <numeric>
#include <string>
#include <unordered_map>
@@ -151,18 +152,55 @@ template <> struct ScalarEnumerationTraits<FormatStyle::ShortFunctionStyle> {
}
};
-template <> struct ScalarEnumerationTraits<FormatStyle::AlignConsecutiveStyle> {
- static void enumeration(IO &IO, FormatStyle::AlignConsecutiveStyle &Value) {
- IO.enumCase(Value, "None", FormatStyle::ACS_None);
- IO.enumCase(Value, "Consecutive", FormatStyle::ACS_Consecutive);
- IO.enumCase(Value, "AcrossEmptyLines", FormatStyle::ACS_AcrossEmptyLines);
- IO.enumCase(Value, "AcrossComments", FormatStyle::ACS_AcrossComments);
+template <> struct MappingTraits<FormatStyle::AlignConsecutiveStyle> {
+ static void enumInput(IO &IO, FormatStyle::AlignConsecutiveStyle &Value) {
+ IO.enumCase(Value, "None",
+ FormatStyle::AlignConsecutiveStyle(
+ {/*Enabled=*/false, /*AcrossEmptyLines=*/false,
+ /*AcrossComments=*/false, /*AlignCompound=*/false,
+ /*PadOperators=*/true}));
+ IO.enumCase(Value, "Consecutive",
+ FormatStyle::AlignConsecutiveStyle(
+ {/*Enabled=*/true, /*AcrossEmptyLines=*/false,
+ /*AcrossComments=*/false, /*AlignCompound=*/false,
+ /*PadOperators=*/true}));
+ IO.enumCase(Value, "AcrossEmptyLines",
+ FormatStyle::AlignConsecutiveStyle(
+ {/*Enabled=*/true, /*AcrossEmptyLines=*/true,
+ /*AcrossComments=*/false, /*AlignCompound=*/false,
+ /*PadOperators=*/true}));
+ IO.enumCase(Value, "AcrossComments",
+ FormatStyle::AlignConsecutiveStyle({/*Enabled=*/true,
+ /*AcrossEmptyLines=*/false,
+ /*AcrossComments=*/true,
+ /*AlignCompound=*/false,
+ /*PadOperators=*/true}));
IO.enumCase(Value, "AcrossEmptyLinesAndComments",
- FormatStyle::ACS_AcrossEmptyLinesAndComments);
+ FormatStyle::AlignConsecutiveStyle({/*Enabled=*/true,
+ /*AcrossEmptyLines=*/true,
+ /*AcrossComments=*/true,
+ /*AlignCompound=*/false,
+ /*PadOperators=*/true}));
// For backward compatibility.
- IO.enumCase(Value, "true", FormatStyle::ACS_Consecutive);
- IO.enumCase(Value, "false", FormatStyle::ACS_None);
+ IO.enumCase(Value, "true",
+ FormatStyle::AlignConsecutiveStyle(
+ {/*Enabled=*/true, /*AcrossEmptyLines=*/false,
+ /*AcrossComments=*/false, /*AlignCompound=*/false,
+ /*PadOperators=*/true}));
+ IO.enumCase(Value, "false",
+ FormatStyle::AlignConsecutiveStyle(
+ {/*Enabled=*/false, /*AcrossEmptyLines=*/false,
+ /*AcrossComments=*/false, /*AlignCompound=*/false,
+ /*PadOperators=*/true}));
+ }
+
+ static void mapping(IO &IO, FormatStyle::AlignConsecutiveStyle &Value) {
+ IO.mapOptional("Enabled", Value.Enabled);
+ IO.mapOptional("AcrossEmptyLines", Value.AcrossEmptyLines);
+ IO.mapOptional("AcrossComments", Value.AcrossComments);
+ IO.mapOptional("AlignCompound", Value.AlignCompound);
+ IO.mapOptional("PadOperators", Value.PadOperators);
}
};
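Net effect of this MappingTraits change: the AlignConsecutive* options accept both the old scalar spellings (via enumInput above) and a per-flag nested mapping. Both forms through the public parseConfiguration() entry point (sketch; the helper name and the chosen flags are arbitrary):

    #include "clang/Format/Format.h"
    #include "llvm/Support/MemoryBufferRef.h"

    static clang::format::FormatStyle parseAlignDemo() {
      clang::format::FormatStyle Style = clang::format::getLLVMStyle();
      // Old scalar spelling, kept for backward compatibility:
      (void)clang::format::parseConfiguration(
          llvm::MemoryBufferRef("AlignConsecutiveAssignments: AcrossComments",
                                "<scalar>"),
          &Style);
      // New nested spelling; keys left unset keep their current values:
      (void)clang::format::parseConfiguration(
          llvm::MemoryBufferRef("AlignConsecutiveAssignments:\n"
                                "  Enabled: true\n"
                                "  AcrossComments: true\n"
                                "  AlignCompound: true\n",
                                "<mapping>"),
          &Style);
      return Style;
    }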
@@ -257,6 +295,21 @@ struct ScalarEnumerationTraits<
};
template <>
+struct ScalarEnumerationTraits<
+ FormatStyle::BreakBeforeConceptDeclarationsStyle> {
+ static void
+ enumeration(IO &IO, FormatStyle::BreakBeforeConceptDeclarationsStyle &Value) {
+ IO.enumCase(Value, "Never", FormatStyle::BBCDS_Never);
+ IO.enumCase(Value, "Allowed", FormatStyle::BBCDS_Allowed);
+ IO.enumCase(Value, "Always", FormatStyle::BBCDS_Always);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "true", FormatStyle::BBCDS_Always);
+ IO.enumCase(Value, "false", FormatStyle::BBCDS_Allowed);
+ }
+};
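BreakBeforeConceptDeclarations thus migrates from bool to a three-state enum; the old YAML true/false spellings map to Always and Allowed as above. What each value means for layout, approximately (comment sketch, not verified output):

    // Never:   template <typename T> concept Hashable = ...;
    //
    // Allowed: a break before `concept` is permitted when needed, not forced.
    //
    // Always:  template <typename T>
    //          concept Hashable = ...;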
+
+template <>
struct ScalarEnumerationTraits<FormatStyle::BreakConstructorInitializersStyle> {
static void
enumeration(IO &IO, FormatStyle::BreakConstructorInitializersStyle &Value) {
@@ -463,6 +516,17 @@ struct ScalarEnumerationTraits<FormatStyle::ReferenceAlignmentStyle> {
};
template <>
+struct ScalarEnumerationTraits<FormatStyle::RequiresClausePositionStyle> {
+ static void enumeration(IO &IO,
+ FormatStyle::RequiresClausePositionStyle &Value) {
+ IO.enumCase(Value, "OwnLine", FormatStyle::RCPS_OwnLine);
+ IO.enumCase(Value, "WithPreceding", FormatStyle::RCPS_WithPreceding);
+ IO.enumCase(Value, "WithFollowing", FormatStyle::RCPS_WithFollowing);
+ IO.enumCase(Value, "SingleLine", FormatStyle::RCPS_SingleLine);
+ }
+};
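RequiresClausePosition, registered here and given its LLVM default further down, controls where a requires-clause lands relative to the template head and the declaration. Approximate layouts (comment sketch; exact output also depends on other options):

    // OwnLine:        template <typename T>
    //                 requires Foo<T>
    //                 void bar(T t);
    //
    // WithPreceding:  template <typename T> requires Foo<T>
    //                 void bar(T t);
    //
    // WithFollowing:  template <typename T>
    //                 requires Foo<T> void bar(T t);
    //
    // SingleLine:     template <typename T> requires Foo<T> void bar(T t);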
+
+template <>
struct ScalarEnumerationTraits<FormatStyle::SpaceBeforeParensStyle> {
static void enumeration(IO &IO, FormatStyle::SpaceBeforeParensStyle &Value) {
IO.enumCase(Value, "Never", FormatStyle::SBPO_Never);
@@ -564,6 +628,7 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("DerivePointerBinding", Style.DerivePointerAlignment);
IO.mapOptional("IndentFunctionDeclarationAfterType",
Style.IndentWrappedFunctionNames);
+ IO.mapOptional("IndentRequires", Style.IndentRequiresClause);
IO.mapOptional("PointerBindsToType", Style.PointerAlignment);
IO.mapOptional("SpaceAfterControlStatementKeyword",
Style.SpaceBeforeParens);
@@ -572,13 +637,13 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("AccessModifierOffset", Style.AccessModifierOffset);
IO.mapOptional("AlignAfterOpenBracket", Style.AlignAfterOpenBracket);
IO.mapOptional("AlignArrayOfStructures", Style.AlignArrayOfStructures);
- IO.mapOptional("AlignConsecutiveMacros", Style.AlignConsecutiveMacros);
IO.mapOptional("AlignConsecutiveAssignments",
Style.AlignConsecutiveAssignments);
IO.mapOptional("AlignConsecutiveBitFields",
Style.AlignConsecutiveBitFields);
IO.mapOptional("AlignConsecutiveDeclarations",
Style.AlignConsecutiveDeclarations);
+ IO.mapOptional("AlignConsecutiveMacros", Style.AlignConsecutiveMacros);
IO.mapOptional("AlignEscapedNewlines", Style.AlignEscapedNewlines);
IO.mapOptional("AlignOperands", Style.AlignOperands);
IO.mapOptional("AlignTrailingComments", Style.AlignTrailingComments);
@@ -610,12 +675,14 @@ template <> struct MappingTraits<FormatStyle> {
// former for backwards compatibility.
if (Style.AlwaysBreakAfterDefinitionReturnType != FormatStyle::DRTBS_None &&
Style.AlwaysBreakAfterReturnType == FormatStyle::RTBS_None) {
- if (Style.AlwaysBreakAfterDefinitionReturnType == FormatStyle::DRTBS_All)
+ if (Style.AlwaysBreakAfterDefinitionReturnType ==
+ FormatStyle::DRTBS_All) {
Style.AlwaysBreakAfterReturnType = FormatStyle::RTBS_AllDefinitions;
- else if (Style.AlwaysBreakAfterDefinitionReturnType ==
- FormatStyle::DRTBS_TopLevel)
+ } else if (Style.AlwaysBreakAfterDefinitionReturnType ==
+ FormatStyle::DRTBS_TopLevel) {
Style.AlwaysBreakAfterReturnType =
FormatStyle::RTBS_TopLevelDefinitions;
+ }
}
IO.mapOptional("AlwaysBreakBeforeMultilineStrings",
@@ -639,8 +706,9 @@ template <> struct MappingTraits<FormatStyle> {
// BreakInheritance was not, initialize the latter from the
// former for backwards compatibility.
if (BreakBeforeInheritanceComma &&
- Style.BreakInheritanceList == FormatStyle::BILS_BeforeColon)
+ Style.BreakInheritanceList == FormatStyle::BILS_BeforeColon) {
Style.BreakInheritanceList = FormatStyle::BILS_BeforeComma;
+ }
IO.mapOptional("BreakBeforeTernaryOperators",
Style.BreakBeforeTernaryOperators);
@@ -654,8 +722,9 @@ template <> struct MappingTraits<FormatStyle> {
// BreakConstructorInitializers was not, initialize the latter from the
// former for backwards compatibility.
if (BreakConstructorInitializersBeforeComma &&
- Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeColon)
+ Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeColon) {
Style.BreakConstructorInitializers = FormatStyle::BCIS_BeforeComma;
+ }
IO.mapOptional("BreakAfterJavaFieldAnnotations",
Style.BreakAfterJavaFieldAnnotations);
@@ -665,13 +734,12 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("QualifierAlignment", Style.QualifierAlignment);
// Default Order for Left/Right based Qualifier alignment.
- if (Style.QualifierAlignment == FormatStyle::QAS_Right) {
+ if (Style.QualifierAlignment == FormatStyle::QAS_Right)
Style.QualifierOrder = {"type", "const", "volatile"};
- } else if (Style.QualifierAlignment == FormatStyle::QAS_Left) {
+ else if (Style.QualifierAlignment == FormatStyle::QAS_Left)
Style.QualifierOrder = {"const", "volatile", "type"};
- } else if (Style.QualifierAlignment == FormatStyle::QAS_Custom) {
+ else if (Style.QualifierAlignment == FormatStyle::QAS_Custom)
IO.mapOptional("QualifierOrder", Style.QualifierOrder);
- }
IO.mapOptional("CompactNamespaces", Style.CompactNamespaces);
IO.mapOptional("ConstructorInitializerIndentWidth",
@@ -737,10 +805,11 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("IndentGotoLabels", Style.IndentGotoLabels);
IO.mapOptional("IndentPPDirectives", Style.IndentPPDirectives);
IO.mapOptional("IndentExternBlock", Style.IndentExternBlock);
- IO.mapOptional("IndentRequires", Style.IndentRequires);
+ IO.mapOptional("IndentRequiresClause", Style.IndentRequiresClause);
IO.mapOptional("IndentWidth", Style.IndentWidth);
IO.mapOptional("IndentWrappedFunctionNames",
Style.IndentWrappedFunctionNames);
+ IO.mapOptional("InsertBraces", Style.InsertBraces);
IO.mapOptional("InsertTrailingCommas", Style.InsertTrailingCommas);
IO.mapOptional("JavaImportGroups", Style.JavaImportGroups);
IO.mapOptional("JavaScriptQuotes", Style.JavaScriptQuotes);
@@ -782,6 +851,7 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("ReferenceAlignment", Style.ReferenceAlignment);
IO.mapOptional("ReflowComments", Style.ReflowComments);
IO.mapOptional("RemoveBracesLLVM", Style.RemoveBracesLLVM);
+ IO.mapOptional("RequiresClausePosition", Style.RequiresClausePosition);
IO.mapOptional("SeparateDefinitionBlocks", Style.SeparateDefinitionBlocks);
IO.mapOptional("ShortNamespaceLines", Style.ShortNamespaceLines);
IO.mapOptional("SortIncludes", Style.SortIncludes);
@@ -870,6 +940,9 @@ template <> struct MappingTraits<FormatStyle::SpaceBeforeParensCustom> {
Spacing.AfterFunctionDeclarationName);
IO.mapOptional("AfterIfMacros", Spacing.AfterIfMacros);
IO.mapOptional("AfterOverloadedOperator", Spacing.AfterOverloadedOperator);
+ IO.mapOptional("AfterRequiresInClause", Spacing.AfterRequiresInClause);
+ IO.mapOptional("AfterRequiresInExpression",
+ Spacing.AfterRequiresInExpression);
IO.mapOptional("BeforeNonEmptyParentheses",
Spacing.BeforeNonEmptyParentheses);
}
@@ -893,9 +966,8 @@ template <> struct MappingTraits<FormatStyle::SpacesInLineComment> {
IO.mapOptional("Maximum", signedMaximum);
Space.Maximum = static_cast<unsigned>(signedMaximum);
- if (Space.Maximum != -1u) {
+ if (Space.Maximum != -1u)
Space.Minimum = std::min(Space.Minimum, Space.Maximum);
- }
}
};
@@ -960,11 +1032,11 @@ std::string ParseErrorCategory::message(int EV) const {
case ParseError::InvalidQualifierSpecified:
return "Invalid qualifier specified in QualifierOrder";
case ParseError::DuplicateQualifierSpecified:
- return "Duplicate qualifier specified in QualfierOrder";
+ return "Duplicate qualifier specified in QualifierOrder";
case ParseError::MissingQualifierType:
- return "Missing type in QualfierOrder";
+ return "Missing type in QualifierOrder";
case ParseError::MissingQualifierOrder:
- return "Missing QualfierOrder";
+ return "Missing QualifierOrder";
}
llvm_unreachable("unexpected parse error");
}
@@ -1110,10 +1182,15 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.AlignArrayOfStructures = FormatStyle::AIAS_None;
LLVMStyle.AlignOperands = FormatStyle::OAS_Align;
LLVMStyle.AlignTrailingComments = true;
- LLVMStyle.AlignConsecutiveAssignments = FormatStyle::ACS_None;
- LLVMStyle.AlignConsecutiveBitFields = FormatStyle::ACS_None;
- LLVMStyle.AlignConsecutiveDeclarations = FormatStyle::ACS_None;
- LLVMStyle.AlignConsecutiveMacros = FormatStyle::ACS_None;
+ LLVMStyle.AlignConsecutiveAssignments = {};
+ LLVMStyle.AlignConsecutiveAssignments.Enabled = false;
+ LLVMStyle.AlignConsecutiveAssignments.AcrossEmptyLines = false;
+ LLVMStyle.AlignConsecutiveAssignments.AcrossComments = false;
+ LLVMStyle.AlignConsecutiveAssignments.AlignCompound = false;
+ LLVMStyle.AlignConsecutiveAssignments.PadOperators = true;
+ LLVMStyle.AlignConsecutiveBitFields = {};
+ LLVMStyle.AlignConsecutiveDeclarations = {};
+ LLVMStyle.AlignConsecutiveMacros = {};
LLVMStyle.AllowAllArgumentsOnNextLine = true;
LLVMStyle.AllowAllParametersOfDeclarationOnNextLine = true;
LLVMStyle.AllowShortEnumsOnASingleLine = true;
@@ -1131,7 +1208,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.BinPackArguments = true;
LLVMStyle.BinPackParameters = true;
LLVMStyle.BreakBeforeBinaryOperators = FormatStyle::BOS_None;
- LLVMStyle.BreakBeforeConceptDeclarations = true;
+ LLVMStyle.BreakBeforeConceptDeclarations = FormatStyle::BBCDS_Always;
LLVMStyle.BreakBeforeTernaryOperators = true;
LLVMStyle.BreakBeforeBraces = FormatStyle::BS_Attach;
LLVMStyle.BraceWrapping = {/*AfterCaseLabel=*/false,
@@ -1189,10 +1266,11 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.IndentCaseBlocks = false;
LLVMStyle.IndentGotoLabels = true;
LLVMStyle.IndentPPDirectives = FormatStyle::PPDIS_None;
- LLVMStyle.IndentRequires = false;
+ LLVMStyle.IndentRequiresClause = true;
LLVMStyle.IndentWrappedFunctionNames = false;
LLVMStyle.IndentWidth = 2;
LLVMStyle.PPIndentWidth = -1;
+ LLVMStyle.InsertBraces = false;
LLVMStyle.InsertTrailingCommas = FormatStyle::TCS_None;
LLVMStyle.JavaScriptQuotes = FormatStyle::JSQS_Leave;
LLVMStyle.JavaScriptWrapImports = true;
@@ -1208,6 +1286,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.ObjCSpaceBeforeProtocolList = true;
LLVMStyle.PointerAlignment = FormatStyle::PAS_Right;
LLVMStyle.ReferenceAlignment = FormatStyle::RAS_Pointer;
+ LLVMStyle.RequiresClausePosition = FormatStyle::RCPS_OwnLine;
LLVMStyle.SeparateDefinitionBlocks = FormatStyle::SDS_Leave;
LLVMStyle.ShortNamespaceLines = 1;
LLVMStyle.SpacesBeforeTrailingComments = 1;
@@ -1231,6 +1310,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.SpaceBeforeCtorInitializerColon = true;
LLVMStyle.SpaceBeforeInheritanceColon = true;
LLVMStyle.SpaceBeforeParens = FormatStyle::SBPO_ControlStatements;
+ LLVMStyle.SpaceBeforeParensOptions = {};
LLVMStyle.SpaceBeforeParensOptions.AfterControlStatements = true;
LLVMStyle.SpaceBeforeParensOptions.AfterForeachMacros = true;
LLVMStyle.SpaceBeforeParensOptions.AfterIfMacros = true;
@@ -1267,12 +1347,10 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.WhitespaceSensitiveMacros.push_back("CF_SWIFT_NAME");
// Defaults that differ when not C++.
- if (Language == FormatStyle::LK_TableGen) {
+ if (Language == FormatStyle::LK_TableGen)
LLVMStyle.SpacesInContainerLiterals = false;
- }
- if (LLVMStyle.isJson()) {
+ if (LLVMStyle.isJson())
LLVMStyle.ColumnLimit = 0;
- }
return LLVMStyle;
}
@@ -1581,27 +1659,26 @@ FormatStyle getNoStyle() {
bool getPredefinedStyle(StringRef Name, FormatStyle::LanguageKind Language,
FormatStyle *Style) {
- if (Name.equals_insensitive("llvm")) {
+ if (Name.equals_insensitive("llvm"))
*Style = getLLVMStyle(Language);
- } else if (Name.equals_insensitive("chromium")) {
+ else if (Name.equals_insensitive("chromium"))
*Style = getChromiumStyle(Language);
- } else if (Name.equals_insensitive("mozilla")) {
+ else if (Name.equals_insensitive("mozilla"))
*Style = getMozillaStyle();
- } else if (Name.equals_insensitive("google")) {
+ else if (Name.equals_insensitive("google"))
*Style = getGoogleStyle(Language);
- } else if (Name.equals_insensitive("webkit")) {
+ else if (Name.equals_insensitive("webkit"))
*Style = getWebKitStyle();
- } else if (Name.equals_insensitive("gnu")) {
+ else if (Name.equals_insensitive("gnu"))
*Style = getGNUStyle();
- } else if (Name.equals_insensitive("microsoft")) {
+ else if (Name.equals_insensitive("microsoft"))
*Style = getMicrosoftStyle(Language);
- } else if (Name.equals_insensitive("none")) {
+ else if (Name.equals_insensitive("none"))
*Style = getNoStyle();
- } else if (Name.equals_insensitive("inheritparentconfig")) {
+ else if (Name.equals_insensitive("inheritparentconfig"))
Style->InheritsParentConfig = true;
- } else {
+ else
return false;
- }
Style->Language = Language;
return true;
@@ -1621,7 +1698,8 @@ ParseError validateQualifierOrder(FormatStyle *Style) {
if (token == tok::identifier)
return ParseError::InvalidQualifierSpecified;
}
- // Ensure the list is unqiue (no duplicates).
+
+ // Ensure the list is unique (no duplicates).
std::set<std::string> UniqueQualifiers(Style->QualifierOrder.begin(),
Style->QualifierOrder.end());
if (Style->QualifierOrder.size() != UniqueQualifiers.size()) {
@@ -1631,10 +1709,12 @@ ParseError validateQualifierOrder(FormatStyle *Style) {
return ParseError::DuplicateQualifierSpecified;
}
+ // Ensure the list has 'type' in it.
auto type = std::find(Style->QualifierOrder.begin(),
Style->QualifierOrder.end(), "type");
if (type == Style->QualifierOrder.end())
return ParseError::MissingQualifierType;
+
return ParseError::Success;
}
@@ -1646,7 +1726,7 @@ std::error_code parseConfiguration(llvm::MemoryBufferRef Config,
FormatStyle::LanguageKind Language = Style->Language;
assert(Language != FormatStyle::LK_None);
if (Config.getBuffer().trim().empty())
- return make_error_code(ParseError::Error);
+ return make_error_code(ParseError::Success);
Style->StyleSet.Clear();
std::vector<FormatStyle> Styles;
llvm::yaml::Input Input(Config, /*Ctxt=*/nullptr, DiagHandler,
@@ -1750,10 +1830,56 @@ FormatStyle::GetLanguageStyle(FormatStyle::LanguageKind Language) const {
namespace {
+class BracesInserter : public TokenAnalyzer {
+public:
+ BracesInserter(const Environment &Env, const FormatStyle &Style)
+ : TokenAnalyzer(Env, Style) {
+ this->Style.RemoveBracesLLVM = false;
+ }
+
+ std::pair<tooling::Replacements, unsigned>
+ analyze(TokenAnnotator &Annotator,
+ SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
+ FormatTokenLexer &Tokens) override {
+ AffectedRangeMgr.computeAffectedLines(AnnotatedLines);
+ tooling::Replacements Result;
+ insertBraces(AnnotatedLines, Result);
+ return {Result, 0};
+ }
+
+private:
+ void insertBraces(SmallVectorImpl<AnnotatedLine *> &Lines,
+ tooling::Replacements &Result) {
+ const auto &SourceMgr = Env.getSourceManager();
+ for (AnnotatedLine *Line : Lines) {
+ insertBraces(Line->Children, Result);
+ if (!Line->Affected)
+ continue;
+ for (FormatToken *Token = Line->First; Token && !Token->Finalized;
+ Token = Token->Next) {
+ if (Token->BraceCount == 0)
+ continue;
+ std::string Brace;
+ if (Token->BraceCount < 0) {
+ assert(Token->BraceCount == -1);
+ Brace = '{';
+ } else {
+ Brace = '\n' + std::string(Token->BraceCount, '}');
+ }
+ Token->BraceCount = 0;
+ const auto Start = Token->Tok.getEndLoc();
+ cantFail(Result.add(tooling::Replacement(SourceMgr, Start, 0, Brace)));
+ }
+ }
+ }
+};
+
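BracesInserter consumes the BraceCount hints recorded on tokens during parsing (see the FormatToken change further down): -1 requests a single `{` right after the token, a positive N a newline plus N closing braces. End to end, the option would be exercised roughly like this (sketch against the public Format/Tooling API; the function and file names are invented):

    #include "clang/Format/Format.h"
    #include "clang/Tooling/Core/Replacement.h"

    // Expected: `if (c)\n  f();` comes back as `if (c) {\n  f();\n}`.
    static llvm::Expected<std::string> insertBracesDemo(llvm::StringRef Code) {
      clang::format::FormatStyle Style = clang::format::getLLVMStyle();
      Style.InsertBraces = true;
      clang::tooling::Replacements Replaces = clang::format::reformat(
          Style, Code,
          {clang::tooling::Range(0, static_cast<unsigned>(Code.size()))},
          "demo.cpp");
      return clang::tooling::applyAllReplacements(Code, Replaces);
    }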
class BracesRemover : public TokenAnalyzer {
public:
BracesRemover(const Environment &Env, const FormatStyle &Style)
- : TokenAnalyzer(Env, Style) {}
+ : TokenAnalyzer(Env, Style) {
+ this->Style.InsertBraces = false;
+ }
std::pair<tooling::Replacements, unsigned>
analyze(TokenAnnotator &Annotator,
@@ -1766,7 +1892,6 @@ public:
}
private:
- // Remove optional braces.
void removeBraces(SmallVectorImpl<AnnotatedLine *> &Lines,
tooling::Replacements &Result) {
const auto &SourceMgr = Env.getSourceManager();
@@ -1774,13 +1899,18 @@ private:
removeBraces(Line->Children, Result);
if (!Line->Affected)
continue;
- for (FormatToken *Token = Line->First; Token; Token = Token->Next) {
+ for (FormatToken *Token = Line->First; Token && !Token->Finalized;
+ Token = Token->Next) {
if (!Token->Optional)
continue;
assert(Token->isOneOf(tok::l_brace, tok::r_brace));
- const auto Start = Token == Line->Last
- ? Token->WhitespaceRange.getBegin()
- : Token->Tok.getLocation();
+ assert(Token->Next || Token == Line->Last);
+ const auto Start =
+ Token == Line->Last ||
+ (Token->Next->isOneOf(tok::kw_else, tok::comment) &&
+ Token->Next->NewlinesBefore > 0)
+ ? Token->WhitespaceRange.getBegin()
+ : Token->Tok.getLocation();
const auto Range =
CharSourceRange::getCharRange(Start, Token->Tok.getEndLoc());
cantFail(Result.add(tooling::Replacement(SourceMgr, Range, "")));
@@ -1822,8 +1952,9 @@ private:
(Style.JavaScriptQuotes == FormatStyle::JSQS_Single &&
!Input.startswith("\"")) ||
(Style.JavaScriptQuotes == FormatStyle::JSQS_Double &&
- !Input.startswith("\'")))
+ !Input.startswith("\'"))) {
continue;
+ }
// Change start and end quote.
bool IsSingle = Style.JavaScriptQuotes == FormatStyle::JSQS_Single;
@@ -1930,8 +2061,9 @@ private:
if (Tok->is(tok::coloncolon) && Tok->Previous->is(TT_TemplateOpener))
return true;
if (Tok->is(TT_TemplateCloser) &&
- Tok->Previous->is(TT_TemplateCloser))
+ Tok->Previous->is(TT_TemplateCloser)) {
return true;
+ }
}
}
}
@@ -1945,6 +2077,18 @@ private:
for (FormatToken *Tok = Line->First; Tok && Tok->Next; Tok = Tok->Next) {
if (!Tok->is(TT_PointerOrReference))
continue;
+ // Don't treat space in `void foo() &&` as evidence.
+ if (const auto *Prev = Tok->getPreviousNonComment()) {
+ if (Prev->is(tok::r_paren) && Prev->MatchingParen) {
+ if (const auto *Func =
+ Prev->MatchingParen->getPreviousNonComment()) {
+ if (Func->isOneOf(TT_FunctionDeclarationName, TT_StartOfName,
+ TT_OverloadedOperator)) {
+ continue;
+ }
+ }
+ }
+ }
bool SpaceBefore = Tok->hasWhitespaceBefore();
bool SpaceAfter = Tok->Next->hasWhitespaceBefore();
if (SpaceBefore && !SpaceAfter)
@@ -1979,10 +2123,11 @@ private:
: FormatStyle::PAS_Right;
Style.ReferenceAlignment = FormatStyle::RAS_Pointer;
}
- if (Style.Standard == FormatStyle::LS_Auto)
+ if (Style.Standard == FormatStyle::LS_Auto) {
Style.Standard = hasCpp03IncompatibleFormat(AnnotatedLines)
? FormatStyle::LS_Latest
: FormatStyle::LS_Cpp03;
+ }
BinPackInconclusiveFunctions =
HasBinPackedFunction || !HasOnePerLineFunction;
}
@@ -2035,8 +2180,9 @@ private:
continue;
if (!(FormatTok->is(tok::r_square) &&
Matching->is(TT_ArrayInitializerLSquare)) &&
- !(FormatTok->is(tok::r_brace) && Matching->is(TT_DictLiteral)))
+ !(FormatTok->is(tok::r_brace) && Matching->is(TT_DictLiteral))) {
continue;
+ }
FormatToken *Prev = FormatTok->getPreviousNonComment();
if (Prev->is(tok::comma) || Prev->is(tok::semi))
continue;
@@ -2092,9 +2238,8 @@ public:
private:
void cleanupLine(AnnotatedLine *Line) {
- for (auto *Child : Line->Children) {
+ for (auto *Child : Line->Children)
cleanupLine(Child);
- }
if (Line->Affected) {
cleanupRight(Line->First, tok::comma, tok::comma);
@@ -2108,10 +2253,9 @@ private:
}
bool containsOnlyComments(const AnnotatedLine &Line) {
- for (FormatToken *Tok = Line.First; Tok != nullptr; Tok = Tok->Next) {
+ for (FormatToken *Tok = Line.First; Tok != nullptr; Tok = Tok->Next)
if (Tok->isNot(tok::comment))
return false;
- }
return true;
}
@@ -2120,9 +2264,8 @@ private:
std::set<unsigned> DeletedLines;
for (unsigned i = 0, e = AnnotatedLines.size(); i != e; ++i) {
auto &Line = *AnnotatedLines[i];
- if (Line.startsWithNamespace()) {
+ if (Line.startsWithNamespace())
checkEmptyNamespace(AnnotatedLines, i, i, DeletedLines);
- }
}
for (auto Line : DeletedLines) {
@@ -2159,8 +2302,9 @@ private:
if (AnnotatedLines[CurrentLine]->startsWithNamespace()) {
if (!checkEmptyNamespace(AnnotatedLines, CurrentLine, NewLine,
- DeletedLines))
+ DeletedLines)) {
return false;
+ }
CurrentLine = NewLine;
continue;
}
@@ -2181,12 +2325,12 @@ private:
// Check if the empty namespace is actually affected by changed ranges.
if (!AffectedRangeMgr.affectsCharSourceRange(CharSourceRange::getCharRange(
AnnotatedLines[InitLine]->First->Tok.getLocation(),
- AnnotatedLines[CurrentLine]->Last->Tok.getEndLoc())))
+ AnnotatedLines[CurrentLine]->Last->Tok.getEndLoc()))) {
return false;
+ }
- for (unsigned i = InitLine; i <= CurrentLine; ++i) {
+ for (unsigned i = InitLine; i <= CurrentLine; ++i)
DeletedLines.insert(i);
- }
return true;
}
@@ -2199,10 +2343,12 @@ private:
void cleanupPair(FormatToken *Start, LeftKind LK, RightKind RK,
bool DeleteLeft) {
auto NextNotDeleted = [this](const FormatToken &Tok) -> FormatToken * {
- for (auto *Res = Tok.Next; Res; Res = Res->Next)
+ for (auto *Res = Tok.Next; Res; Res = Res->Next) {
if (!Res->is(tok::comment) &&
- DeletedTokens.find(Res) == DeletedTokens.end())
+ DeletedTokens.find(Res) == DeletedTokens.end()) {
return Res;
+ }
+ }
return nullptr;
};
for (auto *Left = Start; Left;) {
@@ -2250,10 +2396,8 @@ private:
unsigned Idx = 0;
while (Idx < Tokens.size()) {
unsigned St = Idx, End = Idx;
- while ((End + 1) < Tokens.size() &&
- Tokens[End]->Next == Tokens[End + 1]) {
+ while ((End + 1) < Tokens.size() && Tokens[End]->Next == Tokens[End + 1])
++End;
- }
auto SR = CharSourceRange::getCharRange(Tokens[St]->Tok.getLocation(),
Tokens[End]->Tok.getEndLoc());
auto Err =
@@ -2387,8 +2531,9 @@ private:
for (auto Line : AnnotatedLines) {
if (Line->First && (Line->First->TokenText.startswith("#") ||
Line->First->TokenText == "__pragma" ||
- Line->First->TokenText == "_Pragma"))
+ Line->First->TokenText == "_Pragma")) {
continue;
+ }
for (const FormatToken *FormatTok = Line->First; FormatTok;
FormatTok = FormatTok->Next) {
if ((FormatTok->Previous && FormatTok->Previous->is(tok::at) &&
@@ -2446,8 +2591,9 @@ static bool affectsRange(ArrayRef<tooling::Range> Ranges, unsigned Start,
unsigned End) {
for (auto Range : Ranges) {
if (Range.getOffset() < End &&
- Range.getOffset() + Range.getLength() > Start)
+ Range.getOffset() + Range.getLength() > Start) {
return true;
+ }
}
return false;
}
@@ -2515,15 +2661,14 @@ static void sortCppIncludes(const FormatStyle &Style,
StringRef Code, tooling::Replacements &Replaces,
unsigned *Cursor) {
tooling::IncludeCategoryManager Categories(Style.IncludeStyle, FileName);
- unsigned IncludesBeginOffset = Includes.front().Offset;
- unsigned IncludesEndOffset =
+ const unsigned IncludesBeginOffset = Includes.front().Offset;
+ const unsigned IncludesEndOffset =
Includes.back().Offset + Includes.back().Text.size();
- unsigned IncludesBlockSize = IncludesEndOffset - IncludesBeginOffset;
+ const unsigned IncludesBlockSize = IncludesEndOffset - IncludesBeginOffset;
if (!affectsRange(Ranges, IncludesBeginOffset, IncludesEndOffset))
return;
- SmallVector<unsigned, 16> Indices;
- Indices.resize(Includes.size());
- std::iota(Indices.begin(), Indices.end(), 0);
+ SmallVector<unsigned, 16> Indices =
+ llvm::to_vector<16>(llvm::seq<unsigned>(0, Includes.size()));
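The std::iota dance is replaced with llvm::seq here (hence the new Sequence.h include at the top of this file); both forms build the same [0, N) index vector (comment sketch):

    // Before:
    //   SmallVector<unsigned, 16> Indices;
    //   Indices.resize(N);
    //   std::iota(Indices.begin(), Indices.end(), 0);
    // After (llvm::seq is half-open, so this is 0, 1, ..., N-1):
    //   auto Indices = llvm::to_vector<16>(llvm::seq<unsigned>(0, N));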
if (Style.SortIncludes == FormatStyle::SI_CaseInsensitive) {
llvm::stable_sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
@@ -2546,9 +2691,10 @@ static void sortCppIncludes(const FormatStyle &Style,
unsigned CursorIndex;
// The offset from cursor to the end of line.
unsigned CursorToEOLOffset;
- if (Cursor)
+ if (Cursor) {
std::tie(CursorIndex, CursorToEOLOffset) =
FindCursorIndex(Includes, Indices, *Cursor);
+ }
// Deduplicate #includes.
Indices.erase(std::unique(Indices.begin(), Indices.end(),
@@ -2564,11 +2710,12 @@ static void sortCppIncludes(const FormatStyle &Style,
// the entire block. Otherwise, no replacement is generated.
// In case Style.IncludeStyle.IncludeBlocks != IBS_Preserve, this check is not
// enough as additional newlines might be added or removed across #include
- // blocks. This we handle below by generating the updated #imclude blocks and
+ // blocks. This we handle below by generating the updated #include blocks and
// comparing it to the original.
if (Indices.size() == Includes.size() && llvm::is_sorted(Indices) &&
- Style.IncludeStyle.IncludeBlocks == tooling::IncludeStyle::IBS_Preserve)
+ Style.IncludeStyle.IncludeBlocks == tooling::IncludeStyle::IBS_Preserve) {
return;
+ }
std::string result;
for (unsigned Index : Indices) {
@@ -2576,8 +2723,9 @@ static void sortCppIncludes(const FormatStyle &Style,
result += "\n";
if (Style.IncludeStyle.IncludeBlocks ==
tooling::IncludeStyle::IBS_Regroup &&
- CurrentCategory != Includes[Index].Category)
+ CurrentCategory != Includes[Index].Category) {
result += "\n";
+ }
}
result += Includes[Index].Text;
if (Cursor && CursorIndex == Index)
@@ -2585,11 +2733,15 @@ static void sortCppIncludes(const FormatStyle &Style,
CurrentCategory = Includes[Index].Category;
}
+ if (Cursor && *Cursor >= IncludesEndOffset)
+ *Cursor += result.size() - IncludesBlockSize;
+
// If the #includes are out of order, we generate a single replacement fixing
// the entire range of blocks. Otherwise, no replacement is generated.
if (replaceCRLF(result) == replaceCRLF(std::string(Code.substr(
- IncludesBeginOffset, IncludesBlockSize))))
+ IncludesBeginOffset, IncludesBlockSize)))) {
return;
+ }
auto Err = Replaces.add(tooling::Replacement(
FileName, Includes.front().Offset, IncludesBlockSize, result));
@@ -2655,15 +2807,16 @@ tooling::Replacements sortCppIncludes(const FormatStyle &Style, StringRef Code,
FormattingOff = true;
}
- if (Trimmed.contains(RawStringTermination)) {
+ if (Trimmed.contains(RawStringTermination))
FormattingOff = false;
- }
- if (Trimmed == "// clang-format off" || Trimmed == "/* clang-format off */")
+ if (Trimmed == "// clang-format off" ||
+ Trimmed == "/* clang-format off */") {
FormattingOff = true;
- else if (Trimmed == "// clang-format on" ||
- Trimmed == "/* clang-format on */")
+ } else if (Trimmed == "// clang-format on" ||
+ Trimmed == "/* clang-format on */") {
FormattingOff = false;
+ }
const bool EmptyLineSkipped =
Trimmed.empty() &&
@@ -2750,10 +2903,8 @@ static void sortJavaImports(const FormatStyle &Style,
if (!affectsRange(Ranges, ImportsBeginOffset, ImportsEndOffset))
return;
- SmallVector<unsigned, 16> Indices;
- Indices.resize(Imports.size());
- std::iota(Indices.begin(), Indices.end(), 0);
-
+ SmallVector<unsigned, 16> Indices =
+ llvm::to_vector<16>(llvm::seq<unsigned>(0, Imports.size()));
SmallVector<unsigned, 16> JavaImportGroups;
JavaImportGroups.reserve(Imports.size());
for (const JavaImportDirective &Import : Imports)
@@ -2786,8 +2937,9 @@ static void sortJavaImports(const FormatStyle &Style,
if (!result.empty()) {
result += "\n";
if (CurrentIsStatic != Imports[Index].IsStatic ||
- CurrentImportGroup != JavaImportGroups[Index])
+ CurrentImportGroup != JavaImportGroups[Index]) {
result += "\n";
+ }
}
for (StringRef CommentLine : Imports[Index].AssociatedCommentLines) {
result += CommentLine;
@@ -2801,8 +2953,9 @@ static void sortJavaImports(const FormatStyle &Style,
// If the imports are out of order, we generate a single replacement fixing
// the entire block. Otherwise, no replacement is generated.
if (replaceCRLF(result) == replaceCRLF(std::string(Code.substr(
- Imports.front().Offset, ImportsBlockSize))))
+ Imports.front().Offset, ImportsBlockSize)))) {
return;
+ }
auto Err = Replaces.add(tooling::Replacement(FileName, Imports.front().Offset,
ImportsBlockSize, result));
@@ -2854,9 +3007,8 @@ tooling::Replacements sortJavaImports(const FormatStyle &Style, StringRef Code,
StringRef Static = Matches[1];
StringRef Identifier = Matches[2];
bool IsStatic = false;
- if (Static.contains("static")) {
+ if (Static.contains("static"))
IsStatic = true;
- }
ImportsInBlock.push_back(
{Identifier, Line, Prev, AssociatedCommentLines, IsStatic});
AssociatedCommentLines.clear();
@@ -2892,8 +3044,9 @@ tooling::Replacements sortIncludes(const FormatStyle &Style, StringRef Code,
if (isLikelyXml(Code))
return Replaces;
if (Style.Language == FormatStyle::LanguageKind::LK_JavaScript &&
- isMpegTS(Code))
+ isMpegTS(Code)) {
return Replaces;
+ }
if (Style.Language == FormatStyle::LanguageKind::LK_JavaScript)
return sortJavaScriptImports(Style, Code, Ranges, FileName);
if (Style.Language == FormatStyle::LanguageKind::LK_Java)
@@ -3057,6 +3210,15 @@ reformat(const FormatStyle &Style, StringRef Code,
FormatStyle Expanded = Style;
expandPresetsBraceWrapping(Expanded);
expandPresetsSpaceBeforeParens(Expanded);
+ switch (Expanded.RequiresClausePosition) {
+ case FormatStyle::RCPS_SingleLine:
+ case FormatStyle::RCPS_WithPreceding:
+ Expanded.IndentRequiresClause = false;
+ break;
+ default:
+ break;
+ }
+
if (Expanded.DisableFormat)
return {tooling::Replacements(), 0};
if (isLikelyXml(Code))
@@ -3077,9 +3239,8 @@ reformat(const FormatStyle &Style, StringRef Code,
// add a replacement to remove the "x = " from the result.
if (!Replaces.add(tooling::Replacement(FileName, 0, 4, ""))) {
// apply the reformatting changes and the removal of "x = ".
- if (applyAllReplacements(Code, Replaces)) {
+ if (applyAllReplacements(Code, Replaces))
return {Replaces, 0};
- }
}
return {tooling::Replacements(), 0};
}
@@ -3087,53 +3248,66 @@ reformat(const FormatStyle &Style, StringRef Code,
typedef std::function<std::pair<tooling::Replacements, unsigned>(
const Environment &)>
AnalyzerPass;
- SmallVector<AnalyzerPass, 4> Passes;
+ SmallVector<AnalyzerPass, 8> Passes;
- if (Style.isCpp() && Style.QualifierAlignment != FormatStyle::QAS_Leave) {
- Passes.emplace_back([&](const Environment &Env) {
- return QualifierAlignmentFixer(Env, Expanded, Code, Ranges,
- FirstStartColumn, NextStartColumn,
- LastStartColumn, FileName)
- .process();
- });
- }
+ if (Style.isCpp()) {
+ if (Style.QualifierAlignment != FormatStyle::QAS_Leave) {
+ Passes.emplace_back([&](const Environment &Env) {
+ return QualifierAlignmentFixer(Env, Expanded, Code, Ranges,
+ FirstStartColumn, NextStartColumn,
+ LastStartColumn, FileName)
+ .process();
+ });
+ }
- if (Style.isCpp() && Style.RemoveBracesLLVM)
- Passes.emplace_back([&](const Environment &Env) {
- return BracesRemover(Env, Expanded).process();
- });
+ if (Style.InsertBraces) {
+ Passes.emplace_back([&](const Environment &Env) {
+ return BracesInserter(Env, Expanded).process();
+ });
+ }
- if (Style.Language == FormatStyle::LK_Cpp) {
- if (Style.FixNamespaceComments)
+ if (Style.RemoveBracesLLVM) {
+ Passes.emplace_back([&](const Environment &Env) {
+ return BracesRemover(Env, Expanded).process();
+ });
+ }
+
+ if (Style.FixNamespaceComments) {
Passes.emplace_back([&](const Environment &Env) {
return NamespaceEndCommentsFixer(Env, Expanded).process();
});
+ }
- if (Style.SortUsingDeclarations)
+ if (Style.SortUsingDeclarations) {
Passes.emplace_back([&](const Environment &Env) {
return UsingDeclarationsSorter(Env, Expanded).process();
});
+ }
}
- if (Style.SeparateDefinitionBlocks != FormatStyle::SDS_Leave)
+ if (Style.SeparateDefinitionBlocks != FormatStyle::SDS_Leave) {
Passes.emplace_back([&](const Environment &Env) {
return DefinitionBlockSeparator(Env, Expanded).process();
});
+ }
- if (Style.isJavaScript() && Style.JavaScriptQuotes != FormatStyle::JSQS_Leave)
+ if (Style.isJavaScript() &&
+ Style.JavaScriptQuotes != FormatStyle::JSQS_Leave) {
Passes.emplace_back([&](const Environment &Env) {
return JavaScriptRequoter(Env, Expanded).process();
});
+ }
Passes.emplace_back([&](const Environment &Env) {
return Formatter(Env, Expanded, Status).process();
});
if (Style.isJavaScript() &&
- Style.InsertTrailingCommas == FormatStyle::TCS_Wrapped)
+ Style.InsertTrailingCommas == FormatStyle::TCS_Wrapped) {
Passes.emplace_back([&](const Environment &Env) {
return TrailingCommaInserter(Env, Expanded).process();
});
+ }
auto Env = Environment::make(Code, FileName, Ranges, FirstStartColumn,
NextStartColumn, LastStartColumn);
@@ -3242,6 +3416,10 @@ LangOptions getFormattingLangOpts(const FormatStyle &Style) {
LangOpts.CPlusPlus17 = LexingStd >= FormatStyle::LS_Cpp17;
LangOpts.CPlusPlus20 = LexingStd >= FormatStyle::LS_Cpp20;
LangOpts.Char8 = LexingStd >= FormatStyle::LS_Cpp20;
+ // Turning on digraphs in standards before C++0x is error-prone, because e.g.
+ // the sequence "<::" will be unconditionally treated as "[:".
+ // Cf. Lexer::LexTokenInternal.
+ LangOpts.Digraphs = LexingStd >= FormatStyle::LS_Cpp11;
LangOpts.LineComment = 1;
bool AlternativeOperators = Style.isCpp();
@@ -3261,7 +3439,7 @@ const char *StyleOptionHelpDescription =
".clang-format file located in one of the parent\n"
"directories of the source file (or current\n"
"directory for stdin).\n"
- "Use -style=file:<format_file_path> to explicitly specify"
+ "Use -style=file:<format_file_path> to explicitly specify\n"
"the configuration file.\n"
"Use -style=\"{key: value, ...}\" to set specific\n"
"parameters, e.g.:\n"
@@ -3272,24 +3450,33 @@ static FormatStyle::LanguageKind getLanguageByFileName(StringRef FileName) {
return FormatStyle::LK_Java;
if (FileName.endswith_insensitive(".js") ||
FileName.endswith_insensitive(".mjs") ||
- FileName.endswith_insensitive(".ts"))
+ FileName.endswith_insensitive(".ts")) {
return FormatStyle::LK_JavaScript; // (module) JavaScript or TypeScript.
+ }
if (FileName.endswith(".m") || FileName.endswith(".mm"))
return FormatStyle::LK_ObjC;
if (FileName.endswith_insensitive(".proto") ||
- FileName.endswith_insensitive(".protodevel"))
+ FileName.endswith_insensitive(".protodevel")) {
return FormatStyle::LK_Proto;
+ }
if (FileName.endswith_insensitive(".textpb") ||
FileName.endswith_insensitive(".pb.txt") ||
FileName.endswith_insensitive(".textproto") ||
- FileName.endswith_insensitive(".asciipb"))
+ FileName.endswith_insensitive(".asciipb")) {
return FormatStyle::LK_TextProto;
+ }
if (FileName.endswith_insensitive(".td"))
return FormatStyle::LK_TableGen;
if (FileName.endswith_insensitive(".cs"))
return FormatStyle::LK_CSharp;
if (FileName.endswith_insensitive(".json"))
return FormatStyle::LK_Json;
+ if (FileName.endswith_insensitive(".sv") ||
+ FileName.endswith_insensitive(".svh") ||
+ FileName.endswith_insensitive(".v") ||
+ FileName.endswith_insensitive(".vh")) {
+ return FormatStyle::LK_Verilog;
+ }
return FormatStyle::LK_Cpp;
}
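With the new branch above, Verilog sources are recognized by extension. A quick probe through the public guessLanguage() wrapper (sketch; the file names are invented, and LK_Verilog exists in this tree per the hunk above):

    #include "clang/Format/Format.h"
    #include <cassert>

    static void verilogDetectionDemo() {
      using clang::format::FormatStyle;
      assert(clang::format::guessLanguage("alu.sv", "") == FormatStyle::LK_Verilog);
      assert(clang::format::guessLanguage("pkg.svh", "") == FormatStyle::LK_Verilog);
      assert(clang::format::guessLanguage("top.v", "") == FormatStyle::LK_Verilog);
      assert(clang::format::guessLanguage("main.cc", "") == FormatStyle::LK_Cpp);
    }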
@@ -3331,9 +3518,8 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
StringRef FallbackStyleName,
StringRef Code, llvm::vfs::FileSystem *FS,
bool AllowUnknownOptions) {
- if (!FS) {
+ if (!FS)
FS = llvm::vfs::getRealFileSystem().get();
- }
FormatStyle Style = getLLVMStyle(guessLanguage(FileName, Code));
FormatStyle FallbackStyle = getNoStyle();
@@ -3348,13 +3534,15 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
StringRef Source = "<command-line>";
if (std::error_code ec =
parseConfiguration(llvm::MemoryBufferRef(StyleName, Source), &Style,
- AllowUnknownOptions))
+ AllowUnknownOptions)) {
return make_string_error("Error parsing -style: " + ec.message());
- if (Style.InheritsParentConfig)
+ }
+ if (Style.InheritsParentConfig) {
ChildFormatTextToApply.emplace_back(
llvm::MemoryBuffer::getMemBuffer(StyleName, Source, false));
- else
+ } else {
return Style;
+ }
}
// User provided clang-format file using -style=file:path/to/format/file.
@@ -3363,9 +3551,10 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
auto ConfigFile = StyleName.substr(5);
llvm::ErrorOr<std::unique_ptr<llvm::MemoryBuffer>> Text =
loadAndParseConfigFile(ConfigFile, FS, &Style, AllowUnknownOptions);
- if (auto EC = Text.getError())
+ if (auto EC = Text.getError()) {
return make_string_error("Error reading " + ConfigFile + ": " +
EC.message());
+ }
LLVM_DEBUG(llvm::dbgs()
<< "Using configuration file " << ConfigFile << "\n");
@@ -3473,10 +3662,11 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
}
}
}
- if (!UnsuitableConfigFiles.empty())
+ if (!UnsuitableConfigFiles.empty()) {
return make_string_error("Configuration file(s) do(es) not support " +
getLanguageName(Style.Language) + ": " +
UnsuitableConfigFiles);
+ }
if (!ChildFormatTextToApply.empty()) {
LLVM_DEBUG(llvm::dbgs()
diff --git a/contrib/llvm-project/clang/lib/Format/FormatToken.cpp b/contrib/llvm-project/clang/lib/Format/FormatToken.cpp
index 59d6f29bb54d..2c0fee6975c2 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatToken.cpp
+++ b/contrib/llvm-project/clang/lib/Format/FormatToken.cpp
@@ -74,6 +74,20 @@ bool FormatToken::isTypeOrIdentifier() const {
return isSimpleTypeSpecifier() || Tok.isOneOf(tok::kw_auto, tok::identifier);
}
+bool FormatToken::opensBlockOrBlockTypeList(const FormatStyle &Style) const {
+  // C# does not indent object initialisers as continuations.
+ if (is(tok::l_brace) && getBlockKind() == BK_BracedInit && Style.isCSharp())
+ return true;
+ if (is(TT_TemplateString) && opensScope())
+ return true;
+ return is(TT_ArrayInitializerLSquare) || is(TT_ProtoExtensionLSquare) ||
+ (is(tok::l_brace) &&
+ (getBlockKind() == BK_Block || is(TT_DictLiteral) ||
+ (!Style.Cpp11BracedListStyle && NestingLevel == 0))) ||
+ (is(tok::less) && (Style.Language == FormatStyle::LK_Proto ||
+ Style.Language == FormatStyle::LK_TextProto));
+}
+
TokenRole::~TokenRole() {}
void TokenRole::precomputeFormattingInfos(const FormatToken *Token) {}
@@ -92,8 +106,9 @@ unsigned CommaSeparatedList::formatAfterToken(LineState &State,
State.NextToken->Previous->getPreviousNonComment();
if (!LBrace || !LBrace->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) ||
LBrace->is(BK_Block) || LBrace->is(TT_DictLiteral) ||
- LBrace->Next->is(TT_DesignatedInitializerPeriod))
+ LBrace->Next->is(TT_DesignatedInitializerPeriod)) {
return 0;
+ }
// Calculate the number of code points we have to format this list. As the
// first token is already placed, we have to subtract it.
@@ -158,15 +173,17 @@ static unsigned CodePointsBetween(const FormatToken *Begin,
void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
// FIXME: At some point we might want to do this for other lists, too.
if (!Token->MatchingParen ||
- !Token->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare))
+ !Token->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare)) {
return;
+ }
// In C++11 braced list style, we should not format in columns unless they
// have many items (20 or more) or we allow bin-packing of function call
// arguments.
if (Style.Cpp11BracedListStyle && !Style.BinPackArguments &&
- Commas.size() < 19)
+ Commas.size() < 19) {
return;
+ }
// Limit column layout for JavaScript array initializers to 20 or more items
// for now to introduce it carefully. We can become more aggressive if this
@@ -186,6 +203,9 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
// The lengths of an item if it is put at the end of the line. This includes
// trailing comments which are otherwise ignored for column alignment.
SmallVector<unsigned, 8> EndOfLineItemLength;
+ MustBreakBeforeItem.reserve(Commas.size() + 1);
+ EndOfLineItemLength.reserve(Commas.size() + 1);
+ ItemLengths.reserve(Commas.size() + 1);
bool HasSeparatingComment = false;
for (unsigned i = 0, e = Commas.size() + 1; i != e; ++i) {
@@ -221,8 +241,9 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
// Consume trailing comments so they are included in EndOfLineItemLength.
if (ItemEnd->Next && !ItemEnd->Next->HasUnescapedNewline &&
- ItemEnd->Next->isTrailingComment())
+ ItemEnd->Next->isTrailingComment()) {
ItemEnd = ItemEnd->Next;
+ }
}
EndOfLineItemLength.push_back(CodePointsBetween(ItemBegin, ItemEnd));
// If there is a trailing comma in the list, the next item will start at the
@@ -283,8 +304,9 @@ void CommaSeparatedList::precomputeFormattingInfos(const FormatToken *Token) {
if (Format.ColumnSizes[i] - MinSizeInColumn[i] > 10)
return true;
return false;
- }())
+ }()) {
continue;
+ }
// Ignore layouts that are bound to violate the column limit.
if (Format.TotalWidth > Style.ColumnLimit && Columns > 1)
diff --git a/contrib/llvm-project/clang/lib/Format/FormatToken.h b/contrib/llvm-project/clang/lib/Format/FormatToken.h
index f116a89ac644..b6cc021affae 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatToken.h
+++ b/contrib/llvm-project/clang/lib/Format/FormatToken.h
@@ -35,17 +35,31 @@ namespace format {
TYPE(BinaryOperator) \
TYPE(BitFieldColon) \
TYPE(BlockComment) \
+ TYPE(BracedListLBrace) \
TYPE(CastRParen) \
+ TYPE(ClassLBrace) \
+ TYPE(CompoundRequirementLBrace) \
TYPE(ConditionalExpr) \
TYPE(ConflictAlternative) \
TYPE(ConflictEnd) \
TYPE(ConflictStart) \
- TYPE(ConstraintJunctions) \
+ /* l_brace of if/for/while */ \
+ TYPE(ControlStatementLBrace) \
+ TYPE(CppCastLParen) \
+ TYPE(CSharpGenericTypeConstraint) \
+ TYPE(CSharpGenericTypeConstraintColon) \
+ TYPE(CSharpGenericTypeConstraintComma) \
+ TYPE(CSharpNamedArgumentColon) \
+ TYPE(CSharpNullable) \
+ TYPE(CSharpNullConditionalLSquare) \
+ TYPE(CSharpStringLiteral) \
TYPE(CtorInitializerColon) \
TYPE(CtorInitializerComma) \
TYPE(DesignatedInitializerLSquare) \
TYPE(DesignatedInitializerPeriod) \
TYPE(DictLiteral) \
+ TYPE(ElseLBrace) \
+ TYPE(EnumLBrace) \
TYPE(FatArrow) \
TYPE(ForEachMacro) \
TYPE(FunctionAnnotationRParen) \
@@ -61,6 +75,7 @@ namespace format {
TYPE(InlineASMColon) \
TYPE(InlineASMSymbolicNameLSquare) \
TYPE(JavaAnnotation) \
+ TYPE(JsAndAndEqual) \
TYPE(JsComputedPropertyName) \
TYPE(JsExponentiation) \
TYPE(JsExponentiationEqual) \
@@ -69,7 +84,6 @@ namespace format {
TYPE(JsTypeColon) \
TYPE(JsTypeOperator) \
TYPE(JsTypeOptionalQuestion) \
- TYPE(JsAndAndEqual) \
TYPE(LambdaArrow) \
TYPE(LambdaLBrace) \
TYPE(LambdaLSquare) \
@@ -94,63 +108,35 @@ namespace format {
TYPE(OverloadedOperator) \
TYPE(OverloadedOperatorLParen) \
TYPE(PointerOrReference) \
+ TYPE(ProtoExtensionLSquare) \
TYPE(PureVirtualSpecifier) \
TYPE(RangeBasedForLoopColon) \
TYPE(RecordLBrace) \
TYPE(RegexLiteral) \
+ TYPE(RequiresClause) \
+ TYPE(RequiresClauseInARequiresExpression) \
+ TYPE(RequiresExpression) \
+ TYPE(RequiresExpressionLBrace) \
+ TYPE(RequiresExpressionLParen) \
TYPE(SelectorName) \
TYPE(StartOfName) \
TYPE(StatementAttributeLikeMacro) \
TYPE(StatementMacro) \
+ TYPE(StructLBrace) \
TYPE(StructuredBindingLSquare) \
TYPE(TemplateCloser) \
TYPE(TemplateOpener) \
TYPE(TemplateString) \
- TYPE(ProtoExtensionLSquare) \
TYPE(TrailingAnnotation) \
TYPE(TrailingReturnArrow) \
TYPE(TrailingUnaryOperator) \
TYPE(TypeDeclarationParen) \
TYPE(TypenameMacro) \
TYPE(UnaryOperator) \
+ TYPE(UnionLBrace) \
TYPE(UntouchableMacroFunc) \
- TYPE(CSharpStringLiteral) \
- TYPE(CSharpNamedArgumentColon) \
- TYPE(CSharpNullable) \
- TYPE(CSharpNullConditionalLSquare) \
- TYPE(CSharpGenericTypeConstraint) \
- TYPE(CSharpGenericTypeConstraintColon) \
- TYPE(CSharpGenericTypeConstraintComma) \
TYPE(Unknown)
-/// Sorted operators that can follow a C variable.
-static const std::vector<clang::tok::TokenKind> COperatorsFollowingVar = [] {
- std::vector<clang::tok::TokenKind> ReturnVal = {
- tok::l_square, tok::r_square,
- tok::l_paren, tok::r_paren,
- tok::r_brace, tok::period,
- tok::ellipsis, tok::ampamp,
- tok::ampequal, tok::star,
- tok::starequal, tok::plus,
- tok::plusplus, tok::plusequal,
- tok::minus, tok::arrow,
- tok::minusminus, tok::minusequal,
- tok::exclaim, tok::exclaimequal,
- tok::slash, tok::slashequal,
- tok::percent, tok::percentequal,
- tok::less, tok::lessless,
- tok::lessequal, tok::lesslessequal,
- tok::greater, tok::greatergreater,
- tok::greaterequal, tok::greatergreaterequal,
- tok::caret, tok::caretequal,
- tok::pipe, tok::pipepipe,
- tok::pipeequal, tok::question,
- tok::semi, tok::equal,
- tok::equalequal, tok::comma};
- assert(std::is_sorted(ReturnVal.begin(), ReturnVal.end()));
- return ReturnVal;
-}();
-
/// Determines the semantic type of a syntactic token, e.g. whether "<" is a
/// template opener or binary operator.
enum TokenType : uint8_t {
@@ -245,8 +231,9 @@ struct FormatToken {
CanBreakBefore(false), ClosesTemplateDeclaration(false),
StartsBinaryExpression(false), EndsBinaryExpression(false),
PartOfMultiVariableDeclStmt(false), ContinuesLineCommentSection(false),
- Finalized(false), BlockKind(BK_Unknown), Decision(FD_Unformatted),
- PackingKind(PPK_Inconclusive), Type(TT_Unknown) {}
+ Finalized(false), ClosesRequiresClause(false), BlockKind(BK_Unknown),
+ Decision(FD_Unformatted), PackingKind(PPK_Inconclusive),
+ TypeIsFinalized(false), Type(TT_Unknown) {}
/// The \c Token.
Token Tok;
@@ -312,6 +299,9 @@ struct FormatToken {
/// changes.
unsigned Finalized : 1;
+  /// \c true if this is the last token within a requires clause.
+  unsigned ClosesRequiresClause : 1;
+
private:
/// Contains the kind of block if this token is a brace.
unsigned BlockKind : 2;
@@ -352,13 +342,31 @@ public:
}
private:
+ unsigned TypeIsFinalized : 1;
TokenType Type;
public:
/// Returns the token's type, e.g. whether "<" is a template opener or
/// binary operator.
TokenType getType() const { return Type; }
- void setType(TokenType T) { Type = T; }
+ void setType(TokenType T) {
+ assert((!TypeIsFinalized || T == Type) &&
+ "Please use overwriteFixedType to change a fixed type.");
+ Type = T;
+ }
+  /// Sets the type and also the finalized flag. This prevents the type from
+  /// being reset in TokenAnnotator::resetTokenMetadata(). If the type needs to
+  /// be set to another one, use overwriteFixedType, or better yet, remove the
+  /// need to reassign the type.
+ void setFinalizedType(TokenType T) {
+ Type = T;
+ TypeIsFinalized = true;
+ }
+ void overwriteFixedType(TokenType T) {
+ TypeIsFinalized = false;
+ setType(T);
+ }
+ bool isTypeFinalized() const { return TypeIsFinalized; }
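The whole finalization protocol in one place (sketch; FormatToken.h is a library-internal header and retypeDemo is an invented helper operating on some token reached during annotation):

    static void retypeDemo(clang::format::FormatToken &Tok) {
      using namespace clang::format;
      Tok.setFinalizedType(TT_RequiresClause); // pinned: survives resetTokenMetadata()
      Tok.setType(TT_RequiresClause);          // OK: same type, the assert holds
      // Tok.setType(TT_Unknown);              // would assert in a debug build
      Tok.overwriteFixedType(TT_Unknown);      // deliberate override; unpins the type
    }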
/// The number of newlines immediately before the \c Token.
///
@@ -475,6 +483,12 @@ public:
/// Is optional and can be removed.
bool Optional = false;
+ /// Number of optional braces to be inserted after this token:
+ /// -1: a single left brace
+ /// 0: no braces
+ /// >0: number of right braces
+ int8_t BraceCount = 0;
+
/// If this token starts a block, this contains all the unwrapped lines
/// in it.
SmallVector<AnnotatedLine *, 1> Children;
@@ -679,7 +693,7 @@ public:
}
/// Returns the previous token ignoring comments.
- FormatToken *getPreviousNonComment() const {
+ LLVM_NODISCARD FormatToken *getPreviousNonComment() const {
FormatToken *Tok = Previous;
while (Tok && Tok->is(tok::comment))
Tok = Tok->Previous;
@@ -687,7 +701,7 @@ public:
}
/// Returns the next token ignoring comments.
- const FormatToken *getNextNonComment() const {
+ LLVM_NODISCARD const FormatToken *getNextNonComment() const {
const FormatToken *Tok = Next;
while (Tok && Tok->is(tok::comment))
Tok = Tok->Next;
@@ -696,19 +710,7 @@ public:
/// Returns \c true if this token starts a block-type list, i.e. a
/// list that should be indented with a block indent.
- bool opensBlockOrBlockTypeList(const FormatStyle &Style) const {
- // C# Does not indent object initialisers as continuations.
- if (is(tok::l_brace) && getBlockKind() == BK_BracedInit && Style.isCSharp())
- return true;
- if (is(TT_TemplateString) && opensScope())
- return true;
- return is(TT_ArrayInitializerLSquare) || is(TT_ProtoExtensionLSquare) ||
- (is(tok::l_brace) &&
- (getBlockKind() == BK_Block || is(TT_DictLiteral) ||
- (!Style.Cpp11BracedListStyle && NestingLevel == 0))) ||
- (is(tok::less) && (Style.Language == FormatStyle::LK_Proto ||
- Style.Language == FormatStyle::LK_TextProto));
- }
+ LLVM_NODISCARD bool opensBlockOrBlockTypeList(const FormatStyle &Style) const;
/// Returns whether the token is the left square bracket of a C++
/// structured binding declaration.
@@ -928,6 +930,7 @@ struct AdditionalKeywords {
kw___has_include_next = &IdentTable.get("__has_include_next");
kw_mark = &IdentTable.get("mark");
+ kw_region = &IdentTable.get("region");
kw_extend = &IdentTable.get("extend");
kw_option = &IdentTable.get("option");
@@ -941,6 +944,10 @@ struct AdditionalKeywords {
kw_slots = &IdentTable.get("slots");
kw_qslots = &IdentTable.get("Q_SLOTS");
+ // For internal clang-format use.
+ kw_internal_ident_after_define =
+ &IdentTable.get("__CLANG_FORMAT_INTERNAL_IDENT_AFTER_DEFINE__");
+
// C# keywords
kw_dollar = &IdentTable.get("dollar");
kw_base = &IdentTable.get("base");
@@ -951,6 +958,7 @@ struct AdditionalKeywords {
kw_event = &IdentTable.get("event");
kw_fixed = &IdentTable.get("fixed");
kw_foreach = &IdentTable.get("foreach");
+ kw_init = &IdentTable.get("init");
kw_implicit = &IdentTable.get("implicit");
kw_internal = &IdentTable.get("internal");
kw_lock = &IdentTable.get("lock");
@@ -971,6 +979,138 @@ struct AdditionalKeywords {
kw_when = &IdentTable.get("when");
kw_where = &IdentTable.get("where");
+ kw_always = &IdentTable.get("always");
+ kw_always_comb = &IdentTable.get("always_comb");
+ kw_always_ff = &IdentTable.get("always_ff");
+ kw_always_latch = &IdentTable.get("always_latch");
+ kw_assign = &IdentTable.get("assign");
+ kw_assume = &IdentTable.get("assume");
+ kw_automatic = &IdentTable.get("automatic");
+ kw_before = &IdentTable.get("before");
+ kw_begin = &IdentTable.get("begin");
+ kw_begin_keywords = &IdentTable.get("begin_keywords");
+ kw_bins = &IdentTable.get("bins");
+ kw_binsof = &IdentTable.get("binsof");
+ kw_casex = &IdentTable.get("casex");
+ kw_casez = &IdentTable.get("casez");
+ kw_celldefine = &IdentTable.get("celldefine");
+ kw_checker = &IdentTable.get("checker");
+ kw_clocking = &IdentTable.get("clocking");
+ kw_constraint = &IdentTable.get("constraint");
+ kw_cover = &IdentTable.get("cover");
+ kw_covergroup = &IdentTable.get("covergroup");
+ kw_coverpoint = &IdentTable.get("coverpoint");
+ kw_default_decay_time = &IdentTable.get("default_decay_time");
+ kw_default_nettype = &IdentTable.get("default_nettype");
+ kw_default_trireg_strength = &IdentTable.get("default_trireg_strength");
+ kw_delay_mode_distributed = &IdentTable.get("delay_mode_distributed");
+ kw_delay_mode_path = &IdentTable.get("delay_mode_path");
+ kw_delay_mode_unit = &IdentTable.get("delay_mode_unit");
+ kw_delay_mode_zero = &IdentTable.get("delay_mode_zero");
+ kw_disable = &IdentTable.get("disable");
+ kw_dist = &IdentTable.get("dist");
+ kw_elsif = &IdentTable.get("elsif");
+ kw_end = &IdentTable.get("end");
+ kw_end_keywords = &IdentTable.get("end_keywords");
+ kw_endcase = &IdentTable.get("endcase");
+ kw_endcelldefine = &IdentTable.get("endcelldefine");
+ kw_endchecker = &IdentTable.get("endchecker");
+ kw_endclass = &IdentTable.get("endclass");
+ kw_endclocking = &IdentTable.get("endclocking");
+ kw_endfunction = &IdentTable.get("endfunction");
+ kw_endgenerate = &IdentTable.get("endgenerate");
+ kw_endgroup = &IdentTable.get("endgroup");
+ kw_endinterface = &IdentTable.get("endinterface");
+ kw_endmodule = &IdentTable.get("endmodule");
+ kw_endpackage = &IdentTable.get("endpackage");
+ kw_endprimitive = &IdentTable.get("endprimitive");
+ kw_endprogram = &IdentTable.get("endprogram");
+ kw_endproperty = &IdentTable.get("endproperty");
+ kw_endsequence = &IdentTable.get("endsequence");
+ kw_endspecify = &IdentTable.get("endspecify");
+ kw_endtable = &IdentTable.get("endtable");
+ kw_endtask = &IdentTable.get("endtask");
+ kw_forever = &IdentTable.get("forever");
+ kw_fork = &IdentTable.get("fork");
+ kw_generate = &IdentTable.get("generate");
+ kw_highz0 = &IdentTable.get("highz0");
+ kw_highz1 = &IdentTable.get("highz1");
+ kw_iff = &IdentTable.get("iff");
+ kw_ifnone = &IdentTable.get("ifnone");
+ kw_ignore_bins = &IdentTable.get("ignore_bins");
+ kw_illegal_bins = &IdentTable.get("illegal_bins");
+ kw_initial = &IdentTable.get("initial");
+ kw_inout = &IdentTable.get("inout");
+ kw_input = &IdentTable.get("input");
+ kw_inside = &IdentTable.get("inside");
+ kw_interconnect = &IdentTable.get("interconnect");
+ kw_intersect = &IdentTable.get("intersect");
+ kw_join = &IdentTable.get("join");
+ kw_join_any = &IdentTable.get("join_any");
+ kw_join_none = &IdentTable.get("join_none");
+ kw_large = &IdentTable.get("large");
+ kw_local = &IdentTable.get("local");
+ kw_localparam = &IdentTable.get("localparam");
+ kw_macromodule = &IdentTable.get("macromodule");
+ kw_matches = &IdentTable.get("matches");
+ kw_medium = &IdentTable.get("medium");
+ kw_nounconnected_drive = &IdentTable.get("nounconnected_drive");
+ kw_output = &IdentTable.get("output");
+ kw_packed = &IdentTable.get("packed");
+ kw_parameter = &IdentTable.get("parameter");
+ kw_primitive = &IdentTable.get("primitive");
+ kw_priority = &IdentTable.get("priority");
+ kw_program = &IdentTable.get("program");
+ kw_property = &IdentTable.get("property");
+ kw_pull0 = &IdentTable.get("pull0");
+ kw_pull1 = &IdentTable.get("pull1");
+ kw_pure = &IdentTable.get("pure");
+ kw_rand = &IdentTable.get("rand");
+ kw_randc = &IdentTable.get("randc");
+ kw_randcase = &IdentTable.get("randcase");
+ kw_randsequence = &IdentTable.get("randsequence");
+ kw_repeat = &IdentTable.get("repeat");
+ kw_resetall = &IdentTable.get("resetall");
+ kw_sample = &IdentTable.get("sample");
+ kw_scalared = &IdentTable.get("scalared");
+ kw_sequence = &IdentTable.get("sequence");
+ kw_small = &IdentTable.get("small");
+ kw_soft = &IdentTable.get("soft");
+ kw_solve = &IdentTable.get("solve");
+ kw_specify = &IdentTable.get("specify");
+ kw_specparam = &IdentTable.get("specparam");
+ kw_strong0 = &IdentTable.get("strong0");
+ kw_strong1 = &IdentTable.get("strong1");
+ kw_supply0 = &IdentTable.get("supply0");
+ kw_supply1 = &IdentTable.get("supply1");
+ kw_table = &IdentTable.get("table");
+ kw_tagged = &IdentTable.get("tagged");
+ kw_task = &IdentTable.get("task");
+ kw_timescale = &IdentTable.get("timescale");
+ kw_tri = &IdentTable.get("tri");
+ kw_tri0 = &IdentTable.get("tri0");
+ kw_tri1 = &IdentTable.get("tri1");
+ kw_triand = &IdentTable.get("triand");
+ kw_trior = &IdentTable.get("trior");
+ kw_trireg = &IdentTable.get("trireg");
+ kw_unconnected_drive = &IdentTable.get("unconnected_drive");
+ kw_undefineall = &IdentTable.get("undefineall");
+ kw_unique = &IdentTable.get("unique");
+ kw_unique0 = &IdentTable.get("unique0");
+ kw_uwire = &IdentTable.get("uwire");
+ kw_vectored = &IdentTable.get("vectored");
+ kw_wand = &IdentTable.get("wand");
+ kw_weak0 = &IdentTable.get("weak0");
+ kw_weak1 = &IdentTable.get("weak1");
+ kw_wildcard = &IdentTable.get("wildcard");
+ kw_wire = &IdentTable.get("wire");
+ kw_with = &IdentTable.get("with");
+ kw_wor = &IdentTable.get("wor");
+
+ // Symbols that are treated as keywords.
+ kw_verilogHash = &IdentTable.get("#");
+ kw_verilogHashHash = &IdentTable.get("##");
+
// Keep this at the end of the constructor to make sure everything here
// is
// already initialized.
@@ -983,17 +1123,147 @@ struct AdditionalKeywords {
CSharpExtraKeywords = std::unordered_set<IdentifierInfo *>(
{kw_base, kw_byte, kw_checked, kw_decimal, kw_delegate, kw_event,
- kw_fixed, kw_foreach, kw_implicit, kw_in, kw_interface, kw_internal,
- kw_is, kw_lock, kw_null, kw_object, kw_out, kw_override, kw_params,
- kw_readonly, kw_ref, kw_string, kw_stackalloc, kw_sbyte, kw_sealed,
- kw_uint, kw_ulong, kw_unchecked, kw_unsafe, kw_ushort, kw_when,
- kw_where,
+ kw_fixed, kw_foreach, kw_implicit, kw_in, kw_init, kw_interface,
+ kw_internal, kw_is, kw_lock, kw_null, kw_object, kw_out, kw_override,
+ kw_params, kw_readonly, kw_ref, kw_string, kw_stackalloc, kw_sbyte,
+ kw_sealed, kw_uint, kw_ulong, kw_unchecked, kw_unsafe, kw_ushort,
+ kw_when, kw_where,
// Keywords from the JavaScript section.
kw_as, kw_async, kw_await, kw_declare, kw_finally, kw_from,
kw_function, kw_get, kw_import, kw_is, kw_let, kw_module, kw_readonly,
kw_set, kw_type, kw_typeof, kw_var, kw_yield,
// Keywords from the Java section.
kw_abstract, kw_extends, kw_implements, kw_instanceof, kw_interface});
+
+ // Some keywords are not included here either because they need no special
+ // treatment (like `showcancelled`) or because they should be treated as
+ // identifiers (like `int` and `logic`).
+ VerilogExtraKeywords =
+ std::unordered_set<IdentifierInfo *>({kw_always,
+ kw_always_comb,
+ kw_always_ff,
+ kw_always_latch,
+ kw_assert,
+ kw_assign,
+ kw_assume,
+ kw_automatic,
+ kw_before,
+ kw_begin,
+ kw_bins,
+ kw_binsof,
+ kw_casex,
+ kw_casez,
+ kw_celldefine,
+ kw_checker,
+ kw_clocking,
+ kw_constraint,
+ kw_cover,
+ kw_covergroup,
+ kw_coverpoint,
+ kw_disable,
+ kw_dist,
+ kw_end,
+ kw_endcase,
+ kw_endchecker,
+ kw_endclass,
+ kw_endclocking,
+ kw_endfunction,
+ kw_endgenerate,
+ kw_endgroup,
+ kw_endinterface,
+ kw_endmodule,
+ kw_endpackage,
+ kw_endprimitive,
+ kw_endprogram,
+ kw_endproperty,
+ kw_endsequence,
+ kw_endspecify,
+ kw_endtable,
+ kw_endtask,
+ kw_extends,
+ kw_final,
+ kw_foreach,
+ kw_forever,
+ kw_fork,
+ kw_function,
+ kw_generate,
+ kw_highz0,
+ kw_highz1,
+ kw_iff,
+ kw_ifnone,
+ kw_ignore_bins,
+ kw_illegal_bins,
+ kw_implements,
+ kw_import,
+ kw_initial,
+ kw_inout,
+ kw_input,
+ kw_inside,
+ kw_interconnect,
+ kw_interface,
+ kw_intersect,
+ kw_join,
+ kw_join_any,
+ kw_join_none,
+ kw_large,
+ kw_let,
+ kw_local,
+ kw_localparam,
+ kw_macromodule,
+ kw_matches,
+ kw_medium,
+ kw_output,
+ kw_package,
+ kw_packed,
+ kw_parameter,
+ kw_primitive,
+ kw_priority,
+ kw_program,
+ kw_property,
+ kw_pull0,
+ kw_pull1,
+ kw_pure,
+ kw_rand,
+ kw_randc,
+ kw_randcase,
+ kw_randsequence,
+ kw_ref,
+ kw_repeat,
+ kw_sample,
+ kw_scalared,
+ kw_sequence,
+ kw_small,
+ kw_soft,
+ kw_solve,
+ kw_specify,
+ kw_specparam,
+ kw_strong0,
+ kw_strong1,
+ kw_supply0,
+ kw_supply1,
+ kw_table,
+ kw_tagged,
+ kw_task,
+ kw_tri,
+ kw_tri0,
+ kw_tri1,
+ kw_triand,
+ kw_trior,
+ kw_trireg,
+ kw_unique,
+ kw_unique0,
+ kw_uwire,
+ kw_var,
+ kw_vectored,
+ kw_wand,
+ kw_weak0,
+ kw_weak1,
+ kw_wildcard,
+ kw_wire,
+ kw_with,
+ kw_wor,
+ kw_verilogHash,
+ kw_verilogHashHash});
}
// Context sensitive keywords.
@@ -1046,6 +1316,7 @@ struct AdditionalKeywords {
// Pragma keywords.
IdentifierInfo *kw_mark;
+ IdentifierInfo *kw_region;
// Proto keywords.
IdentifierInfo *kw_extend;
@@ -1061,6 +1332,9 @@ struct AdditionalKeywords {
IdentifierInfo *kw_slots;
IdentifierInfo *kw_qslots;
+ // For internal use by clang-format.
+ IdentifierInfo *kw_internal_ident_after_define;
+
// C# keywords
IdentifierInfo *kw_dollar;
IdentifierInfo *kw_base;
@@ -1072,6 +1346,7 @@ struct AdditionalKeywords {
IdentifierInfo *kw_fixed;
IdentifierInfo *kw_foreach;
IdentifierInfo *kw_implicit;
+ IdentifierInfo *kw_init;
IdentifierInfo *kw_internal;
IdentifierInfo *kw_lock;
@@ -1094,6 +1369,146 @@ struct AdditionalKeywords {
IdentifierInfo *kw_when;
IdentifierInfo *kw_where;
+ // Verilog keywords
+ IdentifierInfo *kw_always;
+ IdentifierInfo *kw_always_comb;
+ IdentifierInfo *kw_always_ff;
+ IdentifierInfo *kw_always_latch;
+ IdentifierInfo *kw_assign;
+ IdentifierInfo *kw_assume;
+ IdentifierInfo *kw_automatic;
+ IdentifierInfo *kw_before;
+ IdentifierInfo *kw_begin;
+ IdentifierInfo *kw_begin_keywords;
+ IdentifierInfo *kw_bins;
+ IdentifierInfo *kw_binsof;
+ IdentifierInfo *kw_casex;
+ IdentifierInfo *kw_casez;
+ IdentifierInfo *kw_celldefine;
+ IdentifierInfo *kw_checker;
+ IdentifierInfo *kw_clocking;
+ IdentifierInfo *kw_constraint;
+ IdentifierInfo *kw_cover;
+ IdentifierInfo *kw_covergroup;
+ IdentifierInfo *kw_coverpoint;
+ IdentifierInfo *kw_default_decay_time;
+ IdentifierInfo *kw_default_nettype;
+ IdentifierInfo *kw_default_trireg_strength;
+ IdentifierInfo *kw_delay_mode_distributed;
+ IdentifierInfo *kw_delay_mode_path;
+ IdentifierInfo *kw_delay_mode_unit;
+ IdentifierInfo *kw_delay_mode_zero;
+ IdentifierInfo *kw_disable;
+ IdentifierInfo *kw_dist;
+ IdentifierInfo *kw_elsif;
+ IdentifierInfo *kw_end;
+ IdentifierInfo *kw_end_keywords;
+ IdentifierInfo *kw_endcase;
+ IdentifierInfo *kw_endcelldefine;
+ IdentifierInfo *kw_endchecker;
+ IdentifierInfo *kw_endclass;
+ IdentifierInfo *kw_endclocking;
+ IdentifierInfo *kw_endfunction;
+ IdentifierInfo *kw_endgenerate;
+ IdentifierInfo *kw_endgroup;
+ IdentifierInfo *kw_endinterface;
+ IdentifierInfo *kw_endmodule;
+ IdentifierInfo *kw_endpackage;
+ IdentifierInfo *kw_endprimitive;
+ IdentifierInfo *kw_endprogram;
+ IdentifierInfo *kw_endproperty;
+ IdentifierInfo *kw_endsequence;
+ IdentifierInfo *kw_endspecify;
+ IdentifierInfo *kw_endtable;
+ IdentifierInfo *kw_endtask;
+ IdentifierInfo *kw_forever;
+ IdentifierInfo *kw_fork;
+ IdentifierInfo *kw_generate;
+ IdentifierInfo *kw_highz0;
+ IdentifierInfo *kw_highz1;
+ IdentifierInfo *kw_iff;
+ IdentifierInfo *kw_ifnone;
+ IdentifierInfo *kw_ignore_bins;
+ IdentifierInfo *kw_illegal_bins;
+ IdentifierInfo *kw_initial;
+ IdentifierInfo *kw_inout;
+ IdentifierInfo *kw_input;
+ IdentifierInfo *kw_inside;
+ IdentifierInfo *kw_interconnect;
+ IdentifierInfo *kw_intersect;
+ IdentifierInfo *kw_join;
+ IdentifierInfo *kw_join_any;
+ IdentifierInfo *kw_join_none;
+ IdentifierInfo *kw_large;
+ IdentifierInfo *kw_local;
+ IdentifierInfo *kw_localparam;
+ IdentifierInfo *kw_macromodule;
+ IdentifierInfo *kw_matches;
+ IdentifierInfo *kw_medium;
+ IdentifierInfo *kw_nounconnected_drive;
+ IdentifierInfo *kw_output;
+ IdentifierInfo *kw_packed;
+ IdentifierInfo *kw_parameter;
+ IdentifierInfo *kw_primitive;
+ IdentifierInfo *kw_priority;
+ IdentifierInfo *kw_program;
+ IdentifierInfo *kw_property;
+ IdentifierInfo *kw_pull0;
+ IdentifierInfo *kw_pull1;
+ IdentifierInfo *kw_pure;
+ IdentifierInfo *kw_rand;
+ IdentifierInfo *kw_randc;
+ IdentifierInfo *kw_randcase;
+ IdentifierInfo *kw_randsequence;
+ IdentifierInfo *kw_repeat;
+ IdentifierInfo *kw_resetall;
+ IdentifierInfo *kw_sample;
+ IdentifierInfo *kw_scalared;
+ IdentifierInfo *kw_sequence;
+ IdentifierInfo *kw_small;
+ IdentifierInfo *kw_soft;
+ IdentifierInfo *kw_solve;
+ IdentifierInfo *kw_specify;
+ IdentifierInfo *kw_specparam;
+ IdentifierInfo *kw_strong0;
+ IdentifierInfo *kw_strong1;
+ IdentifierInfo *kw_supply0;
+ IdentifierInfo *kw_supply1;
+ IdentifierInfo *kw_table;
+ IdentifierInfo *kw_tagged;
+ IdentifierInfo *kw_task;
+ IdentifierInfo *kw_timescale;
+ IdentifierInfo *kw_tri0;
+ IdentifierInfo *kw_tri1;
+ IdentifierInfo *kw_tri;
+ IdentifierInfo *kw_triand;
+ IdentifierInfo *kw_trior;
+ IdentifierInfo *kw_trireg;
+ IdentifierInfo *kw_unconnected_drive;
+ IdentifierInfo *kw_undefineall;
+ IdentifierInfo *kw_unique;
+ IdentifierInfo *kw_unique0;
+ IdentifierInfo *kw_uwire;
+ IdentifierInfo *kw_vectored;
+ IdentifierInfo *kw_wand;
+ IdentifierInfo *kw_weak0;
+ IdentifierInfo *kw_weak1;
+ IdentifierInfo *kw_wildcard;
+ IdentifierInfo *kw_wire;
+ IdentifierInfo *kw_with;
+ IdentifierInfo *kw_wor;
+
+ // Workaround for hashes and backticks in Verilog.
+ IdentifierInfo *kw_verilogHash;
+ IdentifierInfo *kw_verilogHashHash;
+
+ /// Returns \c true if \p Tok is a keyword or an identifier.
+ bool isWordLike(const FormatToken &Tok) const {
+ // getIdentifierInfo returns non-null for keywords as well as identifiers.
+ return Tok.Tok.getIdentifierInfo() != nullptr &&
+ !Tok.isOneOf(kw_verilogHash, kw_verilogHashHash);
+ }
+
/// Returns \c true if \p Tok is a true JavaScript identifier, returns
/// \c false if it is a keyword or a pseudo keyword.
/// If \c AcceptIdentifierName is true, returns true not only for keywords,
@@ -1220,12 +1635,101 @@ struct AdditionalKeywords {
}
}
+ bool isVerilogIdentifier(const FormatToken &Tok) const {
+ switch (Tok.Tok.getKind()) {
+ case tok::kw_case:
+ case tok::kw_class:
+ case tok::kw_const:
+ case tok::kw_continue:
+ case tok::kw_default:
+ case tok::kw_do:
+ case tok::kw_extern:
+ case tok::kw_else:
+ case tok::kw_enum:
+ case tok::kw_for:
+ case tok::kw_if:
+ case tok::kw_restrict:
+ case tok::kw_signed:
+ case tok::kw_static:
+ case tok::kw_struct:
+ case tok::kw_typedef:
+ case tok::kw_union:
+ case tok::kw_unsigned:
+ case tok::kw_virtual:
+ case tok::kw_while:
+ return false;
+ case tok::identifier:
+ return VerilogExtraKeywords.find(Tok.Tok.getIdentifierInfo()) ==
+ VerilogExtraKeywords.end();
+ default:
+ // getIdentifierInfo returns non-null for both identifiers and keywords.
+ return Tok.Tok.getIdentifierInfo() != nullptr;
+ }
+ }
+
+ /// Returns whether \p Tok is a Verilog preprocessor directive. This is
+ /// needed because macro expansions start with a backtick as well and they
+ /// need to be treated differently.
+ bool isVerilogPPDirective(const FormatToken &Tok) const {
+ auto Info = Tok.Tok.getIdentifierInfo();
+ if (!Info)
+ return false;
+ switch (Info->getPPKeywordID()) {
+ case tok::pp_define:
+ case tok::pp_else:
+ case tok::pp_endif:
+ case tok::pp_ifdef:
+ case tok::pp_ifndef:
+ case tok::pp_include:
+ case tok::pp_line:
+ case tok::pp_pragma:
+ case tok::pp_undef:
+ return true;
+ default:
+ return Tok.isOneOf(kw_begin_keywords, kw_celldefine,
+ kw_default_decay_time, kw_default_nettype,
+ kw_default_trireg_strength, kw_delay_mode_distributed,
+ kw_delay_mode_path, kw_delay_mode_unit,
+ kw_delay_mode_zero, kw_elsif, kw_end_keywords,
+ kw_endcelldefine, kw_nounconnected_drive, kw_resetall,
+ kw_timescale, kw_unconnected_drive, kw_undefineall);
+ }
+ }
+
+ /// Returns whether \p Tok is a Verilog keyword that opens a block.
+ bool isVerilogBegin(const FormatToken &Tok) const {
+ // `table` is not included since it needs to be treated specially.
+ return !Tok.endsSequence(kw_fork, kw_disable) &&
+ Tok.isOneOf(kw_begin, kw_fork, kw_generate, kw_specify);
+ }
+
+ /// Returns whether \p Tok is a Verilog keyword that closes a block.
+ bool isVerilogEnd(const FormatToken &Tok) const {
+ return !Tok.endsSequence(kw_join, kw_rand) &&
+ Tok.isOneOf(TT_MacroBlockEnd, kw_end, kw_endcase, kw_endclass,
+ kw_endclocking, kw_endchecker, kw_endfunction,
+ kw_endgenerate, kw_endgroup, kw_endinterface,
+ kw_endmodule, kw_endpackage, kw_endprimitive,
+ kw_endprogram, kw_endproperty, kw_endsequence,
+ kw_endspecify, kw_endtable, kw_endtask, kw_join,
+ kw_join_any, kw_join_none);
+ }
+
+ /// Whether the token begins a block.
+ bool isBlockBegin(const FormatToken &Tok, const FormatStyle &Style) const {
+ return Tok.is(TT_MacroBlockBegin) ||
+ (Style.isVerilog() ? isVerilogBegin(Tok) : Tok.is(tok::l_brace));
+ }
+
private:
/// The JavaScript keywords beyond the C++ keyword set.
std::unordered_set<IdentifierInfo *> JsExtraKeywords;
/// The C# keywords beyond the C++ keyword set
std::unordered_set<IdentifierInfo *> CSharpExtraKeywords;
+
+ /// The Verilog keywords beyond the C++ keyword set.
+ std::unordered_set<IdentifierInfo *> VerilogExtraKeywords;
};
} // namespace format
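A rough sketch of how the new Verilog predicates compose (hypothetical caller; `parseLevel` and the in-scope `Keywords`/`Style` are assumptions, not part of this patch):

    // A Verilog line opens a block with a keyword such as `begin` or `fork`
    // rather than `{`; isBlockBegin hides that difference from the parser.
    void parseLevel(const FormatToken *Tok, const AdditionalKeywords &Keywords,
                    const FormatStyle &Style) {
      for (; Tok; Tok = Tok->getNextNonComment()) {
        // `disable fork` is excluded by isVerilogBegin, so this fires only
        // for tokens that really open a block.
        if (Keywords.isBlockBegin(*Tok, Style)) {
          // ... recurse into the nested block ...
        }
      }
    }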
diff --git a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp
index c9166f4b17aa..66f03dcb53a1 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.cpp
@@ -28,13 +28,13 @@ FormatTokenLexer::FormatTokenLexer(
llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator,
IdentifierTable &IdentTable)
: FormatTok(nullptr), IsFirstToken(true), StateStack({LexerState::NORMAL}),
- Column(Column), TrailingWhitespace(0), SourceMgr(SourceMgr), ID(ID),
+ Column(Column), TrailingWhitespace(0),
+ LangOpts(getFormattingLangOpts(Style)), SourceMgr(SourceMgr), ID(ID),
Style(Style), IdentTable(IdentTable), Keywords(IdentTable),
Encoding(Encoding), Allocator(Allocator), FirstInLineIndex(0),
FormattingDisabled(false), MacroBlockBeginRegex(Style.MacroBlockBegin),
MacroBlockEndRegex(Style.MacroBlockEnd) {
- Lex.reset(new Lexer(ID, SourceMgr.getBufferOrFake(ID), SourceMgr,
- getFormattingLangOpts(Style)));
+ Lex.reset(new Lexer(ID, SourceMgr.getBufferOrFake(ID), SourceMgr, LangOpts));
Lex->SetKeepWhitespaceMode(true);
for (const std::string &ForEachMacro : Style.ForEachMacros) {
@@ -85,13 +85,14 @@ ArrayRef<FormatToken *> FormatTokenLexer::lex() {
if (Style.Language == FormatStyle::LK_TextProto)
tryParsePythonComment();
tryMergePreviousTokens();
- if (Style.isCSharp())
+ if (Style.isCSharp()) {
// This needs to come after tokens have been merged so that C#
// string literals are correctly identified.
handleCSharpVerbatimAndInterpolatedStrings();
+ }
if (Tokens.back()->NewlinesBefore > 0 || Tokens.back()->IsMultiline)
FirstInLineIndex = Tokens.size() - 1;
- } while (Tokens.back()->Tok.isNot(tok::eof));
+ } while (Tokens.back()->isNot(tok::eof));
return Tokens;
}
@@ -126,9 +127,8 @@ void FormatTokenLexer::tryMergePreviousTokens() {
Tokens.back()->Tok.setKind(tok::period);
return;
}
- if (tryMergeNullishCoalescingEqual()) {
+ if (tryMergeNullishCoalescingEqual())
return;
- }
}
if (Style.isCSharp()) {
@@ -336,8 +336,9 @@ bool FormatTokenLexer::tryMergeNullishCoalescingEqual() {
auto &NullishCoalescing = *(Tokens.end() - 2);
auto &Equal = *(Tokens.end() - 1);
if (NullishCoalescing->getType() != TT_NullCoalescingOperator ||
- !Equal->is(tok::equal))
+ !Equal->is(tok::equal)) {
return false;
+ }
NullishCoalescing->Tok.setKind(tok::equal); // no '??=' in clang tokens.
NullishCoalescing->TokenText =
StringRef(NullishCoalescing->TokenText.begin(),
@@ -501,7 +502,7 @@ bool FormatTokenLexer::canPrecedeRegexLiteral(FormatToken *Prev) {
// `!` is a unary prefix operator, but also a postfix operator that casts
// away nullability, so the same check applies.
if (Prev->isOneOf(tok::plusplus, tok::minusminus, tok::exclaim))
- return (Tokens.size() < 3 || precedesOperand(Tokens[Tokens.size() - 3]));
+ return Tokens.size() < 3 || precedesOperand(Tokens[Tokens.size() - 3]);
// The previous token must introduce an operand location where regex
// literals can occur.
@@ -578,8 +579,9 @@ void FormatTokenLexer::handleCSharpVerbatimAndInterpolatedStrings() {
// Deal with multiline strings.
if (!(CSharpStringLiteral->TokenText.startswith(R"(@")") ||
- CSharpStringLiteral->TokenText.startswith(R"($@")")))
+ CSharpStringLiteral->TokenText.startswith(R"($@")"))) {
return;
+ }
const char *StrBegin =
Lex->getBufferLocation() - CSharpStringLiteral->TokenText.size();
@@ -621,9 +623,9 @@ void FormatTokenLexer::handleCSharpVerbatimAndInterpolatedStrings() {
if (LastBreak != StringRef::npos) {
CSharpStringLiteral->IsMultiline = true;
unsigned StartColumn = 0;
- CSharpStringLiteral->LastLineColumnWidth = encoding::columnWidthWithTabs(
- LiteralText.substr(LastBreak + 1, LiteralText.size()), StartColumn,
- Style.TabWidth, Encoding);
+ CSharpStringLiteral->LastLineColumnWidth =
+ encoding::columnWidthWithTabs(LiteralText.substr(LastBreak + 1),
+ StartColumn, Style.TabWidth, Encoding);
}
SourceLocation loc = Offset < Lex->getBuffer().end()
@@ -688,9 +690,9 @@ void FormatTokenLexer::handleTemplateStrings() {
if (LastBreak != StringRef::npos) {
BacktickToken->IsMultiline = true;
unsigned StartColumn = 0; // The template tail spans the entire line.
- BacktickToken->LastLineColumnWidth = encoding::columnWidthWithTabs(
- LiteralText.substr(LastBreak + 1, LiteralText.size()), StartColumn,
- Style.TabWidth, Encoding);
+ BacktickToken->LastLineColumnWidth =
+ encoding::columnWidthWithTabs(LiteralText.substr(LastBreak + 1),
+ StartColumn, Style.TabWidth, Encoding);
}
SourceLocation loc = Offset < Lex->getBuffer().end()
@@ -780,19 +782,17 @@ bool FormatTokenLexer::tryMergeConflictMarkers() {
StringRef Buffer = SourceMgr.getBufferOrFake(ID).getBuffer();
// Calculate the offset of the start of the current line.
auto LineOffset = Buffer.rfind('\n', FirstInLineOffset);
- if (LineOffset == StringRef::npos) {
+ if (LineOffset == StringRef::npos)
LineOffset = 0;
- } else {
+ else
++LineOffset;
- }
auto FirstSpace = Buffer.find_first_of(" \n", LineOffset);
StringRef LineStart;
- if (FirstSpace == StringRef::npos) {
+ if (FirstSpace == StringRef::npos)
LineStart = Buffer.substr(LineOffset);
- } else {
+ else
LineStart = Buffer.substr(LineOffset, FirstSpace - LineOffset);
- }
TokenType Type = TT_Unknown;
if (LineStart == "<<<<<<<" || LineStart == ">>>>") {
@@ -840,6 +840,58 @@ FormatToken *FormatTokenLexer::getStashedToken() {
return FormatTok;
}
+/// Truncate the current token to the new length and make the lexer continue
+/// from the end of the truncated token. Used for other languages that have
+/// different token boundaries, like JavaScript in which a comment ends at a
+/// line break regardless of whether the line break follows a backslash. Also
+/// used to set the lexer to the end of whitespace if the lexer regards
+/// whitespace and an unrecognized symbol as one token.
+void FormatTokenLexer::truncateToken(size_t NewLen) {
+ assert(NewLen <= FormatTok->TokenText.size());
+ resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(
+ Lex->getBufferLocation() - FormatTok->TokenText.size() + NewLen)));
+ FormatTok->TokenText = FormatTok->TokenText.substr(0, NewLen);
+ FormatTok->ColumnWidth = encoding::columnWidthWithTabs(
+ FormatTok->TokenText, FormatTok->OriginalColumn, Style.TabWidth,
+ Encoding);
+ FormatTok->Tok.setLength(NewLen);
+}
+
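The backslash handling in getNextToken below uses this helper; condensed, the pattern is (a restatement of the loop further down, not new logic):

    // For JavaScript/Java, a line comment ends at the line break even if
    // the break is escaped, so cut the token at the backslash and re-lex
    // from right after it.
    size_t BackslashPos = FormatTok->TokenText.find('\\');
    if (BackslashPos != StringRef::npos &&
        BackslashPos + 1 < FormatTok->TokenText.size() &&
        FormatTok->TokenText[BackslashPos + 1] == '\n')
      truncateToken(BackslashPos + 1);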
+/// Count the length of leading whitespace in a token.
+static size_t countLeadingWhitespace(StringRef Text) {
+ // Basically counting the length matched by this regex.
+ // "^([\n\r\f\v \t]|(\\\\|\\?\\?/)[\n\r])+"
+ // Directly using the regex turned out to be slow. With the regex
+ // version formatting all files in this directory took about 1.25
+ // seconds. This version took about 0.5 seconds.
+ const unsigned char *const Begin = Text.bytes_begin();
+ const unsigned char *const End = Text.bytes_end();
+ const unsigned char *Cur = Begin;
+ while (Cur < End) {
+ if (isspace(Cur[0])) {
+ ++Cur;
+ } else if (Cur[0] == '\\' && (Cur[1] == '\n' || Cur[1] == '\r')) {
+ // A '\' followed by a newline always escapes the newline, regardless
+ // of whether there is another '\' before it.
+ // The source has a null byte at the end, so reading Cur[1] cannot run
+ // past the end of the input. The lexer also never breaks an escaped
+ // newline apart.
+ assert(End - Cur >= 2);
+ Cur += 2;
+ } else if (Cur[0] == '?' && Cur[1] == '?' && Cur[2] == '/' &&
+ (Cur[3] == '\n' || Cur[3] == '\r')) {
+ // Newlines can also be escaped by a '?' '?' '/' trigraph. By the way, the
+ // characters are quoted individually in this comment because if we write
+ // them together some compilers warn that we have a trigraph in the code.
+ assert(End - Cur >= 4);
+ Cur += 4;
+ } else {
+ break;
+ }
+ }
+ return Cur - Begin;
+}
+
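Illustrative expectations for the helper above (these asserts are not in the source; the last string uses "?\?/" so the example itself doesn't spell a trigraph):

    assert(countLeadingWhitespace("  \tx") == 3);    // plain whitespace
    assert(countLeadingWhitespace("\\\n x") == 3);   // '\' + LF escape is 2 chars
    assert(countLeadingWhitespace("?\?/\n x") == 5); // trigraph escape is 4 chars
    assert(countLeadingWhitespace("x  ") == 0);      // stops at first non-space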
FormatToken *FormatTokenLexer::getNextToken() {
if (StateStack.top() == LexerState::TOKEN_STASHED) {
StateStack.pop();
@@ -854,34 +906,33 @@ FormatToken *FormatTokenLexer::getNextToken() {
IsFirstToken = false;
// Consume and record whitespace until we find a significant token.
+ // Some tok::unknown tokens are not just whitespace, e.g. whitespace
+ // followed by a symbol such as a backtick. Those symbols may be
+ // significant in other languages.
unsigned WhitespaceLength = TrailingWhitespace;
- while (FormatTok->Tok.is(tok::unknown)) {
+ while (FormatTok->isNot(tok::eof)) {
+ auto LeadingWhitespace = countLeadingWhitespace(FormatTok->TokenText);
+ if (LeadingWhitespace == 0)
+ break;
+ if (LeadingWhitespace < FormatTok->TokenText.size())
+ truncateToken(LeadingWhitespace);
StringRef Text = FormatTok->TokenText;
- auto EscapesNewline = [&](int pos) {
- // A '\r' here is just part of '\r\n'. Skip it.
- if (pos >= 0 && Text[pos] == '\r')
- --pos;
- // See whether there is an odd number of '\' before this.
- // FIXME: This is wrong. A '\' followed by a newline is always removed,
- // regardless of whether there is another '\' before it.
- // FIXME: Newlines can also be escaped by a '?' '?' '/' trigraph.
- unsigned count = 0;
- for (; pos >= 0; --pos, ++count)
- if (Text[pos] != '\\')
- break;
- return count & 1;
- };
- // FIXME: This miscounts tok:unknown tokens that are not just
- // whitespace, e.g. a '`' character.
+ bool InEscape = false;
for (int i = 0, e = Text.size(); i != e; ++i) {
switch (Text[i]) {
+ case '\r':
+ // If this is a CRLF sequence, break here and the LF will be handled on
+ // the next loop iteration. Otherwise, this is a single Mac CR, treat it
+ // the same as a single LF.
+ if (i + 1 < e && Text[i + 1] == '\n')
+ break;
+ LLVM_FALLTHROUGH;
case '\n':
++FormatTok->NewlinesBefore;
- FormatTok->HasUnescapedNewline = !EscapesNewline(i - 1);
- FormatTok->LastNewlineOffset = WhitespaceLength + i + 1;
- Column = 0;
- break;
- case '\r':
+ if (!InEscape)
+ FormatTok->HasUnescapedNewline = true;
+ else
+ InEscape = false;
FormatTok->LastNewlineOffset = WhitespaceLength + i + 1;
Column = 0;
break;
@@ -897,24 +948,32 @@ FormatToken *FormatTokenLexer::getNextToken() {
Style.TabWidth - (Style.TabWidth ? Column % Style.TabWidth : 0);
break;
case '\\':
- if (i + 1 == e || (Text[i + 1] != '\r' && Text[i + 1] != '\n'))
- FormatTok->setType(TT_ImplicitStringLiteral);
+ case '?':
+ case '/':
+ // The text was entirely whitespace when this loop was entered. Thus
+ // this has to be an escape sequence.
+ assert(Text.substr(i, 2) == "\\\r" || Text.substr(i, 2) == "\\\n" ||
+ Text.substr(i, 4) == "\?\?/\r" ||
+ Text.substr(i, 4) == "\?\?/\n" ||
+ (i >= 1 && (Text.substr(i - 1, 4) == "\?\?/\r" ||
+ Text.substr(i - 1, 4) == "\?\?/\n")) ||
+ (i >= 2 && (Text.substr(i - 2, 4) == "\?\?/\r" ||
+ Text.substr(i - 2, 4) == "\?\?/\n")));
+ InEscape = true;
break;
default:
- FormatTok->setType(TT_ImplicitStringLiteral);
+ // This shouldn't happen.
+ assert(false);
break;
}
- if (FormatTok->getType() == TT_ImplicitStringLiteral)
- break;
}
-
- if (FormatTok->is(TT_ImplicitStringLiteral))
- break;
- WhitespaceLength += FormatTok->Tok.getLength();
-
+ WhitespaceLength += Text.size();
readRawToken(*FormatTok);
}
+ if (FormatTok->is(tok::unknown))
+ FormatTok->setType(TT_ImplicitStringLiteral);
+
// JavaScript and Java do not allow escaping the end of the line with a
// backslash. Backslashes are syntax errors in plain source, but can occur in
// comments. When a single line comment ends with a \, it'll cause the next
@@ -928,39 +987,30 @@ FormatToken *FormatTokenLexer::getNextToken() {
while (BackslashPos != StringRef::npos) {
if (BackslashPos + 1 < FormatTok->TokenText.size() &&
FormatTok->TokenText[BackslashPos + 1] == '\n') {
- const char *Offset = Lex->getBufferLocation();
- Offset -= FormatTok->TokenText.size();
- Offset += BackslashPos + 1;
- resetLexer(SourceMgr.getFileOffset(Lex->getSourceLocation(Offset)));
- FormatTok->TokenText = FormatTok->TokenText.substr(0, BackslashPos + 1);
- FormatTok->ColumnWidth = encoding::columnWidthWithTabs(
- FormatTok->TokenText, FormatTok->OriginalColumn, Style.TabWidth,
- Encoding);
+ truncateToken(BackslashPos + 1);
break;
}
BackslashPos = FormatTok->TokenText.find('\\', BackslashPos + 1);
}
}
- // In case the token starts with escaped newlines, we want to
- // take them into account as whitespace - this pattern is quite frequent
- // in macro definitions.
- // FIXME: Add a more explicit test.
- while (FormatTok->TokenText.size() > 1 && FormatTok->TokenText[0] == '\\') {
- unsigned SkippedWhitespace = 0;
- if (FormatTok->TokenText.size() > 2 &&
- (FormatTok->TokenText[1] == '\r' && FormatTok->TokenText[2] == '\n'))
- SkippedWhitespace = 3;
- else if (FormatTok->TokenText[1] == '\n')
- SkippedWhitespace = 2;
- else
- break;
-
- ++FormatTok->NewlinesBefore;
- WhitespaceLength += SkippedWhitespace;
- FormatTok->LastNewlineOffset = SkippedWhitespace;
- Column = 0;
- FormatTok->TokenText = FormatTok->TokenText.substr(SkippedWhitespace);
+ if (Style.isVerilog()) {
+ // Verilog uses the backtick instead of the hash for preprocessor
+ // directives, and uses the hash for delays and parameter lists. So that
+ // `tok::hash` can continue to be used in other places, the backtick gets
+ // marked as the hash here; and so that the backtick and hash can be told
+ // apart in Verilog-specific code, the hash becomes an identifier.
+ if (FormatTok->isOneOf(tok::hash, tok::hashhash)) {
+ FormatTok->Tok.setKind(tok::raw_identifier);
+ } else if (FormatTok->is(tok::raw_identifier)) {
+ if (FormatTok->TokenText == "`") {
+ FormatTok->Tok.setIdentifierInfo(nullptr);
+ FormatTok->Tok.setKind(tok::hash);
+ } else if (FormatTok->TokenText == "``") {
+ FormatTok->Tok.setIdentifierInfo(nullptr);
+ FormatTok->Tok.setKind(tok::hashhash);
+ }
+ }
}
FormatTok->WhitespaceRange = SourceRange(
@@ -969,12 +1019,12 @@ FormatToken *FormatTokenLexer::getNextToken() {
FormatTok->OriginalColumn = Column;
TrailingWhitespace = 0;
- if (FormatTok->Tok.is(tok::comment)) {
+ if (FormatTok->is(tok::comment)) {
// FIXME: Add the trimmed whitespace to Column.
StringRef UntrimmedText = FormatTok->TokenText;
FormatTok->TokenText = FormatTok->TokenText.rtrim(" \t\v\f");
TrailingWhitespace = UntrimmedText.size() - FormatTok->TokenText.size();
- } else if (FormatTok->Tok.is(tok::raw_identifier)) {
+ } else if (FormatTok->is(tok::raw_identifier)) {
IdentifierInfo &Info = IdentTable.get(FormatTok->TokenText);
FormatTok->Tok.setIdentifierInfo(&Info);
FormatTok->Tok.setKind(Info.getTokenID());
@@ -989,12 +1039,12 @@ FormatToken *FormatTokenLexer::getNextToken() {
FormatTok->Tok.setKind(tok::identifier);
FormatTok->Tok.setIdentifierInfo(nullptr);
}
- } else if (FormatTok->Tok.is(tok::greatergreater)) {
+ } else if (FormatTok->is(tok::greatergreater)) {
FormatTok->Tok.setKind(tok::greater);
FormatTok->TokenText = FormatTok->TokenText.substr(0, 1);
++Column;
StateStack.push(LexerState::TOKEN_STASHED);
- } else if (FormatTok->Tok.is(tok::lessless)) {
+ } else if (FormatTok->is(tok::lessless)) {
FormatTok->Tok.setKind(tok::less);
FormatTok->TokenText = FormatTok->TokenText.substr(0, 1);
++Column;
@@ -1040,19 +1090,62 @@ FormatToken *FormatTokenLexer::getNextToken() {
FormatTok->Tok.setKind(tok::kw_if);
}
} else if (FormatTok->is(tok::identifier)) {
- if (MacroBlockBeginRegex.match(Text)) {
+ if (MacroBlockBeginRegex.match(Text))
FormatTok->setType(TT_MacroBlockBegin);
- } else if (MacroBlockEndRegex.match(Text)) {
+ else if (MacroBlockEndRegex.match(Text))
FormatTok->setType(TT_MacroBlockEnd);
- }
}
}
return FormatTok;
}
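One consequence of the remapping above, sketched as a hypothetical check (`isDelayHash` is not in the patch): after lexing Verilog, a real `#` carries the "#" identifier, so it can be matched against `kw_verilogHash` from AdditionalKeywords:

    bool isDelayHash(const FormatToken &Tok, const AdditionalKeywords &Keywords) {
      return Tok.is(Keywords.kw_verilogHash); // '#' was turned into an identifier
    }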
+bool FormatTokenLexer::readRawTokenVerilogSpecific(Token &Tok) {
+ // In Verilog the quote is not a character literal.
+ //
+ // Make the backtick and double backtick identifiers so that they are easier
+ // to match against.
+ //
+ // In Verilog an escaped identifier starts with a backslash and ends with
+ // whitespace, unless that whitespace is an escaped newline. A backslash can
+ // also begin an escaped newline outside of an escaped identifier. We check
+ // for that outside of the regex since we can't use negative lookahead
+ // assertions. Simply changing the '*' to '+' doesn't work because the
+ // escaped identifier may have a length of 0 according to Section A.9.3.
+ // FIXME: If there is an escaped newline in the middle of an escaped
+ // identifier, allow for pasting the two lines together, but escaped
+ // identifiers usually occur only in generated code anyway.
+ static const llvm::Regex VerilogToken(R"re(^('|``?|\\(\\)re"
+ "(\r?\n|\r)|[^[:space:]])*)");
+
+ SmallVector<StringRef, 4> Matches;
+ const char *Start = Lex->getBufferLocation();
+ if (!VerilogToken.match(StringRef(Start, Lex->getBuffer().end() - Start),
+ &Matches)) {
+ return false;
+ }
+ // There is a null byte at the end of the buffer, so we don't have to check
+ // that Start[1] is within the buffer.
+ if (Start[0] == '\\' && (Start[1] == '\r' || Start[1] == '\n'))
+ return false;
+ size_t Len = Matches[0].size();
+
+ // The kind has to be an identifier so we can match it against those defined
+ // in Keywords. The kind has to be set before the length because the setLength
+ // function checks that the kind is not an annotation.
+ Tok.setKind(tok::raw_identifier);
+ Tok.setLength(Len);
+ Tok.setLocation(Lex->getSourceLocation(Start, Len));
+ Tok.setRawIdentifierData(Start);
+ Lex->seek(Lex->getCurrentBufferOffset() + Len, /*IsAtStartOfLine=*/false);
+ return true;
+}
+
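Illustrative matches for the regex above (derived from its comments, not an exhaustive list):

    // "'"           the quote, which is not a character literal in Verilog
    // "`", "``"     backticks, remapped to hash/hashhash in getNextToken
    // "\\foo.bar "  an escaped identifier running up to the next whitespace
    // A backslash directly followed by a newline is matched by the regex but
    // rejected by the explicit Start[0]/Start[1] check, so it stays whitespace.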
void FormatTokenLexer::readRawToken(FormatToken &Tok) {
- Lex->LexFromRawLexer(Tok.Tok);
+ // For Verilog, first see if there is a special token, and fall back to the
+ // normal lexer if there isn't one.
+ if (!Style.isVerilog() || !readRawTokenVerilogSpecific(Tok.Tok))
+ Lex->LexFromRawLexer(Tok.Tok);
Tok.TokenText = StringRef(SourceMgr.getCharacterData(Tok.Tok.getLocation()),
Tok.Tok.getLength());
// For formatting, treat unterminated string literals like normal string
@@ -1087,9 +1180,9 @@ void FormatTokenLexer::readRawToken(FormatToken &Tok) {
void FormatTokenLexer::resetLexer(unsigned Offset) {
StringRef Buffer = SourceMgr.getBufferData(ID);
- Lex.reset(new Lexer(SourceMgr.getLocForStartOfFile(ID),
- getFormattingLangOpts(Style), Buffer.begin(),
- Buffer.begin() + Offset, Buffer.end()));
+ LangOpts = getFormattingLangOpts(Style);
+ Lex.reset(new Lexer(SourceMgr.getLocForStartOfFile(ID), LangOpts,
+ Buffer.begin(), Buffer.begin() + Offset, Buffer.end()));
Lex->SetKeepWhitespaceMode(true);
TrailingWhitespace = 0;
}
diff --git a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.h b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.h
index a9e3b2fd498a..bff2c181d81e 100644
--- a/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.h
+++ b/contrib/llvm-project/clang/lib/Format/FormatTokenLexer.h
@@ -17,6 +17,7 @@
#include "Encoding.h"
#include "FormatToken.h"
+#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Format/Format.h"
@@ -91,6 +92,8 @@ private:
bool tryMergeConflictMarkers();
+ void truncateToken(size_t NewLen);
+
FormatToken *getStashedToken();
FormatToken *getNextToken();
@@ -101,6 +104,7 @@ private:
unsigned Column;
unsigned TrailingWhitespace;
std::unique_ptr<Lexer> Lex;
+ LangOptions LangOpts;
const SourceManager &SourceMgr;
FileID ID;
const FormatStyle &Style;
@@ -122,6 +126,9 @@ private:
// Targets that may appear inside a C# attribute.
static const llvm::StringSet<> CSharpAttributeTargets;
+ /// Handle Verilog-specific tokens.
+ bool readRawTokenVerilogSpecific(Token &Tok);
+
void readRawToken(FormatToken &Tok);
void resetLexer(unsigned Offset);
diff --git a/contrib/llvm-project/clang/lib/Format/MacroExpander.cpp b/contrib/llvm-project/clang/lib/Format/MacroExpander.cpp
index de96cb24ba1f..9c6bcb8764f4 100644
--- a/contrib/llvm-project/clang/lib/Format/MacroExpander.cpp
+++ b/contrib/llvm-project/clang/lib/Format/MacroExpander.cpp
@@ -125,9 +125,8 @@ MacroExpander::MacroExpander(
IdentifierTable &IdentTable)
: SourceMgr(SourceMgr), Style(Style), Allocator(Allocator),
IdentTable(IdentTable) {
- for (const std::string &Macro : Macros) {
+ for (const std::string &Macro : Macros)
parseDefinition(Macro);
- }
}
MacroExpander::~MacroExpander() = default;
diff --git a/contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp b/contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp
index 9fb6c5142672..2615a499f7ab 100644
--- a/contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/NamespaceEndCommentsFixer.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "NamespaceEndCommentsFixer.h"
+#include "clang/Basic/TokenKinds.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Regex.h"
@@ -22,6 +23,40 @@ namespace clang {
namespace format {
namespace {
+// Iterates over all tokens from StartTok to EndTok and applies Fn to each of
+// them, including StartTok and EndTok. Returns the token after EndTok.
+const FormatToken *
+processTokens(const FormatToken *Tok, tok::TokenKind StartTok,
+ tok::TokenKind EndTok,
+ llvm::function_ref<void(const FormatToken *)> Fn) {
+ if (!Tok || Tok->isNot(StartTok))
+ return Tok;
+ int NestLevel = 0;
+ do {
+ if (Tok->is(StartTok))
+ ++NestLevel;
+ else if (Tok->is(EndTok))
+ --NestLevel;
+ if (Fn)
+ Fn(Tok);
+ Tok = Tok->getNextNonComment();
+ } while (Tok && NestLevel > 0);
+ return Tok;
+}
+
+const FormatToken *skipAttribute(const FormatToken *Tok) {
+ if (!Tok)
+ return nullptr;
+ if (Tok->is(tok::kw___attribute)) {
+ Tok = Tok->getNextNonComment();
+ Tok = processTokens(Tok, tok::l_paren, tok::r_paren, nullptr);
+ } else if (Tok->is(tok::l_square)) {
+ Tok = processTokens(Tok, tok::l_square, tok::r_square, nullptr);
+ }
+ return Tok;
+}
+
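skipAttribute handles one attribute at a time; computeName below therefore reapplies it as it walks the declaration. A hypothetical wrapper (not in the patch) makes the pattern explicit:

    const FormatToken *skipAllAttributes(const FormatToken *Tok) {
      // skipAttribute returns its argument unchanged once neither attribute
      // form matches, so this loop terminates.
      for (const FormatToken *Prev = nullptr; Tok && Tok != Prev;) {
        Prev = Tok;
        Tok = skipAttribute(Tok);
      }
      return Tok;
    }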
// Computes the name of a namespace given the namespace token.
// Returns "" for anonymous namespace.
std::string computeName(const FormatToken *NamespaceTok) {
@@ -39,26 +74,69 @@ std::string computeName(const FormatToken *NamespaceTok) {
name += Tok->TokenText;
Tok = Tok->getNextNonComment();
}
- } else {
- // For `namespace [[foo]] A::B::inline C {` or
- // `namespace MACRO1 MACRO2 A::B::inline C {`, returns "A::B::inline C".
- // Peek for the first '::' (or '{') and then return all tokens from one
- // token before that up until the '{'.
- const FormatToken *FirstNSTok = Tok;
- while (Tok && !Tok->is(tok::l_brace) && !Tok->is(tok::coloncolon)) {
- FirstNSTok = Tok;
- Tok = Tok->getNextNonComment();
- }
+ return name;
+ }
+ Tok = skipAttribute(Tok);
+
+ std::string FirstNSName;
+ // For `namespace [[foo]] A::B::inline C {` or
+ // `namespace MACRO1 MACRO2 A::B::inline C {`, returns "A::B::inline C".
+ // Peek for the first '::' (or '{' or '(') and then return all tokens from
+ // one token before that up until the '{'. A '(' might be a macro with
+ // arguments.
+ const FormatToken *FirstNSTok = nullptr;
+ while (Tok && !Tok->isOneOf(tok::l_brace, tok::coloncolon, tok::l_paren)) {
+ if (FirstNSTok)
+ FirstNSName += FirstNSTok->TokenText;
+ FirstNSTok = Tok;
+ Tok = Tok->getNextNonComment();
+ }
+ if (FirstNSTok)
Tok = FirstNSTok;
- while (Tok && !Tok->is(tok::l_brace)) {
- name += Tok->TokenText;
- if (Tok->is(tok::kw_inline))
+ Tok = skipAttribute(Tok);
+
+ FirstNSTok = nullptr;
+ // Add everything from '(' to ')'.
+ auto AddToken = [&name](const FormatToken *Tok) { name += Tok->TokenText; };
+ bool IsPrevColoncolon = false;
+ bool HasColoncolon = false;
+ bool IsPrevInline = false;
+ bool NameFinished = false;
+ // If we found '::' in the name, then it's the name. Otherwise we can't tell
+ // which token is the name. For example, `namespace A B {`.
+ while (Tok && Tok->isNot(tok::l_brace)) {
+ if (FirstNSTok) {
+ if (!IsPrevInline && HasColoncolon && !IsPrevColoncolon) {
+ if (FirstNSTok->is(tok::l_paren)) {
+ FirstNSTok = Tok =
+ processTokens(FirstNSTok, tok::l_paren, tok::r_paren, AddToken);
+ continue;
+ }
+ if (FirstNSTok->isNot(tok::coloncolon)) {
+ NameFinished = true;
+ break;
+ }
+ }
+ name += FirstNSTok->TokenText;
+ IsPrevColoncolon = FirstNSTok->is(tok::coloncolon);
+ HasColoncolon = HasColoncolon || IsPrevColoncolon;
+ if (FirstNSTok->is(tok::kw_inline)) {
name += " ";
- Tok = Tok->getNextNonComment();
+ IsPrevInline = true;
+ }
}
+ FirstNSTok = Tok;
+ Tok = Tok->getNextNonComment();
+ const FormatToken *TokAfterAttr = skipAttribute(Tok);
+ if (TokAfterAttr != Tok)
+ FirstNSTok = Tok = TokAfterAttr;
}
- return name;
+ if (!NameFinished && FirstNSTok && FirstNSTok->isNot(tok::l_brace))
+ name += FirstNSTok->TokenText;
+ if (FirstNSName.empty() || HasColoncolon)
+ return name;
+ return name.empty() ? FirstNSName : FirstNSName + " " + name;
}
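Expected results of computeName for a few declarations (compiled from the comments above; illustrative):

    // namespace A::B::inline C { ... }   -> "A::B::inline C"
    // namespace [[foo]] A::B { ... }     -> "A::B"
    // namespace MACRO1 MACRO2 A::B {}    -> "A::B"
    // namespace A B { ... }              -> "A B"  (can't tell which is the name)
    // namespace { ... }                  -> ""     (anonymous)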
std::string computeEndCommentText(StringRef NamespaceName, bool AddNewline,
@@ -132,12 +210,11 @@ bool validEndComment(const FormatToken *RBraceTok, StringRef NamespaceName,
"^/[/*] *( +([a-zA-Z0-9:_]+))?\\.? *(\\*/)?$", llvm::Regex::IgnoreCase);
// Pull out just the comment text.
- if (!CommentPattern.match(Comment->Next->TokenText, &Groups)) {
+ if (!CommentPattern.match(Comment->Next->TokenText, &Groups))
return false;
- }
NamespaceNameInComment = Groups.size() > 2 ? Groups[2] : "";
- return (NamespaceNameInComment == NamespaceName);
+ return NamespaceNameInComment == NamespaceName;
}
void addEndComment(const FormatToken *RBraceTok, StringRef EndCommentText,
@@ -220,9 +297,8 @@ std::pair<tooling::Replacements, unsigned> NamespaceEndCommentsFixer::analyze(
// Don't attempt to comment unbalanced braces or this can
// lead to comments being placed on the closing brace which isn't
// the matching brace of the namespace. (occurs during incomplete editing).
- if (Braces != 0) {
+ if (Braces != 0)
return {Fixes, 0};
- }
std::string AllNamespaceNames;
size_t StartLineIndex = SIZE_MAX;
@@ -241,9 +317,8 @@ std::pair<tooling::Replacements, unsigned> NamespaceEndCommentsFixer::analyze(
const FormatToken *EndCommentPrevTok = RBraceTok;
// Namespaces often end with '};'. In that case, attach namespace end
// comments to the semicolon tokens.
- if (RBraceTok->Next && RBraceTok->Next->is(tok::semi)) {
+ if (RBraceTok->Next && RBraceTok->Next->is(tok::semi))
EndCommentPrevTok = RBraceTok->Next;
- }
if (StartLineIndex == SIZE_MAX)
StartLineIndex = EndLine->MatchingOpeningBlockLineIndex;
std::string NamespaceName = computeName(NamespaceTok);
diff --git a/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp b/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp
index 0142a6c08ed3..61f17cae383e 100644
--- a/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/QualifierAlignmentFixer.cpp
@@ -37,7 +37,7 @@ QualifierAlignmentFixer::QualifierAlignmentFixer(
PrepareLeftRightOrdering(Style.QualifierOrder, LeftOrder, RightOrder,
ConfiguredQualifierTokens);
- // Handle the left and right Alignment Seperately
+ // Handle the left and right alignment separately.
for (const auto &Qualifier : LeftOrder) {
Passes.emplace_back(
[&, Qualifier, ConfiguredQualifierTokens](const Environment &Env) {
@@ -59,8 +59,9 @@ QualifierAlignmentFixer::QualifierAlignmentFixer(
}
std::pair<tooling::Replacements, unsigned> QualifierAlignmentFixer::analyze(
- TokenAnnotator &Annotator, SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
- FormatTokenLexer &Tokens) {
+ TokenAnnotator & /*Annotator*/,
+ SmallVectorImpl<AnnotatedLine *> & /*AnnotatedLines*/,
+ FormatTokenLexer & /*Tokens*/) {
auto Env = Environment::make(Code, FileName, Ranges, FirstStartColumn,
NextStartColumn, LastStartColumn);
if (!Env)
@@ -93,9 +94,10 @@ std::pair<tooling::Replacements, unsigned> QualifierAlignmentFixer::analyze(
if (!OriginalCode.equals(Fix.getReplacementText())) {
auto Err = NonNoOpFixes.add(Fix);
- if (Err)
+ if (Err) {
llvm::errs() << "Error adding replacements : "
<< llvm::toString(std::move(Err)) << "\n";
+ }
}
}
return {NonNoOpFixes, 0};
@@ -107,9 +109,10 @@ static void replaceToken(const SourceManager &SourceMgr,
auto Replacement = tooling::Replacement(SourceMgr, Range, NewText);
auto Err = Fixes.add(Replacement);
- if (Err)
+ if (Err) {
llvm::errs() << "Error while rearranging Qualifier : "
<< llvm::toString(std::move(Err)) << "\n";
+ }
}
static void removeToken(const SourceManager &SourceMgr,
@@ -149,16 +152,14 @@ static void insertQualifierBefore(const SourceManager &SourceMgr,
}
static bool endsWithSpace(const std::string &s) {
- if (s.empty()) {
+ if (s.empty())
return false;
- }
return isspace(s.back());
}
static bool startsWithSpace(const std::string &s) {
- if (s.empty()) {
+ if (s.empty())
return false;
- }
return isspace(s.front());
}
@@ -182,9 +183,8 @@ static void rotateTokens(const SourceManager &SourceMgr,
// Then move through the other tokens.
auto *Tok = Begin;
while (Tok != End) {
- if (!NewText.empty() && !endsWithSpace(NewText)) {
+ if (!NewText.empty() && !endsWithSpace(NewText))
NewText += " ";
- }
NewText += Tok->TokenText;
Tok = Tok->Next;
@@ -192,9 +192,8 @@ static void rotateTokens(const SourceManager &SourceMgr,
// If we are rotating to the right we move the first token to the back.
if (!Left) {
- if (!NewText.empty() && !startsWithSpace(NewText)) {
+ if (!NewText.empty() && !startsWithSpace(NewText))
NewText += " ";
- }
NewText += First->TokenText;
}
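Illustrative effect of rotateTokens, reconstructed from the two branches shown above (rotating left moves the last token to the front; rotating right moves the first token to the back):

    // rotateTokens(..., `const`, `int`,  /*Left=*/false): "const int" -> "int const"
    // rotateTokens(..., `int`,  `const`, /*Left=*/true):  "int const" -> "const int"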
@@ -217,26 +216,18 @@ const FormatToken *LeftRightQualifierAlignmentFixer::analyzeRight(
if (LeftRightQualifierAlignmentFixer::isPossibleMacro(Tok->Next))
return Tok;
- FormatToken *Qual = Tok->Next;
- FormatToken *LastQual = Qual;
- while (Qual && isQualifierOrType(Qual, ConfiguredQualifierTokens)) {
- LastQual = Qual;
- Qual = Qual->Next;
- }
- if (LastQual && Qual != LastQual) {
- rotateTokens(SourceMgr, Fixes, Tok, LastQual, /*Left=*/false);
- Tok = LastQual;
- } else if (Tok->startsSequence(QualifierType, tok::identifier,
- TT_TemplateOpener)) {
- // Read from the TemplateOpener to
- // TemplateCloser as in const ArrayRef<int> a; const ArrayRef<int> &a;
- FormatToken *EndTemplate = Tok->Next->Next->MatchingParen;
+ auto AnalyzeTemplate =
+ [&](const FormatToken *Tok,
+ const FormatToken *StartTemplate) -> const FormatToken * {
+ // Read from the TemplateOpener to TemplateCloser.
+ FormatToken *EndTemplate = StartTemplate->MatchingParen;
if (EndTemplate) {
// Move to the end of any template class members e.g.
// `Foo<int>::iterator`.
if (EndTemplate->startsSequence(TT_TemplateCloser, tok::coloncolon,
- tok::identifier))
+ tok::identifier)) {
EndTemplate = EndTemplate->Next->Next;
+ }
}
if (EndTemplate && EndTemplate->Next &&
!EndTemplate->Next->isOneOf(tok::equal, tok::l_paren)) {
@@ -245,24 +236,59 @@ const FormatToken *LeftRightQualifierAlignmentFixer::analyzeRight(
removeToken(SourceMgr, Fixes, Tok);
return Tok;
}
- } else if (Tok->startsSequence(QualifierType, tok::identifier)) {
+ return nullptr;
+ };
+
+ FormatToken *Qual = Tok->Next;
+ FormatToken *LastQual = Qual;
+ while (Qual && isQualifierOrType(Qual, ConfiguredQualifierTokens)) {
+ LastQual = Qual;
+ Qual = Qual->Next;
+ }
+ if (LastQual && Qual != LastQual) {
+ rotateTokens(SourceMgr, Fixes, Tok, LastQual, /*Left=*/false);
+ Tok = LastQual;
+ } else if (Tok->startsSequence(QualifierType, tok::identifier,
+ TT_TemplateCloser)) {
+ FormatToken *Closer = Tok->Next->Next;
+ rotateTokens(SourceMgr, Fixes, Tok, Tok->Next, /*Left=*/false);
+ Tok = Closer;
+ return Tok;
+ } else if (Tok->startsSequence(QualifierType, tok::identifier,
+ TT_TemplateOpener)) {
+ // `const ArrayRef<int> a;`
+ // `const ArrayRef<int> &a;`
+ const FormatToken *NewTok = AnalyzeTemplate(Tok, Tok->Next->Next);
+ if (NewTok)
+ return NewTok;
+ } else if (Tok->startsSequence(QualifierType, tok::coloncolon,
+ tok::identifier, TT_TemplateOpener)) {
+ // `const ::ArrayRef<int> a;`
+ // `const ::ArrayRef<int> &a;`
+ const FormatToken *NewTok = AnalyzeTemplate(Tok, Tok->Next->Next->Next);
+ if (NewTok)
+ return NewTok;
+ } else if (Tok->startsSequence(QualifierType, tok::identifier) ||
+ Tok->startsSequence(QualifierType, tok::coloncolon,
+ tok::identifier)) {
FormatToken *Next = Tok->Next;
// The case `const Foo` -> `Foo const`
+ // The case `const ::Foo` -> `::Foo const`
// The case `const Foo *` -> `Foo const *`
// The case `const Foo &` -> `Foo const &`
// The case `const Foo &&` -> `Foo const &&`
// The case `const std::Foo &&` -> `std::Foo const &&`
// The case `const std::Foo<T> &&` -> `std::Foo<T> const &&`
- while (Next && Next->isOneOf(tok::identifier, tok::coloncolon)) {
+ while (Next && Next->isOneOf(tok::identifier, tok::coloncolon))
Next = Next->Next;
- }
if (Next && Next->is(TT_TemplateOpener)) {
Next = Next->MatchingParen;
// Move to the end of any template class members e.g.
// `Foo<int>::iterator`.
if (Next && Next->startsSequence(TT_TemplateCloser, tok::coloncolon,
- tok::identifier))
+ tok::identifier)) {
return Tok;
+ }
assert(Next && "Missing template opener");
Next = Next->Next;
}
@@ -296,12 +322,13 @@ const FormatToken *LeftRightQualifierAlignmentFixer::analyzeLeft(
break;
}
- if (!Qual) {
+ if (!Qual)
return Tok;
- }
if (LastQual && Qual != LastQual && Qual->is(QualifierType)) {
rotateTokens(SourceMgr, Fixes, Tok, Qual, /*Left=*/true);
+ if (!Qual->Next)
+ return Tok;
Tok = Qual->Next;
} else if (Tok->startsSequence(tok::identifier, QualifierType)) {
if (Tok->Next->Next && Tok->Next->Next->isOneOf(tok::identifier, tok::star,
@@ -312,6 +339,11 @@ const FormatToken *LeftRightQualifierAlignmentFixer::analyzeLeft(
rotateTokens(SourceMgr, Fixes, Tok, Tok->Next, /*Left=*/true);
Tok = Tok->Next;
}
+ } else if (Tok->startsSequence(tok::identifier, QualifierType,
+ TT_TemplateCloser)) {
+ FormatToken *Closer = Tok->Next->Next;
+ rotateTokens(SourceMgr, Fixes, Tok, Tok->Next, /*Left=*/true);
+ Tok = Closer;
}
}
if (Tok->is(TT_TemplateOpener) && Tok->Next &&
@@ -319,7 +351,9 @@ const FormatToken *LeftRightQualifierAlignmentFixer::analyzeLeft(
Tok->Next->Next && Tok->Next->Next->is(QualifierType)) {
rotateTokens(SourceMgr, Fixes, Tok->Next, Tok->Next->Next, /*Left=*/true);
}
- if (Tok->startsSequence(tok::identifier) && Tok->Next) {
+ if ((Tok->startsSequence(tok::coloncolon, tok::identifier) ||
+ Tok->is(tok::identifier)) &&
+ Tok->Next) {
if (Tok->Previous &&
Tok->Previous->isOneOf(tok::star, tok::ampamp, tok::amp)) {
return Tok;
@@ -335,14 +369,21 @@ const FormatToken *LeftRightQualifierAlignmentFixer::analyzeLeft(
if (Next->is(tok::comment) && Next->getNextNonComment())
Next = Next->getNextNonComment();
assert(Next->MatchingParen && "Missing template closer");
- Next = Next->MatchingParen->Next;
+ Next = Next->MatchingParen;
+
+ // If the template closer is closing the requires clause,
+ // then stop and go back to the TemplateOpener and do whatever is
+ // inside the <>.
+ if (Next->ClosesRequiresClause)
+ return Next->MatchingParen;
+ Next = Next->Next;
// Move to the end of any template class members e.g.
// `Foo<int>::iterator`.
if (Next && Next->startsSequence(tok::coloncolon, tok::identifier))
Next = Next->Next->Next;
if (Next && Next->is(QualifierType)) {
- // Remove the const.
+ // Move the qualifier.
insertQualifierBefore(SourceMgr, Fixes, Tok, Qualifier);
removeToken(SourceMgr, Fixes, Next);
return Next;
@@ -351,7 +392,7 @@ const FormatToken *LeftRightQualifierAlignmentFixer::analyzeLeft(
if (Next && Next->Next &&
Next->Next->isOneOf(tok::amp, tok::ampamp, tok::star)) {
if (Next->is(QualifierType)) {
- // Remove the qualifier.
+ // Move the qualifier.
insertQualifierBefore(SourceMgr, Fixes, Tok, Qualifier);
removeToken(SourceMgr, Fixes, Next);
return Next;
@@ -384,7 +425,8 @@ LeftRightQualifierAlignmentFixer::LeftRightQualifierAlignmentFixer(
std::pair<tooling::Replacements, unsigned>
LeftRightQualifierAlignmentFixer::analyze(
- TokenAnnotator &Annotator, SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
+ TokenAnnotator & /*Annotator*/,
+ SmallVectorImpl<AnnotatedLine *> &AnnotatedLines,
FormatTokenLexer &Tokens) {
tooling::Replacements Fixes;
const AdditionalKeywords &Keywords = Tokens.getKeywords();
@@ -395,6 +437,8 @@ LeftRightQualifierAlignmentFixer::analyze(
assert(QualifierToken != tok::identifier && "Unrecognised Qualifier");
for (AnnotatedLine *Line : AnnotatedLines) {
+ if (Line->InPPDirective)
+ continue;
FormatToken *First = Line->First;
assert(First);
if (First->Finalized)
@@ -406,12 +450,13 @@ LeftRightQualifierAlignmentFixer::analyze(
Tok = Tok->Next) {
if (Tok->is(tok::comment))
continue;
- if (RightAlign)
+ if (RightAlign) {
Tok = analyzeRight(SourceMgr, Keywords, Fixes, Tok, Qualifier,
QualifierToken);
- else
+ } else {
Tok = analyzeLeft(SourceMgr, Keywords, Fixes, Tok, Qualifier,
QualifierToken);
+ }
}
}
return {Fixes, 0};
@@ -439,15 +484,15 @@ void QualifierAlignmentFixer::PrepareLeftRightOrdering(
tok::TokenKind QualifierToken =
LeftRightQualifierAlignmentFixer::getTokenFromQualifier(s);
- if (QualifierToken != tok::kw_typeof && QualifierToken != tok::identifier) {
+ if (QualifierToken != tok::kw_typeof && QualifierToken != tok::identifier)
Qualifiers.push_back(QualifierToken);
- }
- if (left)
+ if (left) {
// Reverse the order for left aligned items.
LeftOrder.insert(LeftOrder.begin(), s);
- else
+ } else {
RightOrder.push_back(s);
+ }
}
}
@@ -464,8 +509,10 @@ bool LeftRightQualifierAlignmentFixer::isPossibleMacro(const FormatToken *Tok) {
return false;
if (!Tok->is(tok::identifier))
return false;
- if (Tok->TokenText.upper() == Tok->TokenText.str())
- return true;
+ if (Tok->TokenText.upper() == Tok->TokenText.str()) {
+ // Single letters like T, K, U, and V are likely template arguments.
+ return (Tok->TokenText.size() != 1);
+ }
return false;
}
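The all-caps heuristic, illustrated (values are not from the source):

    // isPossibleMacro: "FOO"  -> true   (all caps, length > 1)
    //                  "T"    -> false  (single letter, likely a template arg)
    //                  "Foo"  -> false  (not all caps)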
diff --git a/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp b/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp
index 71326163f45a..c9de4868bf84 100644
--- a/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp
+++ b/contrib/llvm-project/clang/lib/Format/SortJavaScriptImports.cpp
@@ -107,12 +107,13 @@ bool operator<(const JsModuleReference &LHS, const JsModuleReference &RHS) {
if (LHS.Category != RHS.Category)
return LHS.Category < RHS.Category;
if (LHS.Category == JsModuleReference::ReferenceCategory::SIDE_EFFECT ||
- LHS.Category == JsModuleReference::ReferenceCategory::ALIAS)
+ LHS.Category == JsModuleReference::ReferenceCategory::ALIAS) {
// Side effect imports and aliases might be ordering sensitive. Consider
// them equal so that they maintain their relative order in the stable sort
// below. This retains transitivity because LHS.Category == RHS.Category
// here.
return false;
+ }
// Empty URLs sort *last* (for export {...};).
if (LHS.URL.empty() != RHS.URL.empty())
return LHS.URL.empty() < RHS.URL.empty();
@@ -171,8 +172,9 @@ public:
// in a single group.
if (!Reference.IsExport &&
(Reference.IsExport != References[I + 1].IsExport ||
- Reference.Category != References[I + 1].Category))
+ Reference.Category != References[I + 1].Category)) {
ReferencesText += "\n";
+ }
}
}
llvm::StringRef PreviousText = getSourceText(InsertionPoint);
@@ -187,15 +189,16 @@ public:
// harmless and will be stripped by the subsequent formatting pass.
// FIXME: A better long term fix is to re-calculate Ranges after sorting.
unsigned PreviousSize = PreviousText.size();
- while (ReferencesText.size() < PreviousSize) {
+ while (ReferencesText.size() < PreviousSize)
ReferencesText += " ";
- }
// Separate references from the main code body of the file.
if (FirstNonImportLine && FirstNonImportLine->First->NewlinesBefore < 2 &&
!(FirstNonImportLine->First->is(tok::comment) &&
- FirstNonImportLine->First->TokenText.trim() == "// clang-format on"))
+ FirstNonImportLine->First->TokenText.trim() ==
+ "// clang-format on")) {
ReferencesText += "\n";
+ }
LLVM_DEBUG(llvm::dbgs() << "Replacing imports:\n"
<< PreviousText << "\nwith:\n"
@@ -390,11 +393,12 @@ private:
Current = Current->Next;
}
skipComments();
- if (Start.isInvalid() || References.empty())
+ if (Start.isInvalid() || References.empty()) {
// After the first file level comment, consider line comments to be part
// of the import that immediately follows them by using the previously
// set Start.
Start = Line->First->Tok.getLocation();
+ }
if (!Current) {
// Only comments on this line. Could be the first non-import line.
FirstNonImportLine = Line;
@@ -463,13 +467,14 @@ private:
// URL = TokenText without the quotes.
Reference.URL =
Current->TokenText.substr(1, Current->TokenText.size() - 2);
- if (Reference.URL.startswith(".."))
+ if (Reference.URL.startswith("..")) {
Reference.Category =
JsModuleReference::ReferenceCategory::RELATIVE_PARENT;
- else if (Reference.URL.startswith("."))
+ } else if (Reference.URL.startswith(".")) {
Reference.Category = JsModuleReference::ReferenceCategory::RELATIVE;
- else
+ } else {
Reference.Category = JsModuleReference::ReferenceCategory::ABSOLUTE;
+ }
}
return true;
}
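The branch order above matters: ".." has to be tested before ".", because every parent-relative URL also starts with ".". The same precedence in a standalone sketch (names are hypothetical):

  #include <string>

  enum class RefCategory { RelativeParent, Relative, Absolute };

  static RefCategory categorize(const std::string &URL) {
    if (URL.rfind("..", 0) == 0) // ".." must come first;
      return RefCategory::RelativeParent;
    if (URL.rfind(".", 0) == 0)  // "." alone would also match "../foo"
      return RefCategory::Relative;
    return RefCategory::Absolute;
  }
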
@@ -511,9 +516,8 @@ private:
nextToken();
while (Current->is(tok::identifier)) {
nextToken();
- if (Current->is(tok::semi)) {
+ if (Current->is(tok::semi))
return true;
- }
if (!Current->is(tok::period))
return false;
nextToken();
diff --git a/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp b/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp
index 2bd5a1fd6230..0a775c0a87ed 100644
--- a/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp
+++ b/contrib/llvm-project/clang/lib/Format/TokenAnalyzer.cpp
@@ -110,12 +110,13 @@ std::pair<tooling::Replacements, unsigned> TokenAnalyzer::process() {
UnwrappedLineParser Parser(Style, Lex.getKeywords(),
Env.getFirstStartColumn(), Tokens, *this);
Parser.parse();
- assert(UnwrappedLines.rbegin()->empty());
+ assert(UnwrappedLines.back().empty());
unsigned Penalty = 0;
for (unsigned Run = 0, RunE = UnwrappedLines.size(); Run + 1 != RunE; ++Run) {
const auto &Lines = UnwrappedLines[Run];
LLVM_DEBUG(llvm::dbgs() << "Run " << Run << "...\n");
SmallVector<AnnotatedLine *, 16> AnnotatedLines;
+ AnnotatedLines.reserve(Lines.size());
TokenAnnotator Annotator(Style, Lex.getKeywords());
for (const UnwrappedLine &Line : Lines) {
diff --git a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp
index 9d130dbb02eb..029cb9097871 100644
--- a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp
+++ b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.cpp
@@ -26,6 +26,13 @@ namespace format {
namespace {
+/// Returns \c true if the line starts with a token that can start a statement
+/// with an initializer.
+static bool startsWithInitStatement(const AnnotatedLine &Line) {
+ return Line.startsWith(tok::kw_for) || Line.startsWith(tok::kw_if) ||
+ Line.startsWith(tok::kw_switch);
+}
+
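These three keywords matter because C++17 init-statements let for, if, and switch all open a multi-variable declaration, where a comma is declarative rather than part of an expression. Assuming C++17:

  void demo() {
    for (int i = 0, e = 10; i < e; ++i) { }           // comma in init-statement
    if (int a = 1, b = 2; a < b) { }                  // C++17 if initializer
    switch (int x = 0, y = 1; y) { default: break; }  // C++17 switch initializer
  }
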
/// Returns \c true if the token can be used as an identifier in
/// an Objective-C \c \@selector, \c false otherwise.
///
@@ -47,8 +54,9 @@ static bool isLambdaParameterList(const FormatToken *Left) {
// Skip <...> if present.
if (Left->Previous && Left->Previous->is(tok::greater) &&
Left->Previous->MatchingParen &&
- Left->Previous->MatchingParen->is(TT_TemplateOpener))
+ Left->Previous->MatchingParen->is(TT_TemplateOpener)) {
Left = Left->Previous->MatchingParen;
+ }
// Check for `[...]`.
return Left->Previous && Left->Previous->is(tok::r_square) &&
@@ -91,8 +99,10 @@ private:
return false;
if (Previous.Previous->is(tok::r_paren) && Contexts.size() > 1 &&
(!Previous.Previous->MatchingParen ||
- !Previous.Previous->MatchingParen->is(TT_OverloadedOperatorLParen)))
+ !Previous.Previous->MatchingParen->is(
+ TT_OverloadedOperatorLParen))) {
return false;
+ }
}
FormatToken *Left = CurrentToken->Previous;
@@ -106,12 +116,13 @@ private:
Contexts.back().IsExpression = false;
// If there's a template keyword before the opening angle bracket, this is a
// template parameter, not an argument.
- Contexts.back().InTemplateArgument =
- Left->Previous && Left->Previous->Tok.isNot(tok::kw_template);
+ if (Left->Previous && Left->Previous->isNot(tok::kw_template))
+ Contexts.back().ContextType = Context::TemplateArgument;
if (Style.Language == FormatStyle::LK_Java &&
- CurrentToken->is(tok::question))
+ CurrentToken->is(tok::question)) {
next();
+ }
while (CurrentToken) {
if (CurrentToken->is(tok::greater)) {
@@ -126,8 +137,9 @@ private:
(isKeywordWithCondition(*Line.First) ||
CurrentToken->getStartOfNonWhitespace() ==
CurrentToken->Next->getStartOfNonWhitespace().getLocWithOffset(
- -1)))
+ -1))) {
return false;
+ }
Left->MatchingParen = CurrentToken;
CurrentToken->MatchingParen = Left;
// In TT_Proto, we must distinguish between:
@@ -137,10 +149,11 @@ private:
// In TT_TextProto, map<key, value> does not occur.
if (Style.Language == FormatStyle::LK_TextProto ||
(Style.Language == FormatStyle::LK_Proto && Left->Previous &&
- Left->Previous->isOneOf(TT_SelectorName, TT_DictLiteral)))
+ Left->Previous->isOneOf(TT_SelectorName, TT_DictLiteral))) {
CurrentToken->setType(TT_DictLiteral);
- else
+ } else {
CurrentToken->setType(TT_TemplateCloser);
+ }
next();
return true;
}
@@ -152,8 +165,9 @@ private:
if (CurrentToken->isOneOf(tok::r_paren, tok::r_square, tok::r_brace) ||
(CurrentToken->isOneOf(tok::colon, tok::question) && InExprContext &&
!Style.isCSharp() && Style.Language != FormatStyle::LK_Proto &&
- Style.Language != FormatStyle::LK_TextProto))
+ Style.Language != FormatStyle::LK_TextProto)) {
return false;
+ }
// If a && or || is found and interpreted as a binary operator, this set
// of angles is likely part of something like "a < b && c > d". If the
// angles are inside an expression, the ||/&& might also be a binary
@@ -163,15 +177,17 @@ private:
if (CurrentToken->Previous->isOneOf(tok::pipepipe, tok::ampamp) &&
CurrentToken->Previous->is(TT_BinaryOperator) &&
Contexts[Contexts.size() - 2].IsExpression &&
- !Line.startsWith(tok::kw_template))
+ !Line.startsWith(tok::kw_template)) {
return false;
+ }
updateParameterCount(Left, CurrentToken);
if (Style.Language == FormatStyle::LK_Proto) {
if (FormatToken *Previous = CurrentToken->getPreviousNonComment()) {
if (CurrentToken->is(tok::colon) ||
(CurrentToken->isOneOf(tok::l_brace, tok::less) &&
- Previous->isNot(tok::colon)))
+ Previous->isNot(tok::colon))) {
Previous->setType(TT_SelectorName);
+ }
}
}
if (!consumeToken())
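The '&&'/'||' heuristic above targets the classic ambiguity where one token stream is either a comparison chain or a template argument list. Both readings, in a compilable sketch:

  template <bool B> struct Flag {};

  bool cmp(int a, int b, int c, int d) {
    return a < b && c > d; // '<'/'>' are comparisons, '&&' a binary operator
  }
  Flag<true && false> f;   // here '&&' sits inside genuine template angles
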
@@ -204,23 +220,25 @@ private:
bool parseParens(bool LookForDecls = false) {
if (!CurrentToken)
return false;
- FormatToken *Left = CurrentToken->Previous;
- assert(Left && "Unknown previous token");
- FormatToken *PrevNonComment = Left->getPreviousNonComment();
- Left->ParentBracket = Contexts.back().ContextKind;
+ assert(CurrentToken->Previous && "Unknown previous token");
+ FormatToken &OpeningParen = *CurrentToken->Previous;
+ assert(OpeningParen.is(tok::l_paren));
+ FormatToken *PrevNonComment = OpeningParen.getPreviousNonComment();
+ OpeningParen.ParentBracket = Contexts.back().ContextKind;
ScopedContextCreator ContextCreator(*this, tok::l_paren, 1);
// FIXME: This is a bit of a hack. Do better.
Contexts.back().ColonIsForRangeExpr =
Contexts.size() == 2 && Contexts[0].ColonIsForRangeExpr;
- if (Left->Previous && Left->Previous->is(TT_UntouchableMacroFunc)) {
- Left->Finalized = true;
+ if (OpeningParen.Previous &&
+ OpeningParen.Previous->is(TT_UntouchableMacroFunc)) {
+ OpeningParen.Finalized = true;
return parseUntouchableParens();
}
bool StartsObjCMethodExpr = false;
- if (FormatToken *MaybeSel = Left->Previous) {
+ if (FormatToken *MaybeSel = OpeningParen.Previous) {
// @selector( starts a selector.
if (MaybeSel->isObjCAtKeyword(tok::objc_selector) && MaybeSel->Previous &&
MaybeSel->Previous->is(tok::at)) {
@@ -228,9 +246,9 @@ private:
}
}
- if (Left->is(TT_OverloadedOperatorLParen)) {
+ if (OpeningParen.is(TT_OverloadedOperatorLParen)) {
// Find the previous kw_operator token.
- FormatToken *Prev = Left;
+ FormatToken *Prev = &OpeningParen;
while (!Prev->is(tok::kw_operator)) {
Prev = Prev->Previous;
assert(Prev && "Expect a kw_operator prior to the OperatorLParen!");
@@ -249,54 +267,57 @@ private:
// type X = (...);
// export type X = (...);
Contexts.back().IsExpression = false;
- } else if (Left->Previous &&
- (Left->Previous->isOneOf(tok::kw_static_assert, tok::kw_while,
- tok::l_paren, tok::comma) ||
- Left->Previous->isIf() ||
- Left->Previous->is(TT_BinaryOperator))) {
+ } else if (OpeningParen.Previous &&
+ (OpeningParen.Previous->isOneOf(tok::kw_static_assert,
+ tok::kw_while, tok::l_paren,
+ tok::comma, TT_BinaryOperator) ||
+ OpeningParen.Previous->isIf())) {
// static_assert, if and while usually contain expressions.
Contexts.back().IsExpression = true;
- } else if (Style.isJavaScript() && Left->Previous &&
- (Left->Previous->is(Keywords.kw_function) ||
- (Left->Previous->endsSequence(tok::identifier,
- Keywords.kw_function)))) {
+ } else if (Style.isJavaScript() && OpeningParen.Previous &&
+ (OpeningParen.Previous->is(Keywords.kw_function) ||
+ (OpeningParen.Previous->endsSequence(tok::identifier,
+ Keywords.kw_function)))) {
// function(...) or function f(...)
Contexts.back().IsExpression = false;
- } else if (Style.isJavaScript() && Left->Previous &&
- Left->Previous->is(TT_JsTypeColon)) {
+ } else if (Style.isJavaScript() && OpeningParen.Previous &&
+ OpeningParen.Previous->is(TT_JsTypeColon)) {
// let x: (SomeType);
Contexts.back().IsExpression = false;
- } else if (isLambdaParameterList(Left)) {
+ } else if (isLambdaParameterList(&OpeningParen)) {
// This is a parameter list of a lambda expression.
Contexts.back().IsExpression = false;
} else if (Line.InPPDirective &&
- (!Left->Previous || !Left->Previous->is(tok::identifier))) {
+ (!OpeningParen.Previous ||
+ !OpeningParen.Previous->is(tok::identifier))) {
Contexts.back().IsExpression = true;
} else if (Contexts[Contexts.size() - 2].CaretFound) {
// This is the parameter list of an ObjC block.
Contexts.back().IsExpression = false;
- } else if (Left->Previous && Left->Previous->is(TT_ForEachMacro)) {
+ } else if (OpeningParen.Previous &&
+ OpeningParen.Previous->is(TT_ForEachMacro)) {
// The first argument to a foreach macro is a declaration.
- Contexts.back().IsForEachMacro = true;
+ Contexts.back().ContextType = Context::ForEachMacro;
Contexts.back().IsExpression = false;
- } else if (Left->Previous && Left->Previous->MatchingParen &&
- Left->Previous->MatchingParen->is(TT_ObjCBlockLParen)) {
+ } else if (OpeningParen.Previous && OpeningParen.Previous->MatchingParen &&
+ OpeningParen.Previous->MatchingParen->is(TT_ObjCBlockLParen)) {
Contexts.back().IsExpression = false;
} else if (!Line.MustBeDeclaration && !Line.InPPDirective) {
bool IsForOrCatch =
- Left->Previous && Left->Previous->isOneOf(tok::kw_for, tok::kw_catch);
+ OpeningParen.Previous &&
+ OpeningParen.Previous->isOneOf(tok::kw_for, tok::kw_catch);
Contexts.back().IsExpression = !IsForOrCatch;
}
// Infer the role of the l_paren based on the previous token if we haven't
// detected one yet.
- if (PrevNonComment && Left->is(TT_Unknown)) {
+ if (PrevNonComment && OpeningParen.is(TT_Unknown)) {
if (PrevNonComment->is(tok::kw___attribute)) {
- Left->setType(TT_AttributeParen);
+ OpeningParen.setType(TT_AttributeParen);
} else if (PrevNonComment->isOneOf(TT_TypenameMacro, tok::kw_decltype,
tok::kw_typeof, tok::kw__Atomic,
tok::kw___underlying_type)) {
- Left->setType(TT_TypeDeclarationParen);
+ OpeningParen.setType(TT_TypeDeclarationParen);
// decltype() and typeof() usually contain expressions.
if (PrevNonComment->isOneOf(tok::kw_decltype, tok::kw_typeof))
Contexts.back().IsExpression = true;
@@ -305,7 +326,7 @@ private:
if (StartsObjCMethodExpr) {
Contexts.back().ColonIsObjCMethodExpr = true;
- Left->setType(TT_ObjCMethodExpr);
+ OpeningParen.setType(TT_ObjCMethodExpr);
}
// MightBeFunctionType and ProbablyFunctionType are used for
@@ -322,7 +343,7 @@ private:
bool HasMultipleLines = false;
bool HasMultipleParametersOnALine = false;
bool MightBeObjCForRangeLoop =
- Left->Previous && Left->Previous->is(tok::kw_for);
+ OpeningParen.Previous && OpeningParen.Previous->is(tok::kw_for);
FormatToken *PossibleObjCForInToken = nullptr;
while (CurrentToken) {
// LookForDecls is set when "if (" has been seen. Check for
@@ -345,30 +366,37 @@ private:
if (CurrentToken->Previous->is(TT_PointerOrReference) &&
CurrentToken->Previous->Previous->isOneOf(tok::l_paren,
- tok::coloncolon))
+ tok::coloncolon)) {
ProbablyFunctionType = true;
+ }
if (CurrentToken->is(tok::comma))
MightBeFunctionType = false;
if (CurrentToken->Previous->is(TT_BinaryOperator))
Contexts.back().IsExpression = true;
if (CurrentToken->is(tok::r_paren)) {
- if (MightBeFunctionType && ProbablyFunctionType && CurrentToken->Next &&
+ if (OpeningParen.isNot(TT_CppCastLParen) && MightBeFunctionType &&
+ ProbablyFunctionType && CurrentToken->Next &&
(CurrentToken->Next->is(tok::l_paren) ||
- (CurrentToken->Next->is(tok::l_square) && Line.MustBeDeclaration)))
- Left->setType(Left->Next->is(tok::caret) ? TT_ObjCBlockLParen
- : TT_FunctionTypeLParen);
- Left->MatchingParen = CurrentToken;
- CurrentToken->MatchingParen = Left;
+ (CurrentToken->Next->is(tok::l_square) &&
+ Line.MustBeDeclaration))) {
+ OpeningParen.setType(OpeningParen.Next->is(tok::caret)
+ ? TT_ObjCBlockLParen
+ : TT_FunctionTypeLParen);
+ }
+ OpeningParen.MatchingParen = CurrentToken;
+ CurrentToken->MatchingParen = &OpeningParen;
if (CurrentToken->Next && CurrentToken->Next->is(tok::l_brace) &&
- Left->Previous && Left->Previous->is(tok::l_paren)) {
+ OpeningParen.Previous && OpeningParen.Previous->is(tok::l_paren)) {
// Detect the case where macros are used to generate lambdas or
// function bodies, e.g.:
// auto my_lambda = MACRO((Type *type, int i) { .. body .. });
- for (FormatToken *Tok = Left; Tok != CurrentToken; Tok = Tok->Next) {
+ for (FormatToken *Tok = &OpeningParen; Tok != CurrentToken;
+ Tok = Tok->Next) {
if (Tok->is(TT_BinaryOperator) &&
- Tok->isOneOf(tok::star, tok::amp, tok::ampamp))
+ Tok->isOneOf(tok::star, tok::amp, tok::ampamp)) {
Tok->setType(TT_PointerOrReference);
+ }
}
}
@@ -380,23 +408,29 @@ private:
}
}
- if (Left->is(TT_AttributeParen))
+ if (OpeningParen.is(TT_AttributeParen))
CurrentToken->setType(TT_AttributeParen);
- if (Left->is(TT_TypeDeclarationParen))
+ if (OpeningParen.is(TT_TypeDeclarationParen))
CurrentToken->setType(TT_TypeDeclarationParen);
- if (Left->Previous && Left->Previous->is(TT_JavaAnnotation))
+ if (OpeningParen.Previous &&
+ OpeningParen.Previous->is(TT_JavaAnnotation)) {
CurrentToken->setType(TT_JavaAnnotation);
- if (Left->Previous && Left->Previous->is(TT_LeadingJavaAnnotation))
+ }
+ if (OpeningParen.Previous &&
+ OpeningParen.Previous->is(TT_LeadingJavaAnnotation)) {
CurrentToken->setType(TT_LeadingJavaAnnotation);
- if (Left->Previous && Left->Previous->is(TT_AttributeSquare))
+ }
+ if (OpeningParen.Previous &&
+ OpeningParen.Previous->is(TT_AttributeSquare)) {
CurrentToken->setType(TT_AttributeSquare);
+ }
if (!HasMultipleLines)
- Left->setPackingKind(PPK_Inconclusive);
+ OpeningParen.setPackingKind(PPK_Inconclusive);
else if (HasMultipleParametersOnALine)
- Left->setPackingKind(PPK_BinPacked);
+ OpeningParen.setPackingKind(PPK_BinPacked);
else
- Left->setPackingKind(PPK_OnePerLine);
+ OpeningParen.setPackingKind(PPK_OnePerLine);
next();
return true;
@@ -404,20 +438,22 @@ private:
if (CurrentToken->isOneOf(tok::r_square, tok::r_brace))
return false;
- if (CurrentToken->is(tok::l_brace))
- Left->setType(TT_Unknown); // Not TT_ObjCBlockLParen
+ if (CurrentToken->is(tok::l_brace) && OpeningParen.is(TT_ObjCBlockLParen))
+ OpeningParen.setType(TT_Unknown);
if (CurrentToken->is(tok::comma) && CurrentToken->Next &&
!CurrentToken->Next->HasUnescapedNewline &&
- !CurrentToken->Next->isTrailingComment())
+ !CurrentToken->Next->isTrailingComment()) {
HasMultipleParametersOnALine = true;
+ }
bool ProbablyFunctionTypeLParen =
(CurrentToken->is(tok::l_paren) && CurrentToken->Next &&
CurrentToken->Next->isOneOf(tok::star, tok::amp, tok::caret));
if ((CurrentToken->Previous->isOneOf(tok::kw_const, tok::kw_auto) ||
CurrentToken->Previous->isSimpleTypeSpecifier()) &&
!(CurrentToken->is(tok::l_brace) ||
- (CurrentToken->is(tok::l_paren) && !ProbablyFunctionTypeLParen)))
+ (CurrentToken->is(tok::l_paren) && !ProbablyFunctionTypeLParen))) {
Contexts.back().IsExpression = false;
+ }
if (CurrentToken->isOneOf(tok::semi, tok::colon)) {
MightBeObjCForRangeLoop = false;
if (PossibleObjCForInToken) {
@@ -437,7 +473,7 @@ private:
FormatToken *Tok = CurrentToken;
if (!consumeToken())
return false;
- updateParameterCount(Left, Tok);
+ updateParameterCount(&OpeningParen, Tok);
if (CurrentToken && CurrentToken->HasUnescapedNewline)
HasMultipleLines = true;
}
@@ -468,9 +504,8 @@ private:
return false;
// Move along the tokens in between the '[' and ']', e.g. [STAThread].
- while (AttrTok && AttrTok->isNot(tok::r_square)) {
+ while (AttrTok && AttrTok->isNot(tok::r_square))
AttrTok = AttrTok->Next;
- }
if (!AttrTok)
return false;
@@ -489,8 +524,9 @@ private:
// in case it's a [XXX] retval func(....
if (AttrTok->Next &&
- AttrTok->Next->startsSequence(tok::identifier, tok::l_paren))
+ AttrTok->Next->startsSequence(tok::identifier, tok::l_paren)) {
return true;
+ }
return false;
}
@@ -499,9 +535,8 @@ private:
if (!Style.isCpp() || !Tok.startsSequence(tok::l_square, tok::l_square))
return false;
// The first square bracket is part of an ObjC array literal
- if (Tok.Previous && Tok.Previous->is(tok::at)) {
+ if (Tok.Previous && Tok.Previous->is(tok::at))
return false;
- }
const FormatToken *AttrTok = Tok.Next->Next;
if (!AttrTok)
return false;
@@ -517,8 +552,9 @@ private:
// [[foo(:)]].
if (AttrTok->is(tok::colon) ||
AttrTok->startsSequence(tok::identifier, tok::identifier) ||
- AttrTok->startsSequence(tok::r_paren, tok::identifier))
+ AttrTok->startsSequence(tok::r_paren, tok::identifier)) {
return false;
+ }
if (AttrTok->is(tok::ellipsis))
return true;
AttrTok = AttrTok->Next;
@@ -544,7 +580,7 @@ private:
bool CppArrayTemplates =
Style.isCpp() && Parent && Parent->is(TT_TemplateCloser) &&
(Contexts.back().CanBeExpression || Contexts.back().IsExpression ||
- Contexts.back().InTemplateArgument);
+ Contexts.back().ContextType == Context::TemplateArgument);
bool IsCpp11AttributeSpecifier = isCpp11AttributeSpecifier(*Left) ||
Contexts.back().InCpp11AttributeSpecifier;
@@ -657,13 +693,13 @@ private:
if (CurrentToken->is(tok::r_square)) {
if (IsCpp11AttributeSpecifier)
CurrentToken->setType(TT_AttributeSquare);
- if (IsCSharpAttributeSpecifier)
+ if (IsCSharpAttributeSpecifier) {
CurrentToken->setType(TT_AttributeSquare);
- else if (((CurrentToken->Next &&
- CurrentToken->Next->is(tok::l_paren)) ||
- (CurrentToken->Previous &&
- CurrentToken->Previous->Previous == Left)) &&
- Left->is(TT_ObjCMethodExpr)) {
+ } else if (((CurrentToken->Next &&
+ CurrentToken->Next->is(tok::l_paren)) ||
+ (CurrentToken->Previous &&
+ CurrentToken->Previous->Previous == Left)) &&
+ Left->is(TT_ObjCMethodExpr)) {
// An ObjC method call is rarely followed by an open parenthesis. It
// also can't be composed of just one token, unless it's a macro that
// will be expanded to more tokens.
@@ -677,18 +713,20 @@ private:
// before the r_square is tagged as a selector name component.
if (!ColonFound && CurrentToken->Previous &&
CurrentToken->Previous->is(TT_Unknown) &&
- canBeObjCSelectorComponent(*CurrentToken->Previous))
+ canBeObjCSelectorComponent(*CurrentToken->Previous)) {
CurrentToken->Previous->setType(TT_SelectorName);
+ }
// determineStarAmpUsage() thinks that '*' '[' is allocating an
// array of pointers, but if '[' starts a selector then '*' is a
// binary operator.
if (Parent && Parent->is(TT_PointerOrReference))
- Parent->setType(TT_BinaryOperator);
+ Parent->overwriteFixedType(TT_BinaryOperator);
}
// An arrow after an ObjC method expression is not a lambda arrow.
if (CurrentToken->getType() == TT_ObjCMethodExpr &&
- CurrentToken->Next && CurrentToken->Next->is(TT_LambdaArrow))
- CurrentToken->Next->setType(TT_Unknown);
+ CurrentToken->Next && CurrentToken->Next->is(TT_LambdaArrow)) {
+ CurrentToken->Next->overwriteFixedType(TT_Unknown);
+ }
Left->MatchingParen = CurrentToken;
CurrentToken->MatchingParen = Left;
// FirstObjCSelectorName is set when a colon is found. This does
@@ -728,15 +766,17 @@ private:
Left->setType(TT_ObjCMethodExpr);
StartsObjCMethodExpr = true;
Contexts.back().ColonIsObjCMethodExpr = true;
- if (Parent && Parent->is(tok::r_paren))
+ if (Parent && Parent->is(tok::r_paren)) {
// FIXME(bug 36976): ObjC return types shouldn't use TT_CastRParen.
Parent->setType(TT_CastRParen);
+ }
}
ColonFound = true;
}
if (CurrentToken->is(tok::comma) && Left->is(TT_ObjCMethodExpr) &&
- !ColonFound)
+ !ColonFound) {
Left->setType(TT_ArrayInitializerLSquare);
+ }
FormatToken *Tok = CurrentToken;
if (!consumeToken())
return false;
@@ -753,72 +793,78 @@ private:
const auto End = std::next(Contexts.rbegin(), 2);
auto Last = Contexts.rbegin();
unsigned Depth = 0;
- for (; Last != End; ++Last) {
+ for (; Last != End; ++Last)
if (Last->ContextKind == tok::l_brace)
++Depth;
- }
return Depth == 2 && Last->ContextKind != tok::l_brace;
}
bool parseBrace() {
- if (CurrentToken) {
- FormatToken *Left = CurrentToken->Previous;
- Left->ParentBracket = Contexts.back().ContextKind;
+ if (!CurrentToken)
+ return true;
- if (Contexts.back().CaretFound)
- Left->setType(TT_ObjCBlockLBrace);
- Contexts.back().CaretFound = false;
+ assert(CurrentToken->Previous);
+ FormatToken &OpeningBrace = *CurrentToken->Previous;
+ assert(OpeningBrace.is(tok::l_brace));
+ OpeningBrace.ParentBracket = Contexts.back().ContextKind;
- ScopedContextCreator ContextCreator(*this, tok::l_brace, 1);
- Contexts.back().ColonIsDictLiteral = true;
- if (Left->is(BK_BracedInit))
- Contexts.back().IsExpression = true;
- if (Style.isJavaScript() && Left->Previous &&
- Left->Previous->is(TT_JsTypeColon))
- Contexts.back().IsExpression = false;
+ if (Contexts.back().CaretFound)
+ OpeningBrace.overwriteFixedType(TT_ObjCBlockLBrace);
+ Contexts.back().CaretFound = false;
- unsigned CommaCount = 0;
- while (CurrentToken) {
- if (CurrentToken->is(tok::r_brace)) {
- assert(Left->Optional == CurrentToken->Optional);
- Left->MatchingParen = CurrentToken;
- CurrentToken->MatchingParen = Left;
- if (Style.AlignArrayOfStructures != FormatStyle::AIAS_None) {
- if (Left->ParentBracket == tok::l_brace &&
- couldBeInStructArrayInitializer() && CommaCount > 0) {
- Contexts.back().InStructArrayInitializer = true;
- }
+ ScopedContextCreator ContextCreator(*this, tok::l_brace, 1);
+ Contexts.back().ColonIsDictLiteral = true;
+ if (OpeningBrace.is(BK_BracedInit))
+ Contexts.back().IsExpression = true;
+ if (Style.isJavaScript() && OpeningBrace.Previous &&
+ OpeningBrace.Previous->is(TT_JsTypeColon)) {
+ Contexts.back().IsExpression = false;
+ }
+
+ unsigned CommaCount = 0;
+ while (CurrentToken) {
+ if (CurrentToken->is(tok::r_brace)) {
+ assert(OpeningBrace.Optional == CurrentToken->Optional);
+ OpeningBrace.MatchingParen = CurrentToken;
+ CurrentToken->MatchingParen = &OpeningBrace;
+ if (Style.AlignArrayOfStructures != FormatStyle::AIAS_None) {
+ if (OpeningBrace.ParentBracket == tok::l_brace &&
+ couldBeInStructArrayInitializer() && CommaCount > 0) {
+ Contexts.back().ContextType = Context::StructArrayInitializer;
}
- next();
- return true;
}
- if (CurrentToken->isOneOf(tok::r_paren, tok::r_square))
- return false;
- updateParameterCount(Left, CurrentToken);
- if (CurrentToken->isOneOf(tok::colon, tok::l_brace, tok::less)) {
- FormatToken *Previous = CurrentToken->getPreviousNonComment();
- if (Previous->is(TT_JsTypeOptionalQuestion))
- Previous = Previous->getPreviousNonComment();
- if ((CurrentToken->is(tok::colon) &&
- (!Contexts.back().ColonIsDictLiteral || !Style.isCpp())) ||
- Style.Language == FormatStyle::LK_Proto ||
- Style.Language == FormatStyle::LK_TextProto) {
- Left->setType(TT_DictLiteral);
- if (Previous->Tok.getIdentifierInfo() ||
- Previous->is(tok::string_literal))
- Previous->setType(TT_SelectorName);
+ next();
+ return true;
+ }
+ if (CurrentToken->isOneOf(tok::r_paren, tok::r_square))
+ return false;
+ updateParameterCount(&OpeningBrace, CurrentToken);
+ if (CurrentToken->isOneOf(tok::colon, tok::l_brace, tok::less)) {
+ FormatToken *Previous = CurrentToken->getPreviousNonComment();
+ if (Previous->is(TT_JsTypeOptionalQuestion))
+ Previous = Previous->getPreviousNonComment();
+ if ((CurrentToken->is(tok::colon) &&
+ (!Contexts.back().ColonIsDictLiteral || !Style.isCpp())) ||
+ Style.Language == FormatStyle::LK_Proto ||
+ Style.Language == FormatStyle::LK_TextProto) {
+ OpeningBrace.setType(TT_DictLiteral);
+ if (Previous->Tok.getIdentifierInfo() ||
+ Previous->is(tok::string_literal)) {
+ Previous->setType(TT_SelectorName);
}
- if (CurrentToken->is(tok::colon) || Style.isJavaScript())
- Left->setType(TT_DictLiteral);
}
- if (CurrentToken->is(tok::comma)) {
- if (Style.isJavaScript())
- Left->setType(TT_DictLiteral);
- ++CommaCount;
- }
- if (!consumeToken())
- return false;
+ if (CurrentToken->is(tok::colon) && OpeningBrace.is(TT_Unknown))
+ OpeningBrace.setType(TT_DictLiteral);
+ else if (Style.isJavaScript())
+ OpeningBrace.overwriteFixedType(TT_DictLiteral);
+ }
+ if (CurrentToken->is(tok::comma)) {
+ if (Style.isJavaScript())
+ OpeningBrace.overwriteFixedType(TT_DictLiteral);
+ ++CommaCount;
}
+ if (!consumeToken())
+ return false;
}
return true;
}
@@ -932,12 +978,13 @@ private:
Contexts.back().LongestObjCSelectorName == 0 ||
UnknownIdentifierInMethodDeclaration) {
Tok->Previous->setType(TT_SelectorName);
- if (!Contexts.back().FirstObjCSelectorName)
+ if (!Contexts.back().FirstObjCSelectorName) {
Contexts.back().FirstObjCSelectorName = Tok->Previous;
- else if (Tok->Previous->ColumnWidth >
- Contexts.back().LongestObjCSelectorName)
+ } else if (Tok->Previous->ColumnWidth >
+ Contexts.back().LongestObjCSelectorName) {
Contexts.back().LongestObjCSelectorName =
Tok->Previous->ColumnWidth;
+ }
Tok->Previous->ParameterIndex =
Contexts.back().FirstObjCSelectorName->ObjCSelectorNameParts;
++Contexts.back().FirstObjCSelectorName->ObjCSelectorNameParts;
@@ -952,17 +999,18 @@ private:
FormatToken *Prev = Tok->getPreviousNonComment();
if (!Prev)
break;
- if (Prev->isOneOf(tok::r_paren, tok::kw_noexcept))
+ if (Prev->isOneOf(tok::r_paren, tok::kw_noexcept)) {
Tok->setType(TT_CtorInitializerColon);
- else if (Prev->is(tok::kw_try)) {
+ } else if (Prev->is(tok::kw_try)) {
// Member initializer list within function try block.
FormatToken *PrevPrev = Prev->getPreviousNonComment();
if (!PrevPrev)
break;
if (PrevPrev && PrevPrev->isOneOf(tok::r_paren, tok::kw_noexcept))
Tok->setType(TT_CtorInitializerColon);
- } else
+ } else {
Tok->setType(TT_InheritanceColon);
+ }
} else if (canBeObjCSelectorComponent(*Tok->Previous) && Tok->Next &&
(Tok->Next->isOneOf(tok::r_paren, tok::comma) ||
(canBeObjCSelectorComponent(*Tok->Next) && Tok->Next->Next &&
@@ -982,10 +1030,12 @@ private:
Tok->setType(TT_JsTypeOperator);
break;
case tok::kw_if:
- case tok::kw_while:
- if (Tok->is(tok::kw_if) && CurrentToken &&
- CurrentToken->isOneOf(tok::kw_constexpr, tok::identifier))
+ if (CurrentToken &&
+ CurrentToken->isOneOf(tok::kw_constexpr, tok::identifier)) {
next();
+ }
+ LLVM_FALLTHROUGH;
+ case tok::kw_while:
if (CurrentToken && CurrentToken->is(tok::l_paren)) {
next();
if (!parseParens(/*LookForDecls=*/true))
@@ -996,8 +1046,9 @@ private:
if (Style.isJavaScript()) {
// x.for and {for: ...}
if ((Tok->Previous && Tok->Previous->is(tok::period)) ||
- (Tok->Next && Tok->Next->is(tok::colon)))
+ (Tok->Next && Tok->Next->is(tok::colon))) {
break;
+ }
// JS' for await ( ...
if (CurrentToken && CurrentToken->is(Keywords.kw_await))
next();
@@ -1005,6 +1056,8 @@ private:
if (Style.isCpp() && CurrentToken && CurrentToken->is(tok::kw_co_await))
next();
Contexts.back().ColonIsForRangeExpr = true;
+ if (!CurrentToken || CurrentToken->isNot(tok::l_paren))
+ return false;
next();
if (!parseParens())
return false;
@@ -1026,10 +1079,12 @@ private:
return false;
if (Line.MustBeDeclaration && Contexts.size() == 1 &&
!Contexts.back().IsExpression && !Line.startsWith(TT_ObjCProperty) &&
- !Tok->is(TT_TypeDeclarationParen) &&
- (!Tok->Previous || !Tok->Previous->isOneOf(tok::kw___attribute,
- TT_LeadingJavaAnnotation)))
+ !Tok->isOneOf(TT_TypeDeclarationParen, TT_RequiresExpressionLParen) &&
+ (!Tok->Previous ||
+ !Tok->Previous->isOneOf(tok::kw___attribute,
+ TT_LeadingJavaAnnotation))) {
Line.MightBeFunctionDecl = true;
+ }
break;
case tok::l_square:
if (!parseSquare())
@@ -1083,20 +1138,23 @@ private:
break;
case tok::kw_operator:
if (Style.Language == FormatStyle::LK_TextProto ||
- Style.Language == FormatStyle::LK_Proto)
+ Style.Language == FormatStyle::LK_Proto) {
break;
+ }
while (CurrentToken &&
!CurrentToken->isOneOf(tok::l_paren, tok::semi, tok::r_paren)) {
if (CurrentToken->isOneOf(tok::star, tok::amp))
CurrentToken->setType(TT_PointerOrReference);
consumeToken();
if (CurrentToken && CurrentToken->is(tok::comma) &&
- CurrentToken->Previous->isNot(tok::kw_operator))
+ CurrentToken->Previous->isNot(tok::kw_operator)) {
break;
+ }
if (CurrentToken && CurrentToken->Previous->isOneOf(
TT_BinaryOperator, TT_UnaryOperator, tok::comma,
- tok::star, tok::arrow, tok::amp, tok::ampamp))
+ tok::star, tok::arrow, tok::amp, tok::ampamp)) {
CurrentToken->Previous->setType(TT_OverloadedOperator);
+ }
}
if (CurrentToken && CurrentToken->is(tok::l_paren))
CurrentToken->setType(TT_OverloadedOperatorLParen);
@@ -1117,8 +1175,9 @@ private:
// Declarations cannot be conditional expressions, this can only be part
// of a type declaration.
if (Line.MustBeDeclaration && !Contexts.back().IsExpression &&
- Style.isJavaScript())
+ Style.isJavaScript()) {
break;
+ }
if (Style.isCSharp()) {
// `Type?)`, `Type?>`, `Type? name;` and `Type? name =` can only be
// nullable types.
@@ -1137,16 +1196,22 @@ private:
parseTemplateDeclaration();
break;
case tok::comma:
- if (Contexts.back().InCtorInitializer)
+ switch (Contexts.back().ContextType) {
+ case Context::CtorInitializer:
Tok->setType(TT_CtorInitializerComma);
- else if (Contexts.back().InInheritanceList)
+ break;
+ case Context::InheritanceList:
Tok->setType(TT_InheritanceComma);
- else if (Contexts.back().FirstStartOfName &&
- (Contexts.size() == 1 || Line.startsWith(tok::kw_for))) {
- Contexts.back().FirstStartOfName->PartOfMultiVariableDeclStmt = true;
- Line.IsMultiVariableDeclStmt = true;
+ break;
+ default:
+ if (Contexts.back().FirstStartOfName &&
+ (Contexts.size() == 1 || startsWithInitStatement(Line))) {
+ Contexts.back().FirstStartOfName->PartOfMultiVariableDeclStmt = true;
+ Line.IsMultiVariableDeclStmt = true;
+ }
+ break;
}
- if (Contexts.back().IsForEachMacro)
+ if (Contexts.back().ContextType == Context::ForEachMacro)
Contexts.back().IsExpression = true;
break;
case tok::identifier:
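For reference, the three comma roles the switch above distinguishes, in one compilable sketch:

  struct Base1 {};
  struct Base2 {};
  struct Derived : Base1, Base2 { // comma in an inheritance list
    Derived() : x(0), y(0) {}     // comma in a ctor initializer list
    int x, y;                     // comma in a multi-variable declaration
  };
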
@@ -1160,6 +1225,12 @@ private:
parseCSharpGenericTypeConstraint();
}
break;
+ case tok::arrow:
+ if (Tok->isNot(TT_LambdaArrow) && Tok->Previous &&
+ Tok->Previous->is(tok::kw_noexcept)) {
+ Tok->setType(TT_TrailingReturnArrow);
+ }
+ break;
default:
break;
}
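The new tok::arrow case covers arrows that follow a noexcept specifier; these are trailing return arrows, not lambda arrows. E.g.:

  auto divide(int a, int b) noexcept -> int { // '->' right after 'noexcept'
    return b ? a / b : 0;                     // is a trailing return arrow
  }
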
@@ -1202,8 +1273,9 @@ private:
// Mark tokens up to the trailing line comments as implicit string
// literals.
if (CurrentToken->isNot(tok::comment) &&
- !CurrentToken->TokenText.startswith("//"))
+ !CurrentToken->TokenText.startswith("//")) {
CurrentToken->setType(TT_ImplicitStringLiteral);
+ }
next();
}
}
@@ -1223,9 +1295,10 @@ private:
void parsePragma() {
next(); // Consume "pragma".
if (CurrentToken &&
- CurrentToken->isOneOf(Keywords.kw_mark, Keywords.kw_option)) {
+ CurrentToken->isOneOf(Keywords.kw_mark, Keywords.kw_option,
+ Keywords.kw_region)) {
bool IsMark = CurrentToken->is(Keywords.kw_mark);
- next(); // Consume "mark".
+      next(); // Consume "mark", "option" or "region".
next(); // Consume first token (so we fix leading whitespace).
while (CurrentToken) {
if (IsMark || CurrentToken->Previous->is(TT_BinaryOperator))
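With Keywords.kw_region now accepted, `#pragma region` labels get the same leading-whitespace treatment as `#pragma mark`. A hypothetical input:

  #pragma region Helper functions
  int helper();
  #pragma endregion
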
@@ -1262,7 +1335,7 @@ private:
return LT_ImportStatement;
}
- if (CurrentToken->Tok.is(tok::numeric_constant)) {
+ if (CurrentToken->is(tok::numeric_constant)) {
CurrentToken->SpacesRequiredBefore = 1;
return Type;
}
@@ -1270,6 +1343,10 @@ private:
// sequence.
if (!CurrentToken->Tok.getIdentifierInfo())
return Type;
+    // In Verilog, macro expansions start with a backtick just like preprocessor
+ // directives. Thus we stop if the word is not a preprocessor directive.
+ if (Style.isVerilog() && !Keywords.isVerilogPPDirective(*CurrentToken))
+ return LT_Invalid;
switch (CurrentToken->Tok.getIdentifierInfo()->getPPKeywordID()) {
case tok::pp_include:
case tok::pp_include_next:
@@ -1297,11 +1374,12 @@ private:
while (CurrentToken) {
FormatToken *Tok = CurrentToken;
next();
- if (Tok->is(tok::l_paren))
+ if (Tok->is(tok::l_paren)) {
parseParens();
- else if (Tok->isOneOf(Keywords.kw___has_include,
- Keywords.kw___has_include_next))
+ } else if (Tok->isOneOf(Keywords.kw___has_include,
+ Keywords.kw___has_include_next)) {
parseHasInclude();
+ }
}
return Type;
}
@@ -1311,8 +1389,14 @@ public:
if (!CurrentToken)
return LT_Invalid;
NonTemplateLess.clear();
- if (CurrentToken->is(tok::hash))
- return parsePreprocessorDirective();
+ if (CurrentToken->is(tok::hash)) {
+ // We were not yet allowed to use C++17 optional when this was being
+ // written. So we used LT_Invalid to mark that the line is not a
+ // preprocessor directive.
+ auto Type = parsePreprocessorDirective();
+ if (Type != LT_Invalid)
+ return Type;
+ }
// Directly allow 'import <string-literal>' to support protocol buffer
// definitions (github.com/google/protobuf) or missing "#" (either way we
@@ -1367,8 +1451,9 @@ public:
// an import in this sense.
if (Line.First->is(tok::kw_export) &&
CurrentToken->is(Keywords.kw_from) && CurrentToken->Next &&
- CurrentToken->Next->isStringLiteral())
+ CurrentToken->Next->isStringLiteral()) {
ImportStatement = true;
+ }
if (isClosureImportStatement(*CurrentToken))
ImportStatement = true;
}
@@ -1381,17 +1466,16 @@ public:
return LT_ImportStatement;
if (Line.startsWith(TT_ObjCMethodSpecifier)) {
- if (Contexts.back().FirstObjCSelectorName)
+ if (Contexts.back().FirstObjCSelectorName) {
Contexts.back().FirstObjCSelectorName->LongestObjCSelectorName =
Contexts.back().LongestObjCSelectorName;
+ }
return LT_ObjCMethodDecl;
}
- for (const auto &ctx : Contexts) {
- if (ctx.InStructArrayInitializer) {
+ for (const auto &ctx : Contexts)
+ if (ctx.ContextType == Context::StructArrayInitializer)
return LT_ArrayOfStructInitializer;
- }
- }
return LT_Other;
}
@@ -1416,16 +1500,21 @@ private:
// Reset token type in case we have already looked at it and then
// recovered from an error (e.g. failure to find the matching >).
- if (!CurrentToken->isOneOf(
+ if (!CurrentToken->isTypeFinalized() &&
+ !CurrentToken->isOneOf(
TT_LambdaLSquare, TT_LambdaLBrace, TT_AttributeMacro, TT_IfMacro,
TT_ForEachMacro, TT_TypenameMacro, TT_FunctionLBrace,
TT_ImplicitStringLiteral, TT_InlineASMBrace, TT_FatArrow,
TT_LambdaArrow, TT_NamespaceMacro, TT_OverloadedOperator,
TT_RegexLiteral, TT_TemplateString, TT_ObjCStringLiteral,
- TT_UntouchableMacroFunc, TT_ConstraintJunctions,
- TT_StatementAttributeLikeMacro, TT_FunctionLikeOrFreestandingMacro,
- TT_RecordLBrace))
+ TT_UntouchableMacroFunc, TT_StatementAttributeLikeMacro,
+ TT_FunctionLikeOrFreestandingMacro, TT_ClassLBrace, TT_EnumLBrace,
+ TT_RecordLBrace, TT_StructLBrace, TT_UnionLBrace, TT_RequiresClause,
+ TT_RequiresClauseInARequiresExpression, TT_RequiresExpression,
+ TT_RequiresExpressionLParen, TT_RequiresExpressionLBrace,
+ TT_CompoundRequirementLBrace, TT_BracedListLBrace)) {
CurrentToken->setType(TT_Unknown);
+ }
CurrentToken->Role.reset();
CurrentToken->MatchingParen = nullptr;
CurrentToken->FakeLParens.clear();
@@ -1463,14 +1552,25 @@ private:
FormatToken *FirstObjCSelectorName = nullptr;
FormatToken *FirstStartOfName = nullptr;
bool CanBeExpression = true;
- bool InTemplateArgument = false;
- bool InCtorInitializer = false;
- bool InInheritanceList = false;
bool CaretFound = false;
- bool IsForEachMacro = false;
bool InCpp11AttributeSpecifier = false;
bool InCSharpAttributeSpecifier = false;
- bool InStructArrayInitializer = false;
+ enum {
+ Unknown,
+ // Like the part after `:` in a constructor.
+ // Context(...) : IsExpression(IsExpression)
+ CtorInitializer,
+ // Like in the parentheses in a foreach.
+ ForEachMacro,
+ // Like the inheritance list in a class declaration.
+ // class Input : public IO
+ InheritanceList,
+ // Like in the braced list.
+ // int x[] = {};
+ StructArrayInitializer,
+ // Like in `static_cast<int>`.
+ TemplateArgument,
+ } ContextType = Unknown;
};
/// Puts a new \c Context onto the stack \c Contexts for the lifetime
@@ -1488,9 +1588,9 @@ private:
~ScopedContextCreator() {
if (P.Style.AlignArrayOfStructures != FormatStyle::AIAS_None) {
- if (P.Contexts.back().InStructArrayInitializer) {
+ if (P.Contexts.back().ContextType == Context::StructArrayInitializer) {
P.Contexts.pop_back();
- P.Contexts.back().InStructArrayInitializer = true;
+ P.Contexts.back().ContextType = Context::StructArrayInitializer;
return;
}
}
@@ -1499,15 +1599,55 @@ private:
};
void modifyContext(const FormatToken &Current) {
- if (Current.getPrecedence() == prec::Assignment &&
- !Line.First->isOneOf(tok::kw_template, tok::kw_using, tok::kw_return) &&
- // Type aliases use `type X = ...;` in TypeScript and can be exported
- // using `export type ...`.
- !(Style.isJavaScript() &&
+ auto AssignmentStartsExpression = [&]() {
+ if (Current.getPrecedence() != prec::Assignment)
+ return false;
+
+ if (Line.First->isOneOf(tok::kw_using, tok::kw_return))
+ return false;
+ if (Line.First->is(tok::kw_template)) {
+ assert(Current.Previous);
+ if (Current.Previous->is(tok::kw_operator)) {
+ // `template ... operator=` cannot be an expression.
+ return false;
+ }
+
+ // `template` keyword can start a variable template.
+ const FormatToken *Tok = Line.First->getNextNonComment();
+ assert(Tok); // Current token is on the same line.
+ if (Tok->isNot(TT_TemplateOpener)) {
+ // Explicit template instantiations do not have `<>`.
+ return false;
+ }
+
+ Tok = Tok->MatchingParen;
+ if (!Tok)
+ return false;
+ Tok = Tok->getNextNonComment();
+ if (!Tok)
+ return false;
+
+ if (Tok->isOneOf(tok::kw_class, tok::kw_enum, tok::kw_concept,
+ tok::kw_struct, tok::kw_using)) {
+ return false;
+ }
+
+ return true;
+ }
+
+ // Type aliases use `type X = ...;` in TypeScript and can be exported
+ // using `export type ...`.
+ if (Style.isJavaScript() &&
(Line.startsWith(Keywords.kw_type, tok::identifier) ||
Line.startsWith(tok::kw_export, Keywords.kw_type,
- tok::identifier))) &&
- (!Current.Previous || Current.Previous->isNot(tok::kw_operator))) {
+ tok::identifier))) {
+ return false;
+ }
+
+ return !Current.Previous || Current.Previous->isNot(tok::kw_operator);
+ };
+
+ if (AssignmentStartsExpression()) {
Contexts.back().IsExpression = true;
if (!Line.startsWith(TT_UnaryOperator)) {
for (FormatToken *Previous = Current.Previous;
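The lambda above separates variable templates, whose '=' genuinely starts an expression, from the template-prefixed declarations it rejects. The three shapes, assuming C++14:

  template <typename T>
  constexpr T zero = T(0);     // variable template: '=' starts an expression

  template <typename T> struct Holder {
    Holder &operator=(const Holder &);
  };
  template <typename T>       // `template ... operator=`: not an expression
  Holder<T> &Holder<T>::operator=(const Holder<T> &) = default;

  template struct Holder<int>; // explicit instantiation: no '<>' after 'template'
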
@@ -1523,8 +1663,9 @@ private:
break;
if (Previous->isOneOf(TT_BinaryOperator, TT_UnaryOperator) &&
Previous->isOneOf(tok::star, tok::amp, tok::ampamp) &&
- Previous->Previous && Previous->Previous->isNot(tok::equal))
+ Previous->Previous && Previous->Previous->isNot(tok::equal)) {
Previous->setType(TT_PointerOrReference);
+ }
}
}
} else if (Current.is(tok::lessless) &&
@@ -1539,16 +1680,19 @@ private:
} else if (Current.Previous &&
Current.Previous->is(TT_CtorInitializerColon)) {
Contexts.back().IsExpression = true;
- Contexts.back().InCtorInitializer = true;
+ Contexts.back().ContextType = Context::CtorInitializer;
} else if (Current.Previous && Current.Previous->is(TT_InheritanceColon)) {
- Contexts.back().InInheritanceList = true;
+ Contexts.back().ContextType = Context::InheritanceList;
} else if (Current.isOneOf(tok::r_paren, tok::greater, tok::comma)) {
for (FormatToken *Previous = Current.Previous;
Previous && Previous->isOneOf(tok::star, tok::amp);
- Previous = Previous->Previous)
+ Previous = Previous->Previous) {
Previous->setType(TT_PointerOrReference);
- if (Line.MustBeDeclaration && !Contexts.front().InCtorInitializer)
+ }
+ if (Line.MustBeDeclaration &&
+ Contexts.front().ContextType != Context::CtorInitializer) {
Contexts.back().IsExpression = false;
+ }
} else if (Current.is(tok::kw_new)) {
Contexts.back().CanBeExpression = false;
} else if (Current.is(tok::semi) ||
@@ -1615,12 +1759,14 @@ private:
FormatToken *PriorLeadingIdentifier = LeadingIdentifier->Previous;
// Skip back past explicit decoration
if (PriorLeadingIdentifier &&
- PriorLeadingIdentifier->is(tok::kw_explicit))
+ PriorLeadingIdentifier->is(tok::kw_explicit)) {
PriorLeadingIdentifier = PriorLeadingIdentifier->Previous;
+ }
- return (PriorLeadingIdentifier &&
- PriorLeadingIdentifier->is(TT_TemplateCloser) &&
- LeadingIdentifier->TokenText == Current.Next->TokenText);
+ return PriorLeadingIdentifier &&
+ (PriorLeadingIdentifier->is(TT_TemplateCloser) ||
+ PriorLeadingIdentifier->ClosesRequiresClause) &&
+ LeadingIdentifier->TokenText == Current.Next->TokenText;
}
}
}
@@ -1628,9 +1774,10 @@ private:
}
void determineTokenType(FormatToken &Current) {
- if (!Current.is(TT_Unknown))
+ if (!Current.is(TT_Unknown)) {
// The token type is already known.
return;
+ }
if ((Style.isJavaScript() || Style.isCSharp()) &&
Current.is(tok::exclaim)) {
@@ -1642,9 +1789,9 @@ private:
: Current.Previous->is(tok::identifier);
if (IsIdentifier ||
Current.Previous->isOneOf(
- tok::kw_namespace, tok::r_paren, tok::r_square, tok::r_brace,
- tok::kw_false, tok::kw_true, Keywords.kw_type, Keywords.kw_get,
- Keywords.kw_set) ||
+ tok::kw_default, tok::kw_namespace, tok::r_paren, tok::r_square,
+ tok::r_brace, tok::kw_false, tok::kw_true, Keywords.kw_type,
+ Keywords.kw_get, Keywords.kw_init, Keywords.kw_set) ||
Current.Previous->Tok.isLiteral()) {
Current.setType(TT_NonNullAssertion);
return;
@@ -1693,7 +1840,7 @@ private:
Current.setType(determineStarAmpUsage(
Current,
Contexts.back().CanBeExpression && Contexts.back().IsExpression,
- Contexts.back().InTemplateArgument));
+ Contexts.back().ContextType == Context::TemplateArgument));
} else if (Current.isOneOf(tok::minus, tok::plus, tok::caret)) {
Current.setType(determinePlusMinusCaretUsage(Current));
if (Current.is(TT_UnaryOperator) && Current.is(tok::caret))
@@ -1718,15 +1865,19 @@ private:
Current.setType(TT_BinaryOperator);
} else if (Current.is(tok::comment)) {
if (Current.TokenText.startswith("/*")) {
- if (Current.TokenText.endswith("*/"))
+ if (Current.TokenText.endswith("*/")) {
Current.setType(TT_BlockComment);
- else
+ } else {
// The lexer has for some reason determined a comment here. But we
// cannot really handle it, if it isn't properly terminated.
Current.Tok.setKind(tok::unknown);
+ }
} else {
Current.setType(TT_LineComment);
}
+ } else if (Current.is(tok::l_paren)) {
+ if (lParenStartsCppCast(Current))
+ Current.setType(TT_CppCastLParen);
} else if (Current.is(tok::r_paren)) {
if (rParenEndsCast(Current))
Current.setType(TT_CastRParen);
@@ -1734,19 +1885,22 @@ private:
!Current.Next->isBinaryOperator() &&
!Current.Next->isOneOf(tok::semi, tok::colon, tok::l_brace,
tok::comma, tok::period, tok::arrow,
- tok::coloncolon))
+ tok::coloncolon)) {
if (FormatToken *AfterParen = Current.MatchingParen->Next) {
// Make sure this isn't the return type of an Obj-C block declaration
- if (AfterParen->Tok.isNot(tok::caret)) {
- if (FormatToken *BeforeParen = Current.MatchingParen->Previous)
+ if (AfterParen->isNot(tok::caret)) {
+ if (FormatToken *BeforeParen = Current.MatchingParen->Previous) {
if (BeforeParen->is(tok::identifier) &&
!BeforeParen->is(TT_TypenameMacro) &&
BeforeParen->TokenText == BeforeParen->TokenText.upper() &&
(!BeforeParen->Previous ||
- BeforeParen->Previous->ClosesTemplateDeclaration))
+ BeforeParen->Previous->ClosesTemplateDeclaration)) {
Current.setType(TT_FunctionAnnotationRParen);
+ }
+ }
}
}
+ }
} else if (Current.is(tok::at) && Current.Next && !Style.isJavaScript() &&
Style.Language != FormatStyle::LK_Java) {
// In Java & JavaScript, "@..." is a decorator or annotation. In ObjC, it
@@ -1766,11 +1920,11 @@ private:
} else if (Current.is(tok::period)) {
FormatToken *PreviousNoComment = Current.getPreviousNonComment();
if (PreviousNoComment &&
- PreviousNoComment->isOneOf(tok::comma, tok::l_brace))
+ PreviousNoComment->isOneOf(tok::comma, tok::l_brace)) {
Current.setType(TT_DesignatedInitializerPeriod);
- else if (Style.Language == FormatStyle::LK_Java && Current.Previous &&
- Current.Previous->isOneOf(TT_JavaAnnotation,
- TT_LeadingJavaAnnotation)) {
+ } else if (Style.Language == FormatStyle::LK_Java && Current.Previous &&
+ Current.Previous->isOneOf(TT_JavaAnnotation,
+ TT_LeadingJavaAnnotation)) {
Current.setType(Current.Previous->getType());
}
} else if (canBeObjCSelectorComponent(Current) &&
@@ -1822,8 +1976,9 @@ private:
return false;
if (Tok.Previous->isOneOf(TT_LeadingJavaAnnotation, Keywords.kw_instanceof,
- Keywords.kw_as))
+ Keywords.kw_as)) {
return false;
+ }
if (Style.isJavaScript() && Tok.Previous->is(Keywords.kw_in))
return false;
@@ -1838,19 +1993,24 @@ private:
if (!PreviousNotConst)
return false;
+ if (PreviousNotConst->ClosesRequiresClause)
+ return false;
+
bool IsPPKeyword = PreviousNotConst->is(tok::identifier) &&
PreviousNotConst->Previous &&
PreviousNotConst->Previous->is(tok::hash);
- if (PreviousNotConst->is(TT_TemplateCloser))
+ if (PreviousNotConst->is(TT_TemplateCloser)) {
return PreviousNotConst && PreviousNotConst->MatchingParen &&
PreviousNotConst->MatchingParen->Previous &&
PreviousNotConst->MatchingParen->Previous->isNot(tok::period) &&
PreviousNotConst->MatchingParen->Previous->isNot(tok::kw_template);
+ }
if (PreviousNotConst->is(tok::r_paren) &&
- PreviousNotConst->is(TT_TypeDeclarationParen))
+ PreviousNotConst->is(TT_TypeDeclarationParen)) {
return true;
+ }
// If is a preprocess keyword like #define.
if (IsPPKeyword)
@@ -1869,15 +2029,37 @@ private:
return true;
// const a = in JavaScript.
- return (Style.isJavaScript() && PreviousNotConst->is(tok::kw_const));
+ return Style.isJavaScript() && PreviousNotConst->is(tok::kw_const);
+ }
+
+ /// Determine whether '(' is starting a C++ cast.
+ bool lParenStartsCppCast(const FormatToken &Tok) {
+ // C-style casts are only used in C++.
+ if (!Style.isCpp())
+ return false;
+
+ FormatToken *LeftOfParens = Tok.getPreviousNonComment();
+ if (LeftOfParens && LeftOfParens->is(TT_TemplateCloser) &&
+ LeftOfParens->MatchingParen) {
+ auto *Prev = LeftOfParens->MatchingParen->getPreviousNonComment();
+ if (Prev &&
+ Prev->isOneOf(tok::kw_const_cast, tok::kw_dynamic_cast,
+ tok::kw_reinterpret_cast, tok::kw_static_cast)) {
+ // FIXME: Maybe we should handle identifiers ending with "_cast",
+ // e.g. any_cast?
+ return true;
+ }
+ }
+ return false;
}
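Concretely, lParenStartsCppCast looks for a '(' right after the '>' that closes one of the four named casts:

  void take(void *p) {
    int *ip = static_cast<int *>(p); // the '(' after the '>' of a named cast
    (void)ip;                        // gets TT_CppCastLParen
  }
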
/// Determine whether ')' is ending a cast.
bool rParenEndsCast(const FormatToken &Tok) {
// C-style casts are only used in C++, C# and Java.
if (!Style.isCSharp() && !Style.isCpp() &&
- Style.Language != FormatStyle::LK_Java)
+ Style.Language != FormatStyle::LK_Java) {
return false;
+ }
// Empty parens aren't casts and there are no casts at the end of the line.
if (Tok.Previous == Tok.MatchingParen || !Tok.Next || !Tok.MatchingParen)
@@ -1890,30 +2072,53 @@ private:
if (LeftOfParens->is(tok::r_paren) &&
LeftOfParens->isNot(TT_CastRParen)) {
if (!LeftOfParens->MatchingParen ||
- !LeftOfParens->MatchingParen->Previous)
+ !LeftOfParens->MatchingParen->Previous) {
return false;
+ }
LeftOfParens = LeftOfParens->MatchingParen->Previous;
}
+ if (LeftOfParens->is(tok::r_square)) {
+ // delete[] (void *)ptr;
+ auto MayBeArrayDelete = [](FormatToken *Tok) -> FormatToken * {
+ if (Tok->isNot(tok::r_square))
+ return nullptr;
+
+ Tok = Tok->getPreviousNonComment();
+ if (!Tok || Tok->isNot(tok::l_square))
+ return nullptr;
+
+ Tok = Tok->getPreviousNonComment();
+ if (!Tok || Tok->isNot(tok::kw_delete))
+ return nullptr;
+ return Tok;
+ };
+ if (FormatToken *MaybeDelete = MayBeArrayDelete(LeftOfParens))
+ LeftOfParens = MaybeDelete;
+ }
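The MayBeArrayDelete walk handles the one place where an r_square legitimately precedes a cast's '(':

  void release(void *raw) {
    delete[] (int *)raw; // the ']' before '(' no longer rules out a cast
  }
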
+
// The Condition directly below this one will see the operator arguments
// as a (void *foo) cast.
// void operator delete(void *foo) ATTRIB;
if (LeftOfParens->Tok.getIdentifierInfo() && LeftOfParens->Previous &&
- LeftOfParens->Previous->is(tok::kw_operator))
+ LeftOfParens->Previous->is(tok::kw_operator)) {
return false;
+ }
// If there is an identifier (or with a few exceptions a keyword) right
// before the parentheses, this is unlikely to be a cast.
if (LeftOfParens->Tok.getIdentifierInfo() &&
!LeftOfParens->isOneOf(Keywords.kw_in, tok::kw_return, tok::kw_case,
- tok::kw_delete))
+ tok::kw_delete)) {
return false;
+ }
// Certain other tokens right before the parentheses are also signals that
// this cannot be a cast.
if (LeftOfParens->isOneOf(tok::at, tok::r_square, TT_OverloadedOperator,
- TT_TemplateCloser, tok::ellipsis))
+ TT_TemplateCloser, tok::ellipsis)) {
return false;
+ }
}
if (Tok.Next->is(tok::question))
@@ -1928,8 +2133,9 @@ private:
if (Tok.Next->isOneOf(tok::kw_noexcept, tok::kw_volatile, tok::kw_const,
tok::kw_requires, tok::kw_throw, tok::arrow,
Keywords.kw_override, Keywords.kw_final) ||
- isCpp11AttributeSpecifier(*Tok.Next))
+ isCpp11AttributeSpecifier(*Tok.Next)) {
return false;
+ }
// As Java has no function types, a "(" after the ")" likely means that this
// is a cast.
@@ -1939,8 +2145,9 @@ private:
// If a (non-string) literal follows, this is likely a cast.
if (Tok.Next->isNot(tok::string_literal) &&
(Tok.Next->Tok.isLiteral() ||
- Tok.Next->isOneOf(tok::kw_sizeof, tok::kw_alignof)))
+ Tok.Next->isOneOf(tok::kw_sizeof, tok::kw_alignof))) {
return true;
+ }
// Heuristically try to determine whether the parentheses contain a type.
auto IsQualifiedPointerOrReference = [](FormatToken *T) {
@@ -1989,9 +2196,10 @@ private:
// Certain token types inside the parentheses mean that this can't be a
// cast.
for (const FormatToken *Token = Tok.MatchingParen->Next; Token != &Tok;
- Token = Token->Next)
+ Token = Token->Next) {
if (Token->is(TT_BinaryOperator))
return false;
+ }
// If the following token is an identifier or 'this', this is a cast. All
// cases where this can be something else are handled above.
@@ -2001,8 +2209,9 @@ private:
// Look for a cast `( x ) (`.
if (Tok.Next->is(tok::l_paren) && Tok.Previous && Tok.Previous->Previous) {
if (Tok.Previous->is(tok::identifier) &&
- Tok.Previous->Previous->is(tok::l_paren))
+ Tok.Previous->Previous->is(tok::l_paren)) {
return true;
+ }
}
if (!Tok.Next->Next)
@@ -2014,8 +2223,9 @@ private:
bool NextIsUnary =
Tok.Next->isUnaryOperator() || Tok.Next->isOneOf(tok::amp, tok::star);
if (!NextIsUnary || Tok.Next->is(tok::plus) ||
- !Tok.Next->Next->isOneOf(tok::identifier, tok::numeric_constant))
+ !Tok.Next->Next->isOneOf(tok::identifier, tok::numeric_constant)) {
return false;
+ }
// Search for unexpected tokens.
for (FormatToken *Prev = Tok.Previous; Prev != Tok.MatchingParen;
Prev = Prev->Previous) {
@@ -2025,6 +2235,43 @@ private:
return true;
}
+ /// Returns true if the token is used as a unary operator.
+ bool determineUnaryOperatorByUsage(const FormatToken &Tok) {
+ const FormatToken *PrevToken = Tok.getPreviousNonComment();
+ if (!PrevToken)
+ return true;
+
+ // These keywords are deliberately not included here because they may
+ // precede only one of unary star/amp and plus/minus but not both. They are
+ // either included in determineStarAmpUsage or determinePlusMinusCaretUsage.
+ //
+  // @ - It may be followed by a unary `-` in Objective-C literals, but we
+  // don't know of a case where it is followed by a star or amp.
+ if (PrevToken->isOneOf(
+ TT_ConditionalExpr, tok::l_paren, tok::comma, tok::colon, tok::semi,
+ tok::equal, tok::question, tok::l_square, tok::l_brace,
+ tok::kw_case, tok::kw_co_await, tok::kw_co_return, tok::kw_co_yield,
+ tok::kw_delete, tok::kw_return, tok::kw_throw)) {
+ return true;
+ }
+
+ // We put sizeof here instead of only in determineStarAmpUsage. In the cases
+ // where the unary `+` operator is overloaded, it is reasonable to write
+ // things like `sizeof +x`. Like commit 446d6ec996c6c3.
+ if (PrevToken->is(tok::kw_sizeof))
+ return true;
+
+ // A sequence of leading unary operators.
+ if (PrevToken->isOneOf(TT_CastRParen, TT_UnaryOperator))
+ return true;
+
+ // There can't be two consecutive binary operators.
+ if (PrevToken->is(TT_BinaryOperator))
+ return true;
+
+ return false;
+ }
+
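Typical positions the shared helper classifies as unary, including the sizeof case called out above:

  int demo(int x, int *p) {
    int a = -x;                 // after '=': unary minus
    int b = (int)-x;            // after a cast rparen: unary minus
    auto s = sizeof +x;         // 'sizeof +x' reads fine when '+' is overloaded
    return a + b + int(s) + *p; // '+' between operands is binary; '*p' unary
  }
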
/// Return the type of the given token assuming it is * or &.
TokenType determineStarAmpUsage(const FormatToken &Tok, bool IsExpression,
bool InTemplateArgument) {
@@ -2040,11 +2287,16 @@ private:
return TT_UnaryOperator;
const FormatToken *NextToken = Tok.getNextNonComment();
+
+ if (InTemplateArgument && NextToken && NextToken->is(tok::kw_noexcept))
+ return TT_BinaryOperator;
+
if (!NextToken ||
NextToken->isOneOf(tok::arrow, tok::equal, tok::kw_noexcept) ||
NextToken->canBePointerOrReferenceQualifier() ||
- (NextToken->is(tok::l_brace) && !NextToken->getNextNonComment()))
+ (NextToken->is(tok::l_brace) && !NextToken->getNextNonComment())) {
return TT_PointerOrReference;
+ }
if (PrevToken->is(tok::coloncolon))
return TT_PointerOrReference;
@@ -2052,12 +2304,7 @@ private:
if (PrevToken->is(tok::r_paren) && PrevToken->is(TT_TypeDeclarationParen))
return TT_PointerOrReference;
- if (PrevToken->isOneOf(tok::l_paren, tok::l_square, tok::l_brace,
- tok::comma, tok::semi, tok::kw_return, tok::colon,
- tok::kw_co_return, tok::kw_co_await,
- tok::kw_co_yield, tok::equal, tok::kw_delete,
- tok::kw_sizeof, tok::kw_throw, TT_BinaryOperator,
- TT_ConditionalExpr, TT_UnaryOperator, TT_CastRParen))
+ if (determineUnaryOperatorByUsage(Tok))
return TT_UnaryOperator;
if (NextToken->is(tok::l_square) && NextToken->isNot(TT_LambdaLSquare))
@@ -2067,16 +2314,50 @@ private:
if (NextToken->isOneOf(tok::comma, tok::semi))
return TT_PointerOrReference;
+ // After right braces, star tokens are likely to be pointers to struct,
+ // union, or class.
+ // struct {} *ptr;
+ if (PrevToken->is(tok::r_brace) && Tok.is(tok::star))
+ return TT_PointerOrReference;
+
+ // For "} &&"
+ if (PrevToken->is(tok::r_brace) && Tok.is(tok::ampamp)) {
+ const FormatToken *MatchingLBrace = PrevToken->MatchingParen;
+
+ // We check whether there is a TemplateCloser(">") to indicate it's a
+ // template or not. If it's not a template, "&&" is likely a reference
+ // operator.
+ // struct {} &&ref = {};
+ if (!MatchingLBrace)
+ return TT_PointerOrReference;
+ FormatToken *BeforeLBrace = MatchingLBrace->getPreviousNonComment();
+ if (!BeforeLBrace || BeforeLBrace->isNot(TT_TemplateCloser))
+ return TT_PointerOrReference;
+
+ // If it is a template, "&&" is a binary operator.
+ // enable_if<>{} && ...
+ return TT_BinaryOperator;
+ }
+
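The two '} &&' outcomes distinguished above, plus the '} *' case, in a compilable sketch (assuming C++17 for std::bool_constant):

  #include <type_traits>

  struct S {} *ptr;       // '}' then '*': a pointer to the struct
  struct R {} &&ref = {}; // '}' then '&&' with no '>' before '{': rvalue ref
  static_assert(std::bool_constant<true>{} && true, // '>' before '{}' makes
                "this '&&' a binary operator");
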
if (PrevToken->Tok.isLiteral() ||
PrevToken->isOneOf(tok::r_paren, tok::r_square, tok::kw_true,
- tok::kw_false, tok::r_brace) ||
- NextToken->Tok.isLiteral() ||
- NextToken->isOneOf(tok::kw_true, tok::kw_false) ||
- NextToken->isUnaryOperator() ||
- // If we know we're in a template argument, there are no named
- // declarations. Thus, having an identifier on the right-hand side
- // indicates a binary operator.
- (InTemplateArgument && NextToken->Tok.isAnyIdentifier()))
+ tok::kw_false, tok::r_brace)) {
+ return TT_BinaryOperator;
+ }
+
+ const FormatToken *NextNonParen = NextToken;
+ while (NextNonParen && NextNonParen->is(tok::l_paren))
+ NextNonParen = NextNonParen->getNextNonComment();
+ if (NextNonParen && (NextNonParen->Tok.isLiteral() ||
+ NextNonParen->isOneOf(tok::kw_true, tok::kw_false) ||
+ NextNonParen->isUnaryOperator())) {
+ return TT_BinaryOperator;
+ }
+
+ // If we know we're in a template argument, there are no named declarations.
+ // Thus, having an identifier on the right-hand side indicates a binary
+ // operator.
+ if (InTemplateArgument && NextToken->Tok.isAnyIdentifier())
return TT_BinaryOperator;
// "&&(" is quite unlikely to be two successive unary "&".
@@ -2100,23 +2381,14 @@ private:
}
TokenType determinePlusMinusCaretUsage(const FormatToken &Tok) {
- const FormatToken *PrevToken = Tok.getPreviousNonComment();
- if (!PrevToken)
- return TT_UnaryOperator;
-
- if (PrevToken->isOneOf(TT_CastRParen, TT_UnaryOperator))
- // This must be a sequence of leading unary operators.
+ if (determineUnaryOperatorByUsage(Tok))
return TT_UnaryOperator;
- // Use heuristics to recognize unary operators.
- if (PrevToken->isOneOf(tok::equal, tok::l_paren, tok::comma, tok::l_square,
- tok::question, tok::colon, tok::kw_return,
- tok::kw_case, tok::at, tok::l_brace, tok::kw_throw,
- tok::kw_co_return, tok::kw_co_yield))
+ const FormatToken *PrevToken = Tok.getPreviousNonComment();
+ if (!PrevToken)
return TT_UnaryOperator;
- // There can't be two consecutive binary operators.
- if (PrevToken->is(TT_BinaryOperator))
+ if (PrevToken->is(tok::at))
return TT_UnaryOperator;
// Fall back to marking the token as binary operator.
@@ -2158,7 +2430,7 @@ class ExpressionParser {
public:
ExpressionParser(const FormatStyle &Style, const AdditionalKeywords &Keywords,
AnnotatedLine &Line)
- : Style(Style), Keywords(Keywords), Current(Line.First) {}
+ : Style(Style), Keywords(Keywords), Line(Line), Current(Line.First) {}
/// Parse expressions with the given operator precedence.
void parse(int Precedence = 0) {
@@ -2166,8 +2438,9 @@ public:
// expression.
while (Current && (Current->is(tok::kw_return) ||
(Current->is(tok::colon) &&
- Current->isOneOf(TT_ObjCMethodExpr, TT_DictLiteral))))
+ Current->isOneOf(TT_ObjCMethodExpr, TT_DictLiteral)))) {
next();
+ }
if (!Current || Precedence > PrecedenceArrowAndPeriod)
return;
@@ -2214,7 +2487,11 @@ public:
}
// Consume scopes: (), [], <> and {}
- if (Current->opensScope()) {
+ // In addition to that we handle require clauses as scope, so that the
+ // constraints in that are correctly indented.
+ if (Current->opensScope() ||
+ Current->isOneOf(TT_RequiresClause,
+ TT_RequiresClauseInARequiresExpression)) {
// A fragment of a JavaScript template string can look like '}..${' and
// thus close a scope and open a new one at the same time.
while (Current && (!Current->closesScope() || Current->opensScope())) {
@@ -2236,12 +2513,26 @@ public:
}
if (LatestOperator && (Current || Precedence > 0)) {
- // LatestOperator->LastOperator = true;
+ // A requires clause does not necessarily end in a semicolon or a brace;
+ // it may run straight into a struct/class or function declaration, so we
+ // need to intervene to insert the fake right paren correctly.
+ auto End =
+ (Start->Previous &&
+ Start->Previous->isOneOf(TT_RequiresClause,
+ TT_RequiresClauseInARequiresExpression))
+ ? [this](){
+ auto Ret = Current ? Current : Line.Last;
+ while (!Ret->ClosesRequiresClause && Ret->Previous)
+ Ret = Ret->Previous;
+ return Ret;
+ }()
+ : nullptr;
+
if (Precedence == PrecedenceArrowAndPeriod) {
// Call expressions don't have a binary operator precedence.
- addFakeParenthesis(Start, prec::Unknown);
+ addFakeParenthesis(Start, prec::Unknown, End);
} else {
- addFakeParenthesis(Start, prec::Level(Precedence));
+ addFakeParenthesis(Start, prec::Level(Precedence), End);
}
}
}
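The End computation above targets inputs like the following (a minimal C++20 sketch, assuming <concepts>):

    #include <concepts>

    template <typename T>
      requires std::integral<T> && (sizeof(T) > 1)
    void f(T); // the clause ends before 'void', not at a ';' or '{',
               // which is where ClosesRequiresClause is set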
@@ -2258,8 +2549,9 @@ private:
(NextNonComment->isOneOf(TT_DictLiteral, TT_JsTypeColon) ||
((Style.Language == FormatStyle::LK_Proto ||
Style.Language == FormatStyle::LK_TextProto) &&
- NextNonComment->is(tok::less))))
+ NextNonComment->is(tok::less)))) {
return prec::Assignment;
+ }
if (Current->is(TT_JsComputedPropertyName))
return prec::Assignment;
if (Current->is(TT_LambdaArrow))
@@ -2268,39 +2560,43 @@ private:
return prec::Assignment;
if (Current->isOneOf(tok::semi, TT_InlineASMColon, TT_SelectorName) ||
(Current->is(tok::comment) && NextNonComment &&
- NextNonComment->is(TT_SelectorName)))
+ NextNonComment->is(TT_SelectorName))) {
return 0;
+ }
if (Current->is(TT_RangeBasedForLoopColon))
return prec::Comma;
if ((Style.Language == FormatStyle::LK_Java || Style.isJavaScript()) &&
- Current->is(Keywords.kw_instanceof))
+ Current->is(Keywords.kw_instanceof)) {
return prec::Relational;
+ }
if (Style.isJavaScript() &&
- Current->isOneOf(Keywords.kw_in, Keywords.kw_as))
+ Current->isOneOf(Keywords.kw_in, Keywords.kw_as)) {
return prec::Relational;
+ }
if (Current->is(TT_BinaryOperator) || Current->is(tok::comma))
return Current->getPrecedence();
if (Current->isOneOf(tok::period, tok::arrow))
return PrecedenceArrowAndPeriod;
if ((Style.Language == FormatStyle::LK_Java || Style.isJavaScript()) &&
Current->isOneOf(Keywords.kw_extends, Keywords.kw_implements,
- Keywords.kw_throws))
+ Keywords.kw_throws)) {
return 0;
+ }
}
return -1;
}
- void addFakeParenthesis(FormatToken *Start, prec::Level Precedence) {
+ void addFakeParenthesis(FormatToken *Start, prec::Level Precedence,
+ FormatToken *End = nullptr) {
Start->FakeLParens.push_back(Precedence);
if (Precedence > prec::Unknown)
Start->StartsBinaryExpression = true;
- if (Current) {
- FormatToken *Previous = Current->Previous;
- while (Previous->is(tok::comment) && Previous->Previous)
- Previous = Previous->Previous;
- ++Previous->FakeRParens;
+ if (!End && Current)
+ End = Current->getPreviousNonComment();
+ if (End) {
+ ++End->FakeRParens;
if (Precedence > prec::Unknown)
- Previous->EndsBinaryExpression = true;
+ End->EndsBinaryExpression = true;
}
}
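Conceptually (an illustrative sketch only), the fake parentheses encode precedence grouping as if the source were parenthesized:

    int demo(int a, int b, int c, int d) {
      // For "a = b + c * d" the parser acts as if it saw
      // "a = (b + (c * d))": FakeLParens pushed on the start tokens,
      // FakeRParens counted on the end token.
      return a = b + c * d;
    }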
@@ -2313,15 +2609,15 @@ private:
next();
}
parse(PrecedenceArrowAndPeriod);
- for (FormatToken *Token : llvm::reverse(Tokens))
+ for (FormatToken *Token : llvm::reverse(Tokens)) {
// The actual precedence doesn't matter.
addFakeParenthesis(Token, prec::Unknown);
+ }
}
void parseConditionalExpr() {
- while (Current && Current->isTrailingComment()) {
+ while (Current && Current->isTrailingComment())
next();
- }
FormatToken *Start = Current;
parse(prec::LogicalOr);
if (!Current || !Current->is(tok::question))
@@ -2340,33 +2636,28 @@ private:
Current = Current->Next;
while (Current &&
(Current->NewlinesBefore == 0 || SkipPastLeadingComments) &&
- Current->isTrailingComment())
+ Current->isTrailingComment()) {
Current = Current->Next;
+ }
}
const FormatStyle &Style;
const AdditionalKeywords &Keywords;
+ const AnnotatedLine &Line;
FormatToken *Current;
};
} // end anonymous namespace
void TokenAnnotator::setCommentLineLevels(
- SmallVectorImpl<AnnotatedLine *> &Lines) {
+ SmallVectorImpl<AnnotatedLine *> &Lines) const {
const AnnotatedLine *NextNonCommentLine = nullptr;
for (AnnotatedLine *Line : llvm::reverse(Lines)) {
assert(Line->First);
- bool CommentLine = true;
- for (const FormatToken *Tok = Line->First; Tok; Tok = Tok->Next) {
- if (!Tok->is(tok::comment)) {
- CommentLine = false;
- break;
- }
- }
// If the comment is currently aligned with the line immediately following
// it, that's probably intentional and we should keep it.
- if (NextNonCommentLine && CommentLine &&
+ if (NextNonCommentLine && Line->isComment() &&
NextNonCommentLine->First->NewlinesBefore <= 1 &&
NextNonCommentLine->First->OriginalColumn ==
Line->First->OriginalColumn) {
@@ -2394,7 +2685,7 @@ static unsigned maxNestingDepth(const AnnotatedLine &Line) {
return Result;
}
-void TokenAnnotator::annotate(AnnotatedLine &Line) {
+void TokenAnnotator::annotate(AnnotatedLine &Line) const {
for (auto &Child : Line.Children)
annotate(*Child);
@@ -2438,8 +2729,9 @@ static bool isFunctionDeclarationName(bool IsCpp, const FormatToken &Current,
if (Next->isOneOf(tok::kw_new, tok::kw_delete)) {
// For 'new[]' and 'delete[]'.
if (Next->Next &&
- Next->Next->startsSequence(tok::l_square, tok::r_square))
+ Next->Next->startsSequence(tok::l_square, tok::r_square)) {
Next = Next->Next->Next;
+ }
continue;
}
if (Next->startsSequence(tok::l_square, tok::r_square)) {
@@ -2503,8 +2795,9 @@ static bool isFunctionDeclarationName(bool IsCpp, const FormatToken &Current,
return true; // Empty parentheses.
// If there is an &/&& after the r_paren, this is likely a function.
if (Next->MatchingParen->Next &&
- Next->MatchingParen->Next->is(TT_PointerOrReference))
+ Next->MatchingParen->Next->is(TT_PointerOrReference)) {
return true;
+ }
// Check for K&R C function definitions (and C++ function definitions with
// unnamed parameters), e.g.:
@@ -2517,8 +2810,9 @@ static bool isFunctionDeclarationName(bool IsCpp, const FormatToken &Current,
// return !b;
// }
if (IsCpp && Next->Next && Next->Next->is(tok::identifier) &&
- !Line.endsWith(tok::semi))
+ !Line.endsWith(tok::semi)) {
return true;
+ }
for (const FormatToken *Tok = Next->Next; Tok && Tok != Next->MatchingParen;
Tok = Tok->Next) {
@@ -2529,11 +2823,13 @@ static bool isFunctionDeclarationName(bool IsCpp, const FormatToken &Current,
continue;
}
if (Tok->is(tok::kw_const) || Tok->isSimpleTypeSpecifier() ||
- Tok->isOneOf(TT_PointerOrReference, TT_StartOfName, tok::ellipsis))
+ Tok->isOneOf(TT_PointerOrReference, TT_StartOfName, tok::ellipsis)) {
return true;
+ }
if (Tok->isOneOf(tok::l_brace, tok::string_literal, TT_ObjCMethodExpr) ||
- Tok->Tok.isLiteral())
+ Tok->Tok.isLiteral()) {
return false;
+ }
}
return false;
}
@@ -2544,8 +2840,9 @@ bool TokenAnnotator::mustBreakForReturnType(const AnnotatedLine &Line) const {
if ((Style.AlwaysBreakAfterReturnType == FormatStyle::RTBS_TopLevel ||
Style.AlwaysBreakAfterReturnType ==
FormatStyle::RTBS_TopLevelDefinitions) &&
- Line.Level > 0)
+ Line.Level > 0) {
return false;
+ }
switch (Style.AlwaysBreakAfterReturnType) {
case FormatStyle::RTBS_None:
@@ -2561,7 +2858,7 @@ bool TokenAnnotator::mustBreakForReturnType(const AnnotatedLine &Line) const {
return false;
}
-void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
+void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) const {
for (AnnotatedLine *ChildLine : Line.Children)
calculateFormattingInformation(*ChildLine);
@@ -2581,11 +2878,12 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
Current->setType(TT_FunctionDeclarationName);
const FormatToken *Prev = Current->Previous;
if (Current->is(TT_LineComment)) {
- if (Prev->is(BK_BracedInit) && Prev->opensScope())
+ if (Prev->is(BK_BracedInit) && Prev->opensScope()) {
Current->SpacesRequiredBefore =
(Style.Cpp11BracedListStyle && !Style.SpacesInParentheses) ? 0 : 1;
- else
+ } else {
Current->SpacesRequiredBefore = Style.SpacesBeforeTrailingComments;
+ }
// If we find a trailing comment, iterate backwards to determine whether
// it seems to relate to a specific parameter. If so, break before that
@@ -2601,8 +2899,9 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
break;
if (Parameter->Previous && Parameter->Previous->is(tok::comma)) {
if (!Parameter->Previous->is(TT_CtorInitializerComma) &&
- Parameter->HasUnescapedNewline)
+ Parameter->HasUnescapedNewline) {
Parameter->MustBreakBefore = true;
+ }
break;
}
}
@@ -2612,12 +2911,17 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
Current->SpacesRequiredBefore = 1;
}
- Current->MustBreakBefore =
- Current->MustBreakBefore || mustBreakBefore(Line, *Current);
-
- if (!Current->MustBreakBefore && InFunctionDecl &&
- Current->is(TT_FunctionDeclarationName))
- Current->MustBreakBefore = mustBreakForReturnType(Line);
+ const auto &Children = Prev->Children;
+ if (!Children.empty() && Children.back()->Last->is(TT_LineComment)) {
+ Current->MustBreakBefore = true;
+ } else {
+ Current->MustBreakBefore =
+ Current->MustBreakBefore || mustBreakBefore(Line, *Current);
+ if (!Current->MustBreakBefore && InFunctionDecl &&
+ Current->is(TT_FunctionDeclarationName)) {
+ Current->MustBreakBefore = mustBreakForReturnType(Line);
+ }
+ }
Current->CanBreakBefore =
Current->MustBreakBefore || canBreakBefore(Line, *Current);
@@ -2630,11 +2934,12 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
if (Current->MustBreakBefore || Prev->Children.size() > 1 ||
(Prev->Children.size() == 1 &&
Prev->Children[0]->First->MustBreakBefore) ||
- Current->IsMultiline)
+ Current->IsMultiline) {
Current->TotalLength = Prev->TotalLength + Style.ColumnLimit;
- else
+ } else {
Current->TotalLength = Prev->TotalLength + Current->ColumnWidth +
ChildSize + Current->SpacesRequiredBefore;
+ }
if (Current->is(TT_CtorInitializerColon))
InFunctionDecl = false;
@@ -2678,7 +2983,8 @@ void TokenAnnotator::calculateFormattingInformation(AnnotatedLine &Line) {
LLVM_DEBUG({ printDebugInfo(Line); });
}
-void TokenAnnotator::calculateUnbreakableTailLengths(AnnotatedLine &Line) {
+void TokenAnnotator::calculateUnbreakableTailLengths(
+ AnnotatedLine &Line) const {
unsigned UnbreakableTailLength = 0;
FormatToken *Current = Line.Last;
while (Current) {
@@ -2694,10 +3000,10 @@ void TokenAnnotator::calculateUnbreakableTailLengths(AnnotatedLine &Line) {
}
}
-void TokenAnnotator::calculateArrayInitializerColumnList(AnnotatedLine &Line) {
- if (Line.First == Line.Last) {
+void TokenAnnotator::calculateArrayInitializerColumnList(
+ AnnotatedLine &Line) const {
+ if (Line.First == Line.Last)
return;
- }
auto *CurrentToken = Line.First;
CurrentToken->ArrayInitializerLineStart = true;
unsigned Depth = 0;
@@ -2715,7 +3021,7 @@ void TokenAnnotator::calculateArrayInitializerColumnList(AnnotatedLine &Line) {
}
FormatToken *TokenAnnotator::calculateInitializerColumnList(
- AnnotatedLine &Line, FormatToken *CurrentToken, unsigned Depth) {
+ AnnotatedLine &Line, FormatToken *CurrentToken, unsigned Depth) const {
while (CurrentToken != nullptr && CurrentToken != Line.Last) {
if (CurrentToken->is(tok::l_brace))
++Depth;
@@ -2735,7 +3041,7 @@ FormatToken *TokenAnnotator::calculateInitializerColumnList(
unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
const FormatToken &Tok,
- bool InFunctionDecl) {
+ bool InFunctionDecl) const {
const FormatToken &Left = *Tok.Previous;
const FormatToken &Right = Tok;
@@ -2755,8 +3061,9 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
if (Left.is(TT_JsTypeColon))
return 35;
if ((Left.is(TT_TemplateString) && Left.TokenText.endswith("${")) ||
- (Right.is(TT_TemplateString) && Right.TokenText.startswith("}")))
+ (Right.is(TT_TemplateString) && Right.TokenText.startswith("}"))) {
return 100;
+ }
// Prefer breaking call chains (".foo") over empty "{}", "[]" or "()".
if (Left.opensScope() && Right.closesScope())
return 200;
@@ -2774,13 +3081,15 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
return 35;
if (!Right.isOneOf(TT_ObjCMethodExpr, TT_LambdaLSquare,
TT_ArrayInitializerLSquare,
- TT_DesignatedInitializerLSquare, TT_AttributeSquare))
+ TT_DesignatedInitializerLSquare, TT_AttributeSquare)) {
return 500;
+ }
}
if (Left.is(tok::coloncolon) ||
- (Right.is(tok::period) && Style.Language == FormatStyle::LK_Proto))
+ (Right.is(tok::period) && Style.Language == FormatStyle::LK_Proto)) {
return 500;
+ }
if (Right.isOneOf(TT_StartOfName, TT_FunctionDeclarationName) ||
Right.is(tok::kw_operator)) {
if (Line.startsWith(tok::kw_for) && Right.PartOfMultiVariableDeclStmt)
@@ -2805,8 +3114,9 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
return 1000;
if (Left.isOneOf(TT_RangeBasedForLoopColon, TT_InheritanceColon,
- TT_CtorInitializerColon))
+ TT_CtorInitializerColon)) {
return 2;
+ }
if (Right.isMemberAccess()) {
// Breaking before the "./->" of a chained call/member access is reasonably
@@ -2863,17 +3173,20 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
// open paren (we'll prefer breaking after the protocol list's opening
// angle bracket, if present).
if (Line.Type == LT_ObjCDecl && Left.is(tok::l_paren) && Left.Previous &&
- Left.Previous->isOneOf(tok::identifier, tok::greater))
+ Left.Previous->isOneOf(tok::identifier, tok::greater)) {
return 500;
+ }
if (Left.is(tok::l_paren) && Style.PenaltyBreakOpenParenthesis != 0)
return Style.PenaltyBreakOpenParenthesis;
if (Left.is(tok::l_paren) && InFunctionDecl &&
- Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign)
+ Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign) {
return 100;
+ }
if (Left.is(tok::l_paren) && Left.Previous &&
- (Left.Previous->is(tok::kw_for) || Left.Previous->isIf()))
+ (Left.Previous->is(tok::kw_for) || Left.Previous->isIf())) {
return 1000;
+ }
if (Left.is(tok::equal) && InFunctionDecl)
return 110;
if (Right.is(tok::r_brace))
@@ -2885,8 +3198,9 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
// here unless the style does not want us to place all arguments on the
// next line.
if (Style.AlignAfterOpenBracket == FormatStyle::BAS_DontAlign &&
- (Left.ParameterCount <= 1 || Style.AllowAllArgumentsOnNextLine))
+ (Left.ParameterCount <= 1 || Style.AllowAllArgumentsOnNextLine)) {
return 0;
+ }
if (Left.is(tok::l_brace) && !Style.Cpp11BracedListStyle)
return 19;
return Left.ParameterCount > 1 ? Style.PenaltyBreakBeforeFirstCallParameter
@@ -2899,25 +3213,31 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
return 60;
if (Left.isOneOf(tok::plus, tok::comma) && Left.Previous &&
Left.Previous->isLabelString() &&
- (Left.NextOperator || Left.OperatorIndex != 0))
+ (Left.NextOperator || Left.OperatorIndex != 0)) {
return 50;
+ }
if (Right.is(tok::plus) && Left.isLabelString() &&
- (Right.NextOperator || Right.OperatorIndex != 0))
+ (Right.NextOperator || Right.OperatorIndex != 0)) {
return 25;
+ }
if (Left.is(tok::comma))
return 1;
if (Right.is(tok::lessless) && Left.isLabelString() &&
- (Right.NextOperator || Right.OperatorIndex != 1))
+ (Right.NextOperator || Right.OperatorIndex != 1)) {
return 25;
+ }
if (Right.is(tok::lessless)) {
// Breaking at a << is really cheap.
- if (!Left.is(tok::r_paren) || Right.OperatorIndex > 0)
+ if (!Left.is(tok::r_paren) || Right.OperatorIndex > 0) {
// Slightly prefer to break before the first one in log-like statements.
return 2;
+ }
return 1;
}
if (Left.ClosesTemplateDeclaration)
return Style.PenaltyBreakTemplateDeclaration;
+ if (Left.ClosesRequiresClause)
+ return 0;
if (Left.is(TT_ConditionalExpr))
return prec::Conditional;
prec::Level Level = Left.getPrecedence();
@@ -2935,42 +3255,50 @@ bool TokenAnnotator::spaceRequiredBeforeParens(const FormatToken &Right) const {
if (Style.SpaceBeforeParens == FormatStyle::SBPO_Always)
return true;
if (Right.is(TT_OverloadedOperatorLParen) &&
- Style.SpaceBeforeParensOptions.AfterOverloadedOperator)
+ Style.SpaceBeforeParensOptions.AfterOverloadedOperator) {
return true;
+ }
if (Style.SpaceBeforeParensOptions.BeforeNonEmptyParentheses &&
- Right.ParameterCount > 0)
+ Right.ParameterCount > 0) {
return true;
+ }
return false;
}
bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
const FormatToken &Left,
- const FormatToken &Right) {
- if (Left.is(tok::kw_return) && Right.isNot(tok::semi))
+ const FormatToken &Right) const {
+ if (Left.is(tok::kw_return) &&
+ !Right.isOneOf(tok::semi, tok::r_paren, tok::hashhash)) {
return true;
+ }
if (Style.isJson() && Left.is(tok::string_literal) && Right.is(tok::colon))
return false;
if (Left.is(Keywords.kw_assert) && Style.Language == FormatStyle::LK_Java)
return true;
if (Style.ObjCSpaceAfterProperty && Line.Type == LT_ObjCProperty &&
- Left.Tok.getObjCKeywordID() == tok::objc_property)
+ Left.Tok.getObjCKeywordID() == tok::objc_property) {
return true;
+ }
if (Right.is(tok::hashhash))
return Left.is(tok::hash);
if (Left.isOneOf(tok::hashhash, tok::hash))
return Right.is(tok::hash);
if ((Left.is(tok::l_paren) && Right.is(tok::r_paren)) ||
(Left.is(tok::l_brace) && Left.isNot(BK_Block) &&
- Right.is(tok::r_brace) && Right.isNot(BK_Block)))
+ Right.is(tok::r_brace) && Right.isNot(BK_Block))) {
return Style.SpaceInEmptyParentheses;
+ }
if (Style.SpacesInConditionalStatement) {
- if (Left.is(tok::l_paren) && Left.Previous &&
- isKeywordWithCondition(*Left.Previous))
- return true;
- if (Right.is(tok::r_paren) && Right.MatchingParen &&
- Right.MatchingParen->Previous &&
- isKeywordWithCondition(*Right.MatchingParen->Previous))
+ const FormatToken *LeftParen = nullptr;
+ if (Left.is(tok::l_paren))
+ LeftParen = &Left;
+ else if (Right.is(tok::r_paren) && Right.MatchingParen)
+ LeftParen = Right.MatchingParen;
+ if (LeftParen && LeftParen->Previous &&
+ isKeywordWithCondition(*LeftParen->Previous)) {
return true;
+ }
}
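Illustrative output of the unified LeftParen lookup (the style option is assumed set):

    // With SpacesInConditionalStatement: true
    void sketch(bool condition, int counter) {
      if ( condition )      // space inside both condition parens
        ++counter;
      while ( counter > 0 ) // same for other keywords with conditions
        --counter;
    }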
// auto{x} auto(x)
@@ -2979,21 +3307,21 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
// operator co_await(x)
if (Right.is(tok::l_paren) && Left.is(tok::kw_co_await) && Left.Previous &&
- Left.Previous->is(tok::kw_operator))
+ Left.Previous->is(tok::kw_operator)) {
return false;
+ }
// co_await (x), co_yield (x), co_return (x)
if (Left.isOneOf(tok::kw_co_await, tok::kw_co_yield, tok::kw_co_return) &&
- Right.isNot(tok::semi))
- return true;
- // requires clause Concept1<T> && Concept2<T>
- if (Left.is(TT_ConstraintJunctions) && Right.is(tok::identifier))
+ !Right.isOneOf(tok::semi, tok::r_paren)) {
return true;
+ }
- if (Left.is(tok::l_paren) || Right.is(tok::r_paren))
+ if (Left.is(tok::l_paren) || Right.is(tok::r_paren)) {
return (Right.is(TT_CastRParen) ||
(Left.MatchingParen && Left.MatchingParen->is(TT_CastRParen)))
? Style.SpacesInCStyleCastParentheses
: Style.SpacesInParentheses;
+ }
if (Right.isOneOf(tok::semi, tok::comma))
return false;
if (Right.is(tok::less) && Line.Type == LT_ObjCDecl) {
@@ -3009,8 +3337,9 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (Left.is(tok::at) &&
Right.isOneOf(tok::identifier, tok::string_literal, tok::char_constant,
tok::numeric_constant, tok::l_paren, tok::l_brace,
- tok::kw_true, tok::kw_false))
+ tok::kw_true, tok::kw_false)) {
return false;
+ }
if (Left.is(tok::colon))
return !Left.is(TT_ObjCMethodExpr);
if (Left.is(tok::coloncolon))
@@ -3026,9 +3355,10 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
}
return false;
}
- if (Right.is(tok::ellipsis))
+ if (Right.is(tok::ellipsis)) {
return Left.Tok.isLiteral() || (Left.is(tok::identifier) && Left.Previous &&
Left.Previous->is(tok::kw_case));
+ }
if (Left.is(tok::l_square) && Right.is(tok::amp))
return Style.SpacesInSquareBrackets;
if (Right.is(TT_PointerOrReference)) {
@@ -3045,34 +3375,39 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
// after pointer qualifiers.
if ((Style.SpaceAroundPointerQualifiers == FormatStyle::SAPQ_After ||
Style.SpaceAroundPointerQualifiers == FormatStyle::SAPQ_Both) &&
- (Left.is(TT_AttributeParen) || Left.canBePointerOrReferenceQualifier()))
+ (Left.is(TT_AttributeParen) ||
+ Left.canBePointerOrReferenceQualifier())) {
return true;
+ }
if (Left.Tok.isLiteral())
return true;
// for (auto a = 0, b = 0; const auto & c : {1, 2, 3})
if (Left.isTypeOrIdentifier() && Right.Next && Right.Next->Next &&
- Right.Next->Next->is(TT_RangeBasedForLoopColon))
+ Right.Next->Next->is(TT_RangeBasedForLoopColon)) {
return getTokenPointerOrReferenceAlignment(Right) !=
FormatStyle::PAS_Left;
- return (
- (!Left.isOneOf(TT_PointerOrReference, tok::l_paren) &&
- (getTokenPointerOrReferenceAlignment(Right) != FormatStyle::PAS_Left ||
- (Line.IsMultiVariableDeclStmt &&
- (Left.NestingLevel == 0 ||
- (Left.NestingLevel == 1 && Line.First->is(tok::kw_for)))))));
+ }
+ return !Left.isOneOf(TT_PointerOrReference, tok::l_paren) &&
+ (getTokenPointerOrReferenceAlignment(Right) !=
+ FormatStyle::PAS_Left ||
+ (Line.IsMultiVariableDeclStmt &&
+ (Left.NestingLevel == 0 ||
+ (Left.NestingLevel == 1 && startsWithInitStatement(Line)))));
}
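A sketch of the multi-variable rule referenced above (PointerAlignment: Left assumed):

    void sketch() {
      int x = 0;
      int *a = &x, *b = nullptr;     // '*' stays attached to each declarator
      if (int *p = &x; p != nullptr) // init-statement case recognized via
        *p = 1;                      // startsWithInitStatement(Line)
      (void)a; (void)b;
    }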
if (Right.is(TT_FunctionTypeLParen) && Left.isNot(tok::l_paren) &&
(!Left.is(TT_PointerOrReference) ||
(getTokenPointerOrReferenceAlignment(Left) != FormatStyle::PAS_Right &&
- !Line.IsMultiVariableDeclStmt)))
+ !Line.IsMultiVariableDeclStmt))) {
return true;
+ }
if (Left.is(TT_PointerOrReference)) {
// Add a space if the next token is a pointer qualifier and the style
// requires spaces before pointer qualifiers.
if ((Style.SpaceAroundPointerQualifiers == FormatStyle::SAPQ_Before ||
Style.SpaceAroundPointerQualifiers == FormatStyle::SAPQ_Both) &&
- Right.canBePointerOrReferenceQualifier())
+ Right.canBePointerOrReferenceQualifier()) {
return true;
+ }
// & 1
if (Right.Tok.isLiteral())
return true;
@@ -3081,29 +3416,44 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return true;
// foo() -> const Bar * override/final
if (Right.isOneOf(Keywords.kw_override, Keywords.kw_final) &&
- !Right.is(TT_StartOfName))
+ !Right.is(TT_StartOfName)) {
return true;
+ }
// & {
if (Right.is(tok::l_brace) && Right.is(BK_Block))
return true;
// for (auto a = 0, b = 0; const auto& c : {1, 2, 3})
if (Left.Previous && Left.Previous->isTypeOrIdentifier() && Right.Next &&
- Right.Next->is(TT_RangeBasedForLoopColon))
+ Right.Next->is(TT_RangeBasedForLoopColon)) {
return getTokenPointerOrReferenceAlignment(Left) !=
FormatStyle::PAS_Right;
- return !Right.isOneOf(TT_PointerOrReference, TT_ArraySubscriptLSquare,
- tok::l_paren) &&
- (getTokenPointerOrReferenceAlignment(Left) !=
- FormatStyle::PAS_Right &&
- !Line.IsMultiVariableDeclStmt) &&
- Left.Previous &&
- !Left.Previous->isOneOf(tok::l_paren, tok::coloncolon,
- tok::l_square);
+ }
+ if (Right.isOneOf(TT_PointerOrReference, TT_ArraySubscriptLSquare,
+ tok::l_paren)) {
+ return false;
+ }
+ if (getTokenPointerOrReferenceAlignment(Left) == FormatStyle::PAS_Right)
+ return false;
+ // FIXME: Setting IsMultiVariableDeclStmt for the whole line is error-prone,
+ // because it does not take into account nested scopes like lambdas.
+ // In multi-variable declaration statements, attach */& to the variable
+ // independently of the style. However, avoid doing it if we are in a nested
+ // scope, e.g. a lambda. We still need to special-case statements with
+ // initializers.
+ if (Line.IsMultiVariableDeclStmt &&
+ (Left.NestingLevel == Line.First->NestingLevel ||
+ ((Left.NestingLevel == Line.First->NestingLevel + 1) &&
+ startsWithInitStatement(Line)))) {
+ return false;
+ }
+ return Left.Previous && !Left.Previous->isOneOf(
+ tok::l_paren, tok::coloncolon, tok::l_square);
}
// Ensure right pointer alignment with ellipsis e.g. int *...P
if (Left.is(tok::ellipsis) && Left.Previous &&
- Left.Previous->isOneOf(tok::star, tok::amp, tok::ampamp))
+ Left.Previous->isOneOf(tok::star, tok::amp, tok::ampamp)) {
return Style.PointerAlignment != FormatStyle::PAS_Right;
+ }
if (Right.is(tok::star) && Left.is(tok::l_paren))
return false;
@@ -3140,12 +3490,13 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
// dependent on PointerAlignment style.
if (Previous) {
if (Previous->endsSequence(tok::kw_operator))
- return (Style.PointerAlignment != FormatStyle::PAS_Left);
- if (Previous->is(tok::kw_const) || Previous->is(tok::kw_volatile))
+ return Style.PointerAlignment != FormatStyle::PAS_Left;
+ if (Previous->is(tok::kw_const) || Previous->is(tok::kw_volatile)) {
return (Style.PointerAlignment != FormatStyle::PAS_Left) ||
(Style.SpaceAroundPointerQualifiers ==
FormatStyle::SAPQ_After) ||
(Style.SpaceAroundPointerQualifiers == FormatStyle::SAPQ_Both);
+ }
}
}
const auto SpaceRequiredForArrayInitializerLSquare =
@@ -3157,13 +3508,14 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
LSquareTok.endsSequence(tok::l_square, tok::colon,
TT_SelectorName));
};
- if (Left.is(tok::l_square))
+ if (Left.is(tok::l_square)) {
return (Left.is(TT_ArrayInitializerLSquare) && Right.isNot(tok::r_square) &&
SpaceRequiredForArrayInitializerLSquare(Left, Style)) ||
(Left.isOneOf(TT_ArraySubscriptLSquare, TT_StructuredBindingLSquare,
TT_LambdaLSquare) &&
Style.SpacesInSquareBrackets && Right.isNot(tok::r_square));
- if (Right.is(tok::r_square))
+ }
+ if (Right.is(tok::r_square)) {
return Right.MatchingParen &&
((Right.MatchingParen->is(TT_ArrayInitializerLSquare) &&
SpaceRequiredForArrayInitializerLSquare(*Right.MatchingParen,
@@ -3173,23 +3525,27 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
TT_StructuredBindingLSquare,
TT_LambdaLSquare)) ||
Right.MatchingParen->is(TT_AttributeParen));
+ }
if (Right.is(tok::l_square) &&
!Right.isOneOf(TT_ObjCMethodExpr, TT_LambdaLSquare,
TT_DesignatedInitializerLSquare,
TT_StructuredBindingLSquare, TT_AttributeSquare) &&
!Left.isOneOf(tok::numeric_constant, TT_DictLiteral) &&
!(!Left.is(tok::r_square) && Style.SpaceBeforeSquareBrackets &&
- Right.is(TT_ArraySubscriptLSquare)))
+ Right.is(TT_ArraySubscriptLSquare))) {
return false;
+ }
if (Left.is(tok::l_brace) && Right.is(tok::r_brace))
return !Left.Children.empty(); // No spaces in "{}".
if ((Left.is(tok::l_brace) && Left.isNot(BK_Block)) ||
(Right.is(tok::r_brace) && Right.MatchingParen &&
- Right.MatchingParen->isNot(BK_Block)))
+ Right.MatchingParen->isNot(BK_Block))) {
return Style.Cpp11BracedListStyle ? Style.SpacesInParentheses : true;
- if (Left.is(TT_BlockComment))
+ }
+ if (Left.is(TT_BlockComment)) {
// No whitespace in x(/*foo=*/1), except for JavaScript.
return Style.isJavaScript() || !Left.TokenText.endswith("=*/");
+ }
// Space between template and attribute.
// e.g. template <typename T> [[nodiscard]] ...
@@ -3199,28 +3555,37 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (Right.is(tok::l_paren)) {
if (Left.is(TT_TemplateCloser) && Right.isNot(TT_FunctionTypeLParen))
return spaceRequiredBeforeParens(Right);
- if (Left.is(tok::kw_requires))
- return spaceRequiredBeforeParens(Right);
+ if (Left.isOneOf(TT_RequiresClause,
+ TT_RequiresClauseInARequiresExpression)) {
+ return Style.SpaceBeforeParensOptions.AfterRequiresInClause ||
+ spaceRequiredBeforeParens(Right);
+ }
+ if (Left.is(TT_RequiresExpression)) {
+ return Style.SpaceBeforeParensOptions.AfterRequiresInExpression ||
+ spaceRequiredBeforeParens(Right);
+ }
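A sketch of what the two new options control (assumed output when each is true):

    // SpaceBeforeParensOptions.AfterRequiresInClause: true
    template <typename T>
      requires (sizeof(T) > 1)  // space between 'requires' and '('
    void g(T);

    // SpaceBeforeParensOptions.AfterRequiresInExpression: true
    template <typename T>
    concept Small = requires (T t) { sizeof(t); };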
if ((Left.is(tok::r_paren) && Left.is(TT_AttributeParen)) ||
- (Left.is(tok::r_square) && Left.is(TT_AttributeSquare)))
+ (Left.is(tok::r_square) && Left.is(TT_AttributeSquare))) {
return true;
- if (Left.is(TT_ForEachMacro))
- return (Style.SpaceBeforeParensOptions.AfterForeachMacros ||
- spaceRequiredBeforeParens(Right));
- if (Left.is(TT_IfMacro))
- return (Style.SpaceBeforeParensOptions.AfterIfMacros ||
- spaceRequiredBeforeParens(Right));
+ }
+ if (Left.is(TT_ForEachMacro)) {
+ return Style.SpaceBeforeParensOptions.AfterForeachMacros ||
+ spaceRequiredBeforeParens(Right);
+ }
+ if (Left.is(TT_IfMacro)) {
+ return Style.SpaceBeforeParensOptions.AfterIfMacros ||
+ spaceRequiredBeforeParens(Right);
+ }
if (Line.Type == LT_ObjCDecl)
return true;
if (Left.is(tok::semi))
return true;
if (Left.isOneOf(tok::pp_elif, tok::kw_for, tok::kw_while, tok::kw_switch,
- tok::kw_case, TT_ForEachMacro, TT_ObjCForIn))
- return Style.SpaceBeforeParensOptions.AfterControlStatements ||
- spaceRequiredBeforeParens(Right);
- if (Left.isIf(Line.Type != LT_PreprocessorDirective))
+ tok::kw_case, TT_ForEachMacro, TT_ObjCForIn) ||
+ Left.isIf(Line.Type != LT_PreprocessorDirective)) {
return Style.SpaceBeforeParensOptions.AfterControlStatements ||
spaceRequiredBeforeParens(Right);
+ }
// TODO: add operator-overloading-specific options to
// SpaceBeforeParensOptions.
@@ -3228,43 +3593,58 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
return spaceRequiredBeforeParens(Right);
// Function declaration or definition
if (Line.MightBeFunctionDecl && (Left.is(TT_FunctionDeclarationName))) {
- if (Line.mightBeFunctionDefinition())
+ if (Line.mightBeFunctionDefinition()) {
return Style.SpaceBeforeParensOptions.AfterFunctionDefinitionName ||
spaceRequiredBeforeParens(Right);
- else
+ } else {
return Style.SpaceBeforeParensOptions.AfterFunctionDeclarationName ||
spaceRequiredBeforeParens(Right);
+ }
}
// Lambda
if (Line.Type != LT_PreprocessorDirective && Left.is(tok::r_square) &&
- Left.MatchingParen && Left.MatchingParen->is(TT_LambdaLSquare))
+ Left.MatchingParen && Left.MatchingParen->is(TT_LambdaLSquare)) {
return Style.SpaceBeforeParensOptions.AfterFunctionDefinitionName ||
spaceRequiredBeforeParens(Right);
+ }
if (!Left.Previous || Left.Previous->isNot(tok::period)) {
- if (Left.isOneOf(tok::kw_try, Keywords.kw___except, tok::kw_catch))
+ if (Left.isOneOf(tok::kw_try, Keywords.kw___except, tok::kw_catch)) {
return Style.SpaceBeforeParensOptions.AfterControlStatements ||
spaceRequiredBeforeParens(Right);
- if (Left.isOneOf(tok::kw_new, tok::kw_delete))
- return Style.SpaceBeforeParens != FormatStyle::SBPO_Never ||
+ }
+ if (Left.isOneOf(tok::kw_new, tok::kw_delete)) {
+ return ((!Line.MightBeFunctionDecl || !Left.Previous) &&
+ Style.SpaceBeforeParens != FormatStyle::SBPO_Never) ||
+ spaceRequiredBeforeParens(Right);
+ }
+
+ if (Left.is(tok::r_square) && Left.MatchingParen &&
+ Left.MatchingParen->Previous &&
+ Left.MatchingParen->Previous->is(tok::kw_delete)) {
+ return (Style.SpaceBeforeParens != FormatStyle::SBPO_Never) ||
spaceRequiredBeforeParens(Right);
+ }
}
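Illustrative spacing around new/delete under these rules (a SpaceBeforeParens setting other than Never is assumed):

    #include <new>

    void sketch() {
      auto *q = new int[4];
      delete[] (q);                     // '[]' is matched back to 'delete'
      auto *r = new (std::nothrow) int; // space kept after 'new'
      delete r;
    }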
+ // Handle builtins like identifiers.
if (Line.Type != LT_PreprocessorDirective &&
- (Left.is(tok::identifier) || Left.isFunctionLikeKeyword() ||
- Left.is(tok::r_paren) || Left.isSimpleTypeSpecifier()))
+ (Left.Tok.getIdentifierInfo() || Left.is(tok::r_paren))) {
return spaceRequiredBeforeParens(Right);
+ }
return false;
}
if (Left.is(tok::at) && Right.Tok.getObjCKeywordID() != tok::objc_not_keyword)
return false;
- if (Right.is(TT_UnaryOperator))
+ if (Right.is(TT_UnaryOperator)) {
return !Left.isOneOf(tok::l_paren, tok::l_square, tok::at) &&
(Left.isNot(tok::colon) || Left.isNot(TT_ObjCMethodExpr));
+ }
if ((Left.isOneOf(tok::identifier, tok::greater, tok::r_square,
tok::r_paren) ||
Left.isSimpleTypeSpecifier()) &&
Right.is(tok::l_brace) && Right.getNextNonComment() &&
- Right.isNot(BK_Block))
+ Right.isNot(BK_Block)) {
return false;
+ }
if (Left.is(tok::period) || Right.is(tok::period))
return false;
// u#str, U#str, L#str, u8#str
@@ -3273,39 +3653,44 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
(Left.TokenText == "L" || Left.TokenText == "u" ||
Left.TokenText == "U" || Left.TokenText == "u8" ||
Left.TokenText == "LR" || Left.TokenText == "uR" ||
- Left.TokenText == "UR" || Left.TokenText == "u8R"))
+ Left.TokenText == "UR" || Left.TokenText == "u8R")) {
return false;
+ }
if (Left.is(TT_TemplateCloser) && Left.MatchingParen &&
Left.MatchingParen->Previous &&
(Left.MatchingParen->Previous->is(tok::period) ||
- Left.MatchingParen->Previous->is(tok::coloncolon)))
+ Left.MatchingParen->Previous->is(tok::coloncolon))) {
// Java call to generic function with explicit type:
// A.<B<C<...>>>DoSomething();
// A::<B<C<...>>>DoSomething(); // With a Java 8 method reference.
return false;
+ }
if (Left.is(TT_TemplateCloser) && Right.is(tok::l_square))
return false;
- if (Left.is(tok::l_brace) && Left.endsSequence(TT_DictLiteral, tok::at))
+ if (Left.is(tok::l_brace) && Left.endsSequence(TT_DictLiteral, tok::at)) {
// Objective-C dictionary literal -> no space after opening brace.
return false;
+ }
if (Right.is(tok::r_brace) && Right.MatchingParen &&
- Right.MatchingParen->endsSequence(TT_DictLiteral, tok::at))
+ Right.MatchingParen->endsSequence(TT_DictLiteral, tok::at)) {
// Objective-C dictionary literal -> no space before closing brace.
return false;
+ }
if (Right.getType() == TT_TrailingAnnotation &&
Right.isOneOf(tok::amp, tok::ampamp) &&
Left.isOneOf(tok::kw_const, tok::kw_volatile) &&
- (!Right.Next || Right.Next->is(tok::semi)))
+ (!Right.Next || Right.Next->is(tok::semi))) {
// Match const and volatile ref-qualifiers without any additional
// qualifiers such as
// void Fn() const &;
return getTokenReferenceAlignment(Right) != FormatStyle::PAS_Left;
+ }
return true;
}
bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
- const FormatToken &Right) {
+ const FormatToken &Right) const {
const FormatToken &Left = *Right.Previous;
// If the token is finalized don't touch it (as it could be in a
@@ -3313,8 +3698,9 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Left.Finalized)
return Right.hasWhitespaceBefore();
- if (Right.Tok.getIdentifierInfo() && Left.Tok.getIdentifierInfo())
- return true; // Never ever merge two identifiers.
+ // Never ever merge two words.
+ if (Keywords.isWordLike(Right) && Keywords.isWordLike(Left))
+ return true;
// Leave a space between * and /* to avoid C4138 `comment end` found outside
// of comment.
@@ -3328,18 +3714,21 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return true;
// Space between `module :` and `import :`.
if (Left.isOneOf(Keywords.kw_module, Keywords.kw_import) &&
- Right.is(TT_ModulePartitionColon))
+ Right.is(TT_ModulePartitionColon)) {
return true;
+ }
// No space in "import foo:bar", but keep the space in "import :bar;".
if (Left.is(tok::identifier) && Right.is(TT_ModulePartitionColon))
return false;
// No space within ":bar;".
if (Left.is(TT_ModulePartitionColon) &&
- Right.isOneOf(tok::identifier, tok::kw_private))
+ Right.isOneOf(tok::identifier, tok::kw_private)) {
return false;
+ }
if (Left.is(tok::ellipsis) && Right.is(tok::identifier) &&
- Line.First->is(Keywords.kw_import))
+ Line.First->is(Keywords.kw_import)) {
return false;
+ }
// Space in __attribute__((attr)) ::type.
if (Left.is(TT_AttributeParen) && Right.is(tok::coloncolon))
return true;
@@ -3347,20 +3736,24 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Left.is(tok::kw_operator))
return Right.is(tok::coloncolon);
if (Right.is(tok::l_brace) && Right.is(BK_BracedInit) &&
- !Left.opensScope() && Style.SpaceBeforeCpp11BracedList)
+ !Left.opensScope() && Style.SpaceBeforeCpp11BracedList) {
return true;
+ }
if (Left.is(tok::less) && Left.is(TT_OverloadedOperator) &&
- Right.is(TT_TemplateOpener))
+ Right.is(TT_TemplateOpener)) {
return true;
+ }
} else if (Style.Language == FormatStyle::LK_Proto ||
Style.Language == FormatStyle::LK_TextProto) {
if (Right.is(tok::period) &&
Left.isOneOf(Keywords.kw_optional, Keywords.kw_required,
- Keywords.kw_repeated, Keywords.kw_extend))
+ Keywords.kw_repeated, Keywords.kw_extend)) {
return true;
+ }
if (Right.is(tok::l_paren) &&
- Left.isOneOf(Keywords.kw_returns, Keywords.kw_option))
+ Left.isOneOf(Keywords.kw_returns, Keywords.kw_option)) {
return true;
+ }
if (Right.isOneOf(tok::l_brace, tok::less) && Left.is(TT_SelectorName))
return true;
// Slashes occur in text protocol extension syntax: [type/type] { ... }.
@@ -3368,8 +3761,9 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return false;
if (Left.MatchingParen &&
Left.MatchingParen->is(TT_ProtoExtensionLSquare) &&
- Right.isOneOf(tok::l_brace, tok::less))
+ Right.isOneOf(tok::l_brace, tok::less)) {
return !Style.Cpp11BracedListStyle;
+ }
// A percent is probably part of a formatting specification, such as %lld.
if (Left.is(tok::percent))
return false;
@@ -3437,11 +3831,13 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return true;
// space between keywords and paren e.g. "using ("
- if (Right.is(tok::l_paren))
+ if (Right.is(tok::l_paren)) {
if (Left.isOneOf(tok::kw_using, Keywords.kw_async, Keywords.kw_when,
- Keywords.kw_lock))
+ Keywords.kw_lock)) {
return Style.SpaceBeforeParensOptions.AfterControlStatements ||
spaceRequiredBeforeParens(Right);
+ }
+ }
// space between method modifier and opening parenthesis of a tuple return
// type
@@ -3450,15 +3846,17 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
Keywords.kw_internal, Keywords.kw_abstract,
Keywords.kw_sealed, Keywords.kw_override,
Keywords.kw_async, Keywords.kw_unsafe) &&
- Right.is(tok::l_paren))
+ Right.is(tok::l_paren)) {
return true;
+ }
} else if (Style.isJavaScript()) {
if (Left.is(TT_FatArrow))
return true;
// for await ( ...
if (Right.is(tok::l_paren) && Left.is(Keywords.kw_await) && Left.Previous &&
- Left.Previous->is(tok::kw_for))
+ Left.Previous->is(tok::kw_for)) {
return true;
+ }
if (Left.is(Keywords.kw_async) && Right.is(tok::l_paren) &&
Right.MatchingParen) {
const FormatToken *Next = Right.MatchingParen->getNextNonComment();
@@ -3468,21 +3866,25 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return true;
}
if ((Left.is(TT_TemplateString) && Left.TokenText.endswith("${")) ||
- (Right.is(TT_TemplateString) && Right.TokenText.startswith("}")))
+ (Right.is(TT_TemplateString) && Right.TokenText.startswith("}"))) {
return false;
+ }
// In tagged template literals ("html`bar baz`"), there is no space between
// the tag identifier and the template string.
if (Keywords.IsJavaScriptIdentifier(Left,
/* AcceptIdentifierName= */ false) &&
- Right.is(TT_TemplateString))
+ Right.is(TT_TemplateString)) {
return false;
+ }
if (Right.is(tok::star) &&
- Left.isOneOf(Keywords.kw_function, Keywords.kw_yield))
+ Left.isOneOf(Keywords.kw_function, Keywords.kw_yield)) {
return false;
+ }
if (Right.isOneOf(tok::l_brace, tok::l_square) &&
Left.isOneOf(Keywords.kw_function, Keywords.kw_yield,
- Keywords.kw_extends, Keywords.kw_implements))
+ Keywords.kw_extends, Keywords.kw_implements)) {
return true;
+ }
if (Right.is(tok::l_paren)) {
// JS methods can use some keywords as names (e.g. `delete()`).
if (Line.MustBeDeclaration && Left.Tok.getIdentifierInfo())
@@ -3490,36 +3892,41 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
// Valid JS method names can include keywords, e.g. `foo.delete()` or
// `bar.instanceof()`. Recognize call positions by preceding period.
if (Left.Previous && Left.Previous->is(tok::period) &&
- Left.Tok.getIdentifierInfo())
+ Left.Tok.getIdentifierInfo()) {
return false;
+ }
// Additional unary JavaScript operators that need a space after.
if (Left.isOneOf(tok::kw_throw, Keywords.kw_await, Keywords.kw_typeof,
- tok::kw_void))
+ tok::kw_void)) {
return true;
+ }
}
// `foo as const;` casts into a const type.
- if (Left.endsSequence(tok::kw_const, Keywords.kw_as)) {
+ if (Left.endsSequence(tok::kw_const, Keywords.kw_as))
return false;
- }
if ((Left.isOneOf(Keywords.kw_let, Keywords.kw_var, Keywords.kw_in,
tok::kw_const) ||
// "of" is only a keyword if it appears after another identifier
// (e.g. as "const x of y" in a for loop), or after a destructuring
// operation (const [x, y] of z, const {a, b} of c).
(Left.is(Keywords.kw_of) && Left.Previous &&
- (Left.Previous->Tok.is(tok::identifier) ||
+ (Left.Previous->is(tok::identifier) ||
Left.Previous->isOneOf(tok::r_square, tok::r_brace)))) &&
- (!Left.Previous || !Left.Previous->is(tok::period)))
+ (!Left.Previous || !Left.Previous->is(tok::period))) {
return true;
+ }
if (Left.isOneOf(tok::kw_for, Keywords.kw_as) && Left.Previous &&
- Left.Previous->is(tok::period) && Right.is(tok::l_paren))
+ Left.Previous->is(tok::period) && Right.is(tok::l_paren)) {
return false;
+ }
if (Left.is(Keywords.kw_as) &&
- Right.isOneOf(tok::l_square, tok::l_brace, tok::l_paren))
+ Right.isOneOf(tok::l_square, tok::l_brace, tok::l_paren)) {
return true;
+ }
if (Left.is(tok::kw_default) && Left.Previous &&
- Left.Previous->is(tok::kw_export))
+ Left.Previous->is(tok::kw_export)) {
return true;
+ }
if (Left.is(Keywords.kw_is) && Right.is(tok::l_brace))
return true;
if (Right.isOneOf(TT_JsTypeColon, TT_JsTypeOptionalQuestion))
@@ -3527,53 +3934,76 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Left.is(TT_JsTypeOperator) || Right.is(TT_JsTypeOperator))
return false;
if ((Left.is(tok::l_brace) || Right.is(tok::r_brace)) &&
- Line.First->isOneOf(Keywords.kw_import, tok::kw_export))
+ Line.First->isOneOf(Keywords.kw_import, tok::kw_export)) {
return false;
+ }
if (Left.is(tok::ellipsis))
return false;
if (Left.is(TT_TemplateCloser) &&
!Right.isOneOf(tok::equal, tok::l_brace, tok::comma, tok::l_square,
- Keywords.kw_implements, Keywords.kw_extends))
+ Keywords.kw_implements, Keywords.kw_extends)) {
// Type assertions ('<type>expr') are not followed by whitespace. Other
// locations that should have whitespace following are identified by the
// above set of follower tokens.
return false;
+ }
if (Right.is(TT_NonNullAssertion))
return false;
if (Left.is(TT_NonNullAssertion) &&
- Right.isOneOf(Keywords.kw_as, Keywords.kw_in))
+ Right.isOneOf(Keywords.kw_as, Keywords.kw_in)) {
return true; // "x! as string", "x! in y"
+ }
} else if (Style.Language == FormatStyle::LK_Java) {
if (Left.is(tok::r_square) && Right.is(tok::l_brace))
return true;
- if (Left.is(Keywords.kw_synchronized) && Right.is(tok::l_paren))
+ if (Left.is(Keywords.kw_synchronized) && Right.is(tok::l_paren)) {
return Style.SpaceBeforeParensOptions.AfterControlStatements ||
spaceRequiredBeforeParens(Right);
+ }
if ((Left.isOneOf(tok::kw_static, tok::kw_public, tok::kw_private,
tok::kw_protected) ||
Left.isOneOf(Keywords.kw_final, Keywords.kw_abstract,
Keywords.kw_native)) &&
- Right.is(TT_TemplateOpener))
+ Right.is(TT_TemplateOpener)) {
+ return true;
+ }
+ } else if (Style.isVerilog()) {
+ // Don't add space within a delay like `#0`.
+ if (!Left.is(TT_BinaryOperator) &&
+ Left.isOneOf(Keywords.kw_verilogHash, Keywords.kw_verilogHashHash)) {
+ return false;
+ }
+ // Add space after a delay.
+ if (!Right.is(tok::semi) &&
+ (Left.endsSequence(tok::numeric_constant, Keywords.kw_verilogHash) ||
+ Left.endsSequence(tok::numeric_constant,
+ Keywords.kw_verilogHashHash) ||
+ (Left.is(tok::r_paren) && Left.MatchingParen &&
+ Left.MatchingParen->endsSequence(tok::l_paren, tok::at)))) {
return true;
+ }
}
if (Left.is(TT_ImplicitStringLiteral))
return Right.hasWhitespaceBefore();
if (Line.Type == LT_ObjCMethodDecl) {
if (Left.is(TT_ObjCMethodSpecifier))
return true;
- if (Left.is(tok::r_paren) && canBeObjCSelectorComponent(Right))
+ if (Left.is(tok::r_paren) && canBeObjCSelectorComponent(Right)) {
// Don't add a space between ')' and <id>, or between ')' and 'new'. 'new'
// is not a keyword in Objective-C, and '+ (instancetype)new;' is a standard
// class method declaration.
return false;
+ }
}
if (Line.Type == LT_ObjCProperty &&
- (Right.is(tok::equal) || Left.is(tok::equal)))
+ (Right.is(tok::equal) || Left.is(tok::equal))) {
return false;
+ }
if (Right.isOneOf(TT_TrailingReturnArrow, TT_LambdaArrow) ||
- Left.isOneOf(TT_TrailingReturnArrow, TT_LambdaArrow))
+ Left.isOneOf(TT_TrailingReturnArrow, TT_LambdaArrow)) {
return true;
+ }
if (Left.is(tok::comma) && !Right.is(TT_OverloadedOperatorLParen))
return true;
if (Right.is(tok::comma))
@@ -3585,15 +4015,18 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Right.is(TT_InheritanceColon) && !Style.SpaceBeforeInheritanceColon)
return false;
if (Right.is(TT_RangeBasedForLoopColon) &&
- !Style.SpaceBeforeRangeBasedForLoopColon)
+ !Style.SpaceBeforeRangeBasedForLoopColon) {
return false;
- if (Left.is(TT_BitFieldColon))
+ }
+ if (Left.is(TT_BitFieldColon)) {
return Style.BitFieldColonSpacing == FormatStyle::BFCS_Both ||
Style.BitFieldColonSpacing == FormatStyle::BFCS_After;
+ }
if (Right.is(tok::colon)) {
if (Line.First->isOneOf(tok::kw_default, tok::kw_case))
return Style.SpaceBeforeCaseColon;
- if (!Right.getNextNonComment() || Right.getNextNonComment()->is(tok::semi))
+ const FormatToken *Next = Right.getNextNonComment();
+ if (!Next || Next->is(tok::semi))
return false;
if (Right.is(TT_ObjCMethodExpr))
return false;
@@ -3607,17 +4040,19 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
return false;
if (Right.is(TT_CSharpNamedArgumentColon))
return false;
- if (Right.is(TT_BitFieldColon))
+ if (Right.is(TT_BitFieldColon)) {
return Style.BitFieldColonSpacing == FormatStyle::BFCS_Both ||
Style.BitFieldColonSpacing == FormatStyle::BFCS_Before;
+ }
return true;
}
// Do not merge "- -" into "--".
if ((Left.isOneOf(tok::minus, tok::minusminus) &&
Right.isOneOf(tok::minus, tok::minusminus)) ||
(Left.isOneOf(tok::plus, tok::plusplus) &&
- Right.isOneOf(tok::plus, tok::plusplus)))
+ Right.isOneOf(tok::plus, tok::plusplus))) {
return true;
+ }
if (Left.is(TT_UnaryOperator)) {
if (!Right.is(tok::l_paren)) {
// The alternative operators for ~ and ! are "compl" and "not".
@@ -3638,9 +4073,10 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
// If the next token is a binary operator or a selector name, we have
// incorrectly classified the parenthesis as a cast. FIXME: Detect correctly.
- if (Left.is(TT_CastRParen))
+ if (Left.is(TT_CastRParen)) {
return Style.SpaceAfterCStyleCast ||
Right.isOneOf(TT_BinaryOperator, TT_SelectorName);
+ }
auto ShouldAddSpacesInAngles = [this, &Right]() {
if (this->Style.SpacesInAngles == FormatStyle::SIAS_Always)
@@ -3652,29 +4088,34 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
if (Left.is(tok::greater) && Right.is(tok::greater)) {
if (Style.Language == FormatStyle::LK_TextProto ||
- (Style.Language == FormatStyle::LK_Proto && Left.is(TT_DictLiteral)))
+ (Style.Language == FormatStyle::LK_Proto && Left.is(TT_DictLiteral))) {
return !Style.Cpp11BracedListStyle;
+ }
return Right.is(TT_TemplateCloser) && Left.is(TT_TemplateCloser) &&
((Style.Standard < FormatStyle::LS_Cpp11) ||
ShouldAddSpacesInAngles());
}
if (Right.isOneOf(tok::arrow, tok::arrowstar, tok::periodstar) ||
Left.isOneOf(tok::arrow, tok::period, tok::arrowstar, tok::periodstar) ||
- (Right.is(tok::period) && Right.isNot(TT_DesignatedInitializerPeriod)))
+ (Right.is(tok::period) && Right.isNot(TT_DesignatedInitializerPeriod))) {
return false;
+ }
if (!Style.SpaceBeforeAssignmentOperators && Left.isNot(TT_TemplateCloser) &&
- Right.getPrecedence() == prec::Assignment)
+ Right.getPrecedence() == prec::Assignment) {
return false;
+ }
if (Style.Language == FormatStyle::LK_Java && Right.is(tok::coloncolon) &&
- (Left.is(tok::identifier) || Left.is(tok::kw_this)))
+ (Left.is(tok::identifier) || Left.is(tok::kw_this))) {
return false;
- if (Right.is(tok::coloncolon) && Left.is(tok::identifier))
+ }
+ if (Right.is(tok::coloncolon) && Left.is(tok::identifier)) {
// Generally don't remove existing spaces between an identifier and "::".
// The identifier might actually be a macro name such as ALWAYS_INLINE. If
// this turns out to be too lenient, add analysis of the identifier itself.
return Right.hasWhitespaceBefore();
+ }
if (Right.is(tok::coloncolon) &&
- !Left.isOneOf(tok::l_brace, tok::comment, tok::l_paren))
+ !Left.isOneOf(tok::l_brace, tok::comment, tok::l_paren)) {
// Put a space between < and :: in vector< ::std::string >
return (Left.is(TT_TemplateOpener) &&
((Style.Standard < FormatStyle::LS_Cpp11) ||
@@ -3683,26 +4124,33 @@ bool TokenAnnotator::spaceRequiredBefore(const AnnotatedLine &Line,
tok::kw___super, TT_TemplateOpener,
TT_TemplateCloser)) ||
(Left.is(tok::l_paren) && Style.SpacesInParentheses);
+ }
if ((Left.is(TT_TemplateOpener)) != (Right.is(TT_TemplateCloser)))
return ShouldAddSpacesInAngles();
// Space before TT_StructuredBindingLSquare.
- if (Right.is(TT_StructuredBindingLSquare))
+ if (Right.is(TT_StructuredBindingLSquare)) {
return !Left.isOneOf(tok::amp, tok::ampamp) ||
getTokenReferenceAlignment(Left) != FormatStyle::PAS_Right;
+ }
// Space before & or && following a TT_StructuredBindingLSquare.
if (Right.Next && Right.Next->is(TT_StructuredBindingLSquare) &&
- Right.isOneOf(tok::amp, tok::ampamp))
+ Right.isOneOf(tok::amp, tok::ampamp)) {
return getTokenReferenceAlignment(Right) != FormatStyle::PAS_Left;
+ }
if ((Right.is(TT_BinaryOperator) && !Left.is(tok::l_paren)) ||
(Left.isOneOf(TT_BinaryOperator, TT_ConditionalExpr) &&
- !Right.is(tok::r_paren)))
+ !Right.is(tok::r_paren))) {
return true;
+ }
if (Right.is(TT_TemplateOpener) && Left.is(tok::r_paren) &&
- Left.MatchingParen && Left.MatchingParen->is(TT_OverloadedOperatorLParen))
+ Left.MatchingParen &&
+ Left.MatchingParen->is(TT_OverloadedOperatorLParen)) {
return false;
+ }
if (Right.is(tok::less) && Left.isNot(tok::l_paren) &&
- Line.startsWith(tok::hash))
+ Line.startsWith(tok::hash)) {
return true;
+ }
if (Right.is(TT_TrailingUnaryOperator))
return false;
if (Left.is(TT_RegexLiteral))
@@ -3729,8 +4177,8 @@ isItAnEmptyLambdaAllowed(const FormatToken &Tok,
}
static bool isAllmanLambdaBrace(const FormatToken &Tok) {
- return (Tok.is(tok::l_brace) && Tok.is(BK_Block) &&
- !Tok.isOneOf(TT_ObjCBlockLBrace, TT_DictLiteral));
+ return Tok.is(tok::l_brace) && Tok.is(BK_Block) &&
+ !Tok.isOneOf(TT_ObjCBlockLBrace, TT_DictLiteral);
}
// Returns the first token on the line that is not a comment.
@@ -3744,58 +4192,66 @@ static const FormatToken *getFirstNonComment(const AnnotatedLine &Line) {
}
bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
- const FormatToken &Right) {
+ const FormatToken &Right) const {
const FormatToken &Left = *Right.Previous;
if (Right.NewlinesBefore > 1 && Style.MaxEmptyLinesToKeep > 0)
return true;
if (Style.isCSharp()) {
if (Left.is(TT_FatArrow) && Right.is(tok::l_brace) &&
- Style.BraceWrapping.AfterFunction)
+ Style.BraceWrapping.AfterFunction) {
return true;
+ }
if (Right.is(TT_CSharpNamedArgumentColon) ||
- Left.is(TT_CSharpNamedArgumentColon))
+ Left.is(TT_CSharpNamedArgumentColon)) {
return false;
+ }
if (Right.is(TT_CSharpGenericTypeConstraint))
return true;
if (Right.Next && Right.Next->is(TT_FatArrow) &&
(Right.is(tok::numeric_constant) ||
- (Right.is(tok::identifier) && Right.TokenText == "_")))
+ (Right.is(tok::identifier) && Right.TokenText == "_"))) {
return true;
+ }
// Break after C# [...] and before public/protected/private/internal.
if (Left.is(TT_AttributeSquare) && Left.is(tok::r_square) &&
(Right.isAccessSpecifier(/*ColonRequired=*/false) ||
- Right.is(Keywords.kw_internal)))
+ Right.is(Keywords.kw_internal))) {
return true;
+ }
// Break between ] and [ but only when there are really 2 attributes.
if (Left.is(TT_AttributeSquare) && Right.is(TT_AttributeSquare) &&
- Left.is(tok::r_square) && Right.is(tok::l_square))
+ Left.is(tok::r_square) && Right.is(tok::l_square)) {
return true;
+ }
} else if (Style.isJavaScript()) {
// FIXME: This might apply to other languages and token kinds.
if (Right.is(tok::string_literal) && Left.is(tok::plus) && Left.Previous &&
- Left.Previous->is(tok::string_literal))
+ Left.Previous->is(tok::string_literal)) {
return true;
+ }
if (Left.is(TT_DictLiteral) && Left.is(tok::l_brace) && Line.Level == 0 &&
Left.Previous && Left.Previous->is(tok::equal) &&
Line.First->isOneOf(tok::identifier, Keywords.kw_import, tok::kw_export,
tok::kw_const) &&
// kw_var/kw_let are pseudo-tokens that are tok::identifier, so match
// above.
- !Line.First->isOneOf(Keywords.kw_var, Keywords.kw_let))
+ !Line.First->isOneOf(Keywords.kw_var, Keywords.kw_let)) {
// Object literals on the top level of a file are treated as "enum-style".
// Each key/value pair is put on a separate line, instead of bin-packing.
return true;
+ }
if (Left.is(tok::l_brace) && Line.Level == 0 &&
(Line.startsWith(tok::kw_enum) ||
Line.startsWith(tok::kw_const, tok::kw_enum) ||
Line.startsWith(tok::kw_export, tok::kw_enum) ||
- Line.startsWith(tok::kw_export, tok::kw_const, tok::kw_enum)))
+ Line.startsWith(tok::kw_export, tok::kw_const, tok::kw_enum))) {
// JavaScript top-level enum key/value pairs are put on separate lines
// instead of bin-packing.
return true;
+ }
if (Right.is(tok::r_brace) && Left.is(tok::l_brace) && Left.Previous &&
Left.Previous->is(TT_FatArrow)) {
// JS arrow function (=> {...}).
@@ -3816,17 +4272,19 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
}
if (Right.is(tok::r_brace) && Left.is(tok::l_brace) &&
- !Left.Children.empty())
+ !Left.Children.empty()) {
// Support AllowShortFunctionsOnASingleLine for JavaScript.
return Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_None ||
Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_Empty ||
(Left.NestingLevel == 0 && Line.Level == 0 &&
Style.AllowShortFunctionsOnASingleLine &
FormatStyle::SFS_InlineOnly);
+ }
} else if (Style.Language == FormatStyle::LK_Java) {
if (Right.is(tok::plus) && Left.is(tok::string_literal) && Right.Next &&
- Right.Next->is(tok::string_literal))
+ Right.Next->is(tok::string_literal)) {
return true;
+ }
} else if (Style.Language == FormatStyle::LK_Cpp ||
Style.Language == FormatStyle::LK_ObjC ||
Style.Language == FormatStyle::LK_Proto ||
@@ -3847,8 +4305,9 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
// [
// ]
if (Left.is(TT_ArrayInitializerLSquare) && Left.is(tok::l_square) &&
- !Right.is(tok::r_square))
+ !Right.is(tok::r_square)) {
return true;
+ }
// Always break after successive entries.
// 1,
// 2
@@ -3864,28 +4323,42 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
const FormatToken *BeforeClosingBrace = nullptr;
if ((Left.isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) ||
(Style.isJavaScript() && Left.is(tok::l_paren))) &&
- Left.isNot(BK_Block) && Left.MatchingParen)
+ Left.isNot(BK_Block) && Left.MatchingParen) {
BeforeClosingBrace = Left.MatchingParen->Previous;
- else if (Right.MatchingParen &&
- (Right.MatchingParen->isOneOf(tok::l_brace,
- TT_ArrayInitializerLSquare) ||
- (Style.isJavaScript() && Right.MatchingParen->is(tok::l_paren))))
+ } else if (Right.MatchingParen &&
+ (Right.MatchingParen->isOneOf(tok::l_brace,
+ TT_ArrayInitializerLSquare) ||
+ (Style.isJavaScript() &&
+ Right.MatchingParen->is(tok::l_paren)))) {
BeforeClosingBrace = &Left;
+ }
if (BeforeClosingBrace && (BeforeClosingBrace->is(tok::comma) ||
- BeforeClosingBrace->isTrailingComment()))
+ BeforeClosingBrace->isTrailingComment())) {
return true;
+ }
}
- if (Right.is(tok::comment))
+ if (Right.is(tok::comment)) {
return Left.isNot(BK_BracedInit) && Left.isNot(TT_CtorInitializerColon) &&
(Right.NewlinesBefore > 0 && Right.HasUnescapedNewline);
+ }
if (Left.isTrailingComment())
return true;
if (Left.IsUnterminatedLiteral)
return true;
if (Right.is(tok::lessless) && Right.Next && Left.is(tok::string_literal) &&
- Right.Next->is(tok::string_literal))
+ Right.Next->is(tok::string_literal)) {
return true;
+ }
+ if (Right.is(TT_RequiresClause)) {
+ switch (Style.RequiresClausePosition) {
+ case FormatStyle::RCPS_OwnLine:
+ case FormatStyle::RCPS_WithFollowing:
+ return true;
+ default:
+ break;
+ }
+ }
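Illustrative effect of the positions that force the break (RequiresClausePosition: OwnLine assumed):

    #include <concepts>

    template <typename T>
      requires std::integral<T>  // the clause gets its own line
    T h(T v) { return v; }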
// Can break after template<> declaration
if (Left.ClosesTemplateDeclaration && Left.MatchingParen &&
Left.MatchingParen->NestingLevel == 0) {
@@ -3893,34 +4366,50 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
// template<typename T>
// concept ...
if (Right.is(tok::kw_concept))
- return Style.BreakBeforeConceptDeclarations;
- return (Style.AlwaysBreakTemplateDeclarations == FormatStyle::BTDS_Yes);
+ return Style.BreakBeforeConceptDeclarations == FormatStyle::BBCDS_Always;
+ return Style.AlwaysBreakTemplateDeclarations == FormatStyle::BTDS_Yes;
+ }
+ if (Left.ClosesRequiresClause && Right.isNot(tok::semi)) {
+ switch (Style.RequiresClausePosition) {
+ case FormatStyle::RCPS_OwnLine:
+ case FormatStyle::RCPS_WithPreceding:
+ return true;
+ default:
+ break;
+ }
}
if (Style.PackConstructorInitializers == FormatStyle::PCIS_Never) {
if (Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeColon &&
- (Left.is(TT_CtorInitializerComma) || Right.is(TT_CtorInitializerColon)))
+ (Left.is(TT_CtorInitializerComma) ||
+ Right.is(TT_CtorInitializerColon))) {
return true;
+ }
if (Style.BreakConstructorInitializers == FormatStyle::BCIS_AfterColon &&
- Left.isOneOf(TT_CtorInitializerColon, TT_CtorInitializerComma))
+ Left.isOneOf(TT_CtorInitializerColon, TT_CtorInitializerComma)) {
return true;
+ }
}
if (Style.PackConstructorInitializers < FormatStyle::PCIS_CurrentLine &&
Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma &&
- Right.isOneOf(TT_CtorInitializerComma, TT_CtorInitializerColon))
+ Right.isOneOf(TT_CtorInitializerComma, TT_CtorInitializerColon)) {
return true;
+ }
// Break only if we have multiple inheritance.
if (Style.BreakInheritanceList == FormatStyle::BILS_BeforeComma &&
- Right.is(TT_InheritanceComma))
+ Right.is(TT_InheritanceComma)) {
return true;
+ }
if (Style.BreakInheritanceList == FormatStyle::BILS_AfterComma &&
- Left.is(TT_InheritanceComma))
+ Left.is(TT_InheritanceComma)) {
return true;
- if (Right.is(tok::string_literal) && Right.TokenText.startswith("R\""))
+ }
+ if (Right.is(tok::string_literal) && Right.TokenText.startswith("R\"")) {
// Multiline raw string literals are special wrt. line breaks. The author
// has made a deliberate choice and might have aligned the contents of the
    // string literal accordingly. Thus, we try to keep existing line breaks.
return Right.IsMultiline && Right.NewlinesBefore > 0;
+ }
if ((Left.is(tok::l_brace) || (Left.is(tok::less) && Left.Previous &&
Left.Previous->is(tok::equal))) &&
Right.NestingLevel == 1 && Style.Language == FormatStyle::LK_Proto) {
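[BreakBeforeConceptDeclarations changes from a bool to a tri-state in this hunk: mustBreakBefore now fires only on BBCDS_Always, while canBreakBefore later in this file merely requires that it not be BBCDS_Never. A sketch of what BBCDS_Always enforces, ours rather than the patch's:

    template <typename T>             // forced break after the template header
    concept Small = sizeof(T) <= 4;

The raw-string check above is deliberately weaker: a break before a multiline R"(...)" literal is kept only if the author already placed one, since the literal's contents may be aligned to it.]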
@@ -3940,28 +4429,32 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
if (Style.BraceWrapping.AfterEnum) {
if (Line.startsWith(tok::kw_enum) ||
- Line.startsWith(tok::kw_typedef, tok::kw_enum))
+ Line.startsWith(tok::kw_typedef, tok::kw_enum)) {
return true;
+ }
// Ensure BraceWrapping for `public enum A {`.
if (AccessSpecifier && FirstNonComment->Next &&
- FirstNonComment->Next->is(tok::kw_enum))
+ FirstNonComment->Next->is(tok::kw_enum)) {
return true;
+ }
}
// Ensure BraceWrapping for `public interface A {`.
if (Style.BraceWrapping.AfterClass &&
((AccessSpecifier && FirstNonComment->Next &&
FirstNonComment->Next->is(Keywords.kw_interface)) ||
- Line.startsWith(Keywords.kw_interface)))
+ Line.startsWith(Keywords.kw_interface))) {
return true;
+ }
return (Line.startsWith(tok::kw_class) && Style.BraceWrapping.AfterClass) ||
(Line.startsWith(tok::kw_struct) && Style.BraceWrapping.AfterStruct);
}
if (Left.is(TT_ObjCBlockLBrace) &&
- Style.AllowShortBlocksOnASingleLine == FormatStyle::SBS_Never)
+ Style.AllowShortBlocksOnASingleLine == FormatStyle::SBS_Never) {
return true;
+ }
// Ensure wrapping after __attribute__((XX)) and @interface etc.
if (Left.is(TT_AttributeParen) && Right.is(TT_ObjCDecl))
@@ -3969,14 +4462,16 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
if (Left.is(TT_LambdaLBrace)) {
if (IsFunctionArgument(Left) &&
- Style.AllowShortLambdasOnASingleLine == FormatStyle::SLS_Inline)
+ Style.AllowShortLambdasOnASingleLine == FormatStyle::SLS_Inline) {
return false;
+ }
if (Style.AllowShortLambdasOnASingleLine == FormatStyle::SLS_None ||
Style.AllowShortLambdasOnASingleLine == FormatStyle::SLS_Inline ||
(!Left.Children.empty() &&
- Style.AllowShortLambdasOnASingleLine == FormatStyle::SLS_Empty))
+ Style.AllowShortLambdasOnASingleLine == FormatStyle::SLS_Empty)) {
return true;
+ }
}
if (Style.BraceWrapping.BeforeLambdaBody && Right.is(TT_LambdaLBrace) &&
@@ -3988,8 +4483,9 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
if ((Style.Language == FormatStyle::LK_Java || Style.isJavaScript()) &&
Left.is(TT_LeadingJavaAnnotation) &&
Right.isNot(TT_LeadingJavaAnnotation) && Right.isNot(tok::l_paren) &&
- (Line.Last->is(tok::l_brace) || Style.BreakAfterJavaFieldAnnotations))
+ (Line.Last->is(tok::l_brace) || Style.BreakAfterJavaFieldAnnotations)) {
return true;
+ }
if (Right.is(TT_ProtoExtensionLSquare))
return true;
@@ -4107,27 +4603,29 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
}
bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
- const FormatToken &Right) {
+ const FormatToken &Right) const {
const FormatToken &Left = *Right.Previous;
// Language-specific stuff.
if (Style.isCSharp()) {
if (Left.isOneOf(TT_CSharpNamedArgumentColon, TT_AttributeColon) ||
- Right.isOneOf(TT_CSharpNamedArgumentColon, TT_AttributeColon))
+ Right.isOneOf(TT_CSharpNamedArgumentColon, TT_AttributeColon)) {
return false;
+ }
// Only break after commas for generic type constraints.
if (Line.First->is(TT_CSharpGenericTypeConstraint))
return Left.is(TT_CSharpGenericTypeConstraintComma);
// Keep nullable operators attached to their identifiers.
- if (Right.is(TT_CSharpNullable)) {
+ if (Right.is(TT_CSharpNullable))
return false;
- }
} else if (Style.Language == FormatStyle::LK_Java) {
if (Left.isOneOf(Keywords.kw_throws, Keywords.kw_extends,
- Keywords.kw_implements))
+ Keywords.kw_implements)) {
return false;
+ }
if (Right.isOneOf(Keywords.kw_throws, Keywords.kw_extends,
- Keywords.kw_implements))
+ Keywords.kw_implements)) {
return true;
+ }
} else if (Style.isJavaScript()) {
const FormatToken *NonComment = Right.getPreviousNonComment();
if (NonComment &&
@@ -4137,16 +4635,19 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
tok::kw_static, tok::kw_public, tok::kw_private, tok::kw_protected,
Keywords.kw_readonly, Keywords.kw_override, Keywords.kw_abstract,
Keywords.kw_get, Keywords.kw_set, Keywords.kw_async,
- Keywords.kw_await))
+ Keywords.kw_await)) {
return false; // Otherwise automatic semicolon insertion would trigger.
+ }
if (Right.NestingLevel == 0 &&
(Left.Tok.getIdentifierInfo() ||
Left.isOneOf(tok::r_square, tok::r_paren)) &&
- Right.isOneOf(tok::l_square, tok::l_paren))
+ Right.isOneOf(tok::l_square, tok::l_paren)) {
return false; // Otherwise automatic semicolon insertion would trigger.
+ }
if (NonComment && NonComment->is(tok::identifier) &&
- NonComment->TokenText == "asserts")
+ NonComment->TokenText == "asserts") {
return false;
+ }
if (Left.is(TT_FatArrow) && Right.is(tok::l_brace))
return false;
if (Left.is(TT_JsTypeColon))
@@ -4189,20 +4690,21 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
Right.isOneOf(Keywords.kw_module, tok::kw_namespace,
Keywords.kw_function, tok::kw_class, tok::kw_enum,
Keywords.kw_interface, Keywords.kw_type, Keywords.kw_var,
- Keywords.kw_let, tok::kw_const))
+ Keywords.kw_let, tok::kw_const)) {
// See grammar for 'declare' statements at:
// https://github.com/Microsoft/TypeScript/blob/main/doc/spec-ARCHIVED.md#A.10
return false;
+ }
if (Left.isOneOf(Keywords.kw_module, tok::kw_namespace) &&
- Right.isOneOf(tok::identifier, tok::string_literal))
+ Right.isOneOf(tok::identifier, tok::string_literal)) {
return false; // must not break in "module foo { ...}"
+ }
if (Right.is(TT_TemplateString) && Right.closesScope())
return false;
// Don't split tagged template literal so there is a break between the tag
// identifier and template string.
- if (Left.is(tok::identifier) && Right.is(TT_TemplateString)) {
+ if (Left.is(tok::identifier) && Right.is(TT_TemplateString))
return false;
- }
if (Left.is(TT_TemplateString) && Left.opensScope())
return true;
}
@@ -4213,17 +4715,19 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
return false;
if (Left.isOneOf(TT_JavaAnnotation, TT_LeadingJavaAnnotation))
return !Right.is(tok::l_paren);
- if (Right.is(TT_PointerOrReference))
+ if (Right.is(TT_PointerOrReference)) {
return Line.IsMultiVariableDeclStmt ||
(getTokenPointerOrReferenceAlignment(Right) ==
FormatStyle::PAS_Right &&
(!Right.Next || Right.Next->isNot(TT_FunctionDeclarationName)));
+ }
if (Right.isOneOf(TT_StartOfName, TT_FunctionDeclarationName) ||
- Right.is(tok::kw_operator))
+ Right.is(tok::kw_operator)) {
return true;
+ }
if (Left.is(TT_PointerOrReference))
return false;
- if (Right.isTrailingComment())
+ if (Right.isTrailingComment()) {
// We rely on MustBreakBefore being set correctly here as we should not
// change the "binding" behavior of a comment.
    // The first comment in a braced list is always interpreted as belonging to
@@ -4232,6 +4736,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
return Left.is(BK_BracedInit) ||
(Left.is(TT_CtorInitializerColon) &&
Style.BreakConstructorInitializers == FormatStyle::BCIS_AfterColon);
+ }
if (Left.is(tok::question) && Right.is(tok::colon))
return false;
if (Right.is(TT_ConditionalExpr) || Right.is(tok::question))
@@ -4243,12 +4748,14 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
if (Right.is(TT_InheritanceColon))
return Style.BreakInheritanceList != FormatStyle::BILS_AfterColon;
if (Right.is(TT_ObjCMethodExpr) && !Right.is(tok::r_square) &&
- Left.isNot(TT_SelectorName))
+ Left.isNot(TT_SelectorName)) {
return true;
+ }
if (Right.is(tok::colon) &&
- !Right.isOneOf(TT_CtorInitializerColon, TT_InlineASMColon))
+ !Right.isOneOf(TT_CtorInitializerColon, TT_InlineASMColon)) {
return false;
+ }
if (Left.is(tok::colon) && Left.isOneOf(TT_DictLiteral, TT_ObjCMethodExpr)) {
if (Style.Language == FormatStyle::LK_Proto ||
Style.Language == FormatStyle::LK_TextProto) {
@@ -4280,53 +4787,79 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
// repeated: [ ... ]
if (((Right.is(tok::l_brace) || Right.is(tok::less)) &&
Right.is(TT_DictLiteral)) ||
- Right.is(TT_ArrayInitializerLSquare))
+ Right.is(TT_ArrayInitializerLSquare)) {
return false;
+ }
}
return true;
}
if (Right.is(tok::r_square) && Right.MatchingParen &&
- Right.MatchingParen->is(TT_ProtoExtensionLSquare))
+ Right.MatchingParen->is(TT_ProtoExtensionLSquare)) {
return false;
+ }
if (Right.is(TT_SelectorName) || (Right.is(tok::identifier) && Right.Next &&
- Right.Next->is(TT_ObjCMethodExpr)))
+ Right.Next->is(TT_ObjCMethodExpr))) {
return Left.isNot(tok::period); // FIXME: Properly parse ObjC calls.
+ }
if (Left.is(tok::r_paren) && Line.Type == LT_ObjCProperty)
return true;
+ if (Right.is(tok::kw_concept))
+ return Style.BreakBeforeConceptDeclarations != FormatStyle::BBCDS_Never;
+ if (Right.is(TT_RequiresClause))
+ return true;
if (Left.ClosesTemplateDeclaration || Left.is(TT_FunctionAnnotationRParen))
return true;
+ if (Left.ClosesRequiresClause)
+ return true;
if (Right.isOneOf(TT_RangeBasedForLoopColon, TT_OverloadedOperatorLParen,
- TT_OverloadedOperator))
+ TT_OverloadedOperator)) {
return false;
+ }
if (Left.is(TT_RangeBasedForLoopColon))
return true;
if (Right.is(TT_RangeBasedForLoopColon))
return false;
if (Left.is(TT_TemplateCloser) && Right.is(TT_TemplateOpener))
return true;
+ if ((Left.is(tok::greater) && Right.is(tok::greater)) ||
+ (Left.is(tok::less) && Right.is(tok::less))) {
+ return false;
+ }
+ if (Right.is(TT_BinaryOperator) &&
+ Style.BreakBeforeBinaryOperators != FormatStyle::BOS_None &&
+ (Style.BreakBeforeBinaryOperators == FormatStyle::BOS_All ||
+ Right.getPrecedence() != prec::Assignment)) {
+ return true;
+ }
if (Left.isOneOf(TT_TemplateCloser, TT_UnaryOperator) ||
- Left.is(tok::kw_operator))
+ Left.is(tok::kw_operator)) {
return false;
+ }
if (Left.is(tok::equal) && !Right.isOneOf(tok::kw_default, tok::kw_delete) &&
- Line.Type == LT_VirtualFunctionDecl && Left.NestingLevel == 0)
+ Line.Type == LT_VirtualFunctionDecl && Left.NestingLevel == 0) {
return false;
+ }
if (Left.is(tok::equal) && Right.is(tok::l_brace) &&
- !Style.Cpp11BracedListStyle)
+ !Style.Cpp11BracedListStyle) {
return false;
+ }
if (Left.is(tok::l_paren) &&
- Left.isOneOf(TT_AttributeParen, TT_TypeDeclarationParen))
+ Left.isOneOf(TT_AttributeParen, TT_TypeDeclarationParen)) {
return false;
+ }
if (Left.is(tok::l_paren) && Left.Previous &&
- (Left.Previous->isOneOf(TT_BinaryOperator, TT_CastRParen)))
+ (Left.Previous->isOneOf(TT_BinaryOperator, TT_CastRParen))) {
return false;
+ }
if (Right.is(TT_ImplicitStringLiteral))
return false;
if (Right.is(TT_TemplateCloser))
return false;
if (Right.is(tok::r_square) && Right.MatchingParen &&
- Right.MatchingParen->is(TT_LambdaLSquare))
+ Right.MatchingParen->is(TT_LambdaLSquare)) {
return false;
+ }
// We only break before r_brace if there was a corresponding break before
// the l_brace, which is tracked by BreakBeforeClosingBrace.
@@ -4335,25 +4868,25 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
// We only break before r_paren if we're in a block indented context.
if (Right.is(tok::r_paren)) {
- if (Style.AlignAfterOpenBracket == FormatStyle::BAS_BlockIndent) {
- return Right.MatchingParen &&
- !(Right.MatchingParen->Previous &&
- (Right.MatchingParen->Previous->is(tok::kw_for) ||
- Right.MatchingParen->Previous->isIf()));
+ if (Style.AlignAfterOpenBracket != FormatStyle::BAS_BlockIndent ||
+ !Right.MatchingParen) {
+ return false;
}
-
- return false;
+ const FormatToken *Previous = Right.MatchingParen->Previous;
+ return !(Previous && (Previous->is(tok::kw_for) || Previous->isIf()));
}
// Allow breaking after a trailing annotation, e.g. after a method
// declaration.
- if (Left.is(TT_TrailingAnnotation))
+ if (Left.is(TT_TrailingAnnotation)) {
return !Right.isOneOf(tok::l_brace, tok::semi, tok::equal, tok::l_paren,
tok::less, tok::coloncolon);
+ }
if (Right.is(tok::kw___attribute) ||
- (Right.is(tok::l_square) && Right.is(TT_AttributeSquare)))
+ (Right.is(tok::l_square) && Right.is(TT_AttributeSquare))) {
return !Left.is(TT_AttributeSquare);
+ }
if (Left.is(tok::identifier) && Right.is(tok::string_literal))
return true;
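[The r_paren branch above is restructured so the common case (no BAS_BlockIndent, or no matching paren) returns false first; behavior is unchanged. What AlignAfterOpenBracket: BlockIndent then permits, as an illustrative sketch of ours (for/if parentheses remain excluded):

    someFunction(
        longArgumentOne, longArgumentTwo
    );   // the closing parenthesis may break onto its own line]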
@@ -4366,25 +4899,21 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
if (Right.is(TT_CtorInitializerColon))
return Style.BreakConstructorInitializers != FormatStyle::BCIS_AfterColon;
if (Left.is(TT_CtorInitializerComma) &&
- Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma)
+ Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma) {
return false;
+ }
if (Right.is(TT_CtorInitializerComma) &&
- Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma)
+ Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeComma) {
return true;
+ }
if (Left.is(TT_InheritanceComma) &&
- Style.BreakInheritanceList == FormatStyle::BILS_BeforeComma)
+ Style.BreakInheritanceList == FormatStyle::BILS_BeforeComma) {
return false;
+ }
if (Right.is(TT_InheritanceComma) &&
- Style.BreakInheritanceList == FormatStyle::BILS_BeforeComma)
- return true;
- if ((Left.is(tok::greater) && Right.is(tok::greater)) ||
- (Left.is(tok::less) && Right.is(tok::less)))
- return false;
- if (Right.is(TT_BinaryOperator) &&
- Style.BreakBeforeBinaryOperators != FormatStyle::BOS_None &&
- (Style.BreakBeforeBinaryOperators == FormatStyle::BOS_All ||
- Right.getPrecedence() != prec::Assignment))
+ Style.BreakInheritanceList == FormatStyle::BILS_BeforeComma) {
return true;
+ }
if (Left.is(TT_ArrayInitializerLSquare))
return true;
if (Right.is(tok::kw_typename) && Left.isNot(tok::kw_const))
@@ -4393,11 +4922,13 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
!Left.isOneOf(tok::arrowstar, tok::lessless) &&
Style.BreakBeforeBinaryOperators != FormatStyle::BOS_All &&
(Style.BreakBeforeBinaryOperators == FormatStyle::BOS_None ||
- Left.getPrecedence() == prec::Assignment))
+ Left.getPrecedence() == prec::Assignment)) {
return true;
+ }
if ((Left.is(TT_AttributeSquare) && Right.is(tok::l_square)) ||
- (Left.is(tok::r_square) && Right.is(TT_AttributeSquare)))
+ (Left.is(tok::r_square) && Right.is(TT_AttributeSquare))) {
return false;
+ }
auto ShortLambdaOption = Style.AllowShortLambdasOnASingleLine;
if (Style.BraceWrapping.BeforeLambdaBody && Right.is(TT_LambdaLBrace)) {
@@ -4418,7 +4949,7 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
(Left.is(TT_TemplateOpener) && !Right.is(TT_TemplateCloser));
}
-void TokenAnnotator::printDebugInfo(const AnnotatedLine &Line) {
+void TokenAnnotator::printDebugInfo(const AnnotatedLine &Line) const {
llvm::errs() << "AnnotatedTokens(L=" << Line.Level << "):\n";
const FormatToken *Tok = Line.First;
while (Tok) {
@@ -4430,8 +4961,8 @@ void TokenAnnotator::printDebugInfo(const AnnotatedLine &Line) {
<< " BK=" << Tok->getBlockKind() << " P=" << Tok->SplitPenalty
<< " Name=" << Tok->Tok.getName() << " L=" << Tok->TotalLength
<< " PPK=" << Tok->getPackingKind() << " FakeLParens=";
- for (unsigned i = 0, e = Tok->FakeLParens.size(); i != e; ++i)
- llvm::errs() << Tok->FakeLParens[i] << "/";
+ for (prec::Level LParen : Tok->FakeLParens)
+ llvm::errs() << LParen << "/";
llvm::errs() << " FakeRParens=" << Tok->FakeRParens;
llvm::errs() << " II=" << Tok->Tok.getIdentifierInfo();
llvm::errs() << " Text='" << Tok->TokenText << "'\n";
@@ -4443,7 +4974,7 @@ void TokenAnnotator::printDebugInfo(const AnnotatedLine &Line) {
}
FormatStyle::PointerAlignmentStyle
-TokenAnnotator::getTokenReferenceAlignment(const FormatToken &Reference) {
+TokenAnnotator::getTokenReferenceAlignment(const FormatToken &Reference) const {
assert(Reference.isOneOf(tok::amp, tok::ampamp));
switch (Style.ReferenceAlignment) {
case FormatStyle::RAS_Pointer:
@@ -4461,7 +4992,7 @@ TokenAnnotator::getTokenReferenceAlignment(const FormatToken &Reference) {
FormatStyle::PointerAlignmentStyle
TokenAnnotator::getTokenPointerOrReferenceAlignment(
- const FormatToken &PointerOrReference) {
+ const FormatToken &PointerOrReference) const {
if (PointerOrReference.isOneOf(tok::amp, tok::ampamp)) {
switch (Style.ReferenceAlignment) {
case FormatStyle::RAS_Pointer:
diff --git a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.h b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.h
index 96e03967ff60..1be64ed6d3b5 100644
--- a/contrib/llvm-project/clang/lib/Format/TokenAnnotator.h
+++ b/contrib/llvm-project/clang/lib/Format/TokenAnnotator.h
@@ -76,6 +76,10 @@ public:
}
}
+ bool isComment() const {
+ return First && First->is(tok::comment) && !First->getNextNonComment();
+ }
+
/// \c true if this line starts with the given tokens in order, ignoring
/// comments.
template <typename... Ts> bool startsWith(Ts... Tokens) const {
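[The new isComment() is true only for a line that consists of a single comment token (First is a comment and nothing non-comment follows it). The short-function merger in UnwrappedLineFormatter.cpp below relies on it to skip comment-only lines while walking up to the enclosing record:

    if (!(*J)->InPPDirective && !(*J)->isComment() &&
        (*J)->Level < TheLine->Level) {
      Line = *J;
    }]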
@@ -157,43 +161,46 @@ public:
/// Adapts the indent levels of comment lines to the indent of the
/// subsequent line.
// FIXME: Can/should this be done in the UnwrappedLineParser?
- void setCommentLineLevels(SmallVectorImpl<AnnotatedLine *> &Lines);
+ void setCommentLineLevels(SmallVectorImpl<AnnotatedLine *> &Lines) const;
- void annotate(AnnotatedLine &Line);
- void calculateFormattingInformation(AnnotatedLine &Line);
+ void annotate(AnnotatedLine &Line) const;
+ void calculateFormattingInformation(AnnotatedLine &Line) const;
private:
/// Calculate the penalty for splitting before \c Tok.
unsigned splitPenalty(const AnnotatedLine &Line, const FormatToken &Tok,
- bool InFunctionDecl);
+ bool InFunctionDecl) const;
bool spaceRequiredBeforeParens(const FormatToken &Right) const;
bool spaceRequiredBetween(const AnnotatedLine &Line, const FormatToken &Left,
- const FormatToken &Right);
+ const FormatToken &Right) const;
- bool spaceRequiredBefore(const AnnotatedLine &Line, const FormatToken &Right);
+ bool spaceRequiredBefore(const AnnotatedLine &Line,
+ const FormatToken &Right) const;
- bool mustBreakBefore(const AnnotatedLine &Line, const FormatToken &Right);
+ bool mustBreakBefore(const AnnotatedLine &Line,
+ const FormatToken &Right) const;
- bool canBreakBefore(const AnnotatedLine &Line, const FormatToken &Right);
+ bool canBreakBefore(const AnnotatedLine &Line,
+ const FormatToken &Right) const;
bool mustBreakForReturnType(const AnnotatedLine &Line) const;
- void printDebugInfo(const AnnotatedLine &Line);
+ void printDebugInfo(const AnnotatedLine &Line) const;
- void calculateUnbreakableTailLengths(AnnotatedLine &Line);
+ void calculateUnbreakableTailLengths(AnnotatedLine &Line) const;
- void calculateArrayInitializerColumnList(AnnotatedLine &Line);
+ void calculateArrayInitializerColumnList(AnnotatedLine &Line) const;
FormatToken *calculateInitializerColumnList(AnnotatedLine &Line,
FormatToken *CurrentToken,
- unsigned Depth);
+ unsigned Depth) const;
FormatStyle::PointerAlignmentStyle
- getTokenReferenceAlignment(const FormatToken &PointerOrReference);
+ getTokenReferenceAlignment(const FormatToken &PointerOrReference) const;
- FormatStyle::PointerAlignmentStyle
- getTokenPointerOrReferenceAlignment(const FormatToken &PointerOrReference);
+ FormatStyle::PointerAlignmentStyle getTokenPointerOrReferenceAlignment(
+ const FormatToken &PointerOrReference) const;
const FormatStyle &Style;
diff --git a/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp b/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp
index 01c151fec132..22509a504246 100644
--- a/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp
+++ b/contrib/llvm-project/clang/lib/Format/UnwrappedLineFormatter.cpp
@@ -26,6 +26,11 @@ bool startsExternCBlock(const AnnotatedLine &Line) {
NextNext && NextNext->is(tok::l_brace);
}
+bool isRecordLBrace(const FormatToken &Tok) {
+ return Tok.isOneOf(TT_ClassLBrace, TT_EnumLBrace, TT_RecordLBrace,
+ TT_StructLBrace, TT_UnionLBrace);
+}
+
/// Tracks the indent level of \c AnnotatedLines across levels.
///
/// \c nextLine must be called for each \c AnnotatedLine, after which \c
@@ -87,8 +92,9 @@ public:
if (static_cast<int>(LevelIndent) - Offset >= 0)
LevelIndent -= Offset;
if ((!Line.First->is(tok::comment) || IndentForLevel[Line.Level] == -1) &&
- !Line.InPPDirective)
+ !Line.InPPDirective) {
IndentForLevel[Line.Level] = LevelIndent;
+ }
}
private:
@@ -98,25 +104,30 @@ private:
/// characters to the left from their level.
int getIndentOffset(const FormatToken &RootToken) {
if (Style.Language == FormatStyle::LK_Java || Style.isJavaScript() ||
- Style.isCSharp())
+ Style.isCSharp()) {
return 0;
+ }
auto IsAccessModifier = [this, &RootToken]() {
- if (RootToken.isAccessSpecifier(Style.isCpp()))
+ if (RootToken.isAccessSpecifier(Style.isCpp())) {
return true;
- else if (RootToken.isObjCAccessSpecifier())
+ } else if (RootToken.isObjCAccessSpecifier()) {
return true;
+ }
// Handle Qt signals.
else if ((RootToken.isOneOf(Keywords.kw_signals, Keywords.kw_qsignals) &&
- RootToken.Next && RootToken.Next->is(tok::colon)))
+ RootToken.Next && RootToken.Next->is(tok::colon))) {
return true;
- else if (RootToken.Next &&
- RootToken.Next->isOneOf(Keywords.kw_slots, Keywords.kw_qslots) &&
- RootToken.Next->Next && RootToken.Next->Next->is(tok::colon))
+ } else if (RootToken.Next &&
+ RootToken.Next->isOneOf(Keywords.kw_slots,
+ Keywords.kw_qslots) &&
+ RootToken.Next->Next && RootToken.Next->Next->is(tok::colon)) {
return true;
+ }
// Handle malformed access specifier e.g. 'private' without trailing ':'.
- else if (!RootToken.Next && RootToken.isAccessSpecifier(false))
+ else if (!RootToken.Next && RootToken.isAccessSpecifier(false)) {
return true;
+ }
return false;
};
@@ -200,12 +211,13 @@ public:
const AnnotatedLine *Current = *Next;
IndentTracker.nextLine(*Current);
unsigned MergedLines = tryFitMultipleLinesInOne(IndentTracker, Next, End);
- if (MergedLines > 0 && Style.ColumnLimit == 0)
+ if (MergedLines > 0 && Style.ColumnLimit == 0) {
// Disallow line merging if there is a break at the start of one of the
// input lines.
for (unsigned i = 0; i < MergedLines; ++i)
if (Next[i + 1]->First->NewlinesBefore > 0)
MergedLines = 0;
+ }
if (!DryRun)
for (unsigned i = 0; i < MergedLines; ++i)
join(*Next[0], *Next[i + 1]);
@@ -228,11 +240,13 @@ private:
const AnnotatedLine *TheLine = *I;
if (TheLine->Last->is(TT_LineComment))
return 0;
- if (I[1]->Type == LT_Invalid || I[1]->First->MustBreakBefore)
+ const auto &NextLine = *I[1];
+ if (NextLine.Type == LT_Invalid || NextLine.First->MustBreakBefore)
return 0;
if (TheLine->InPPDirective &&
- (!I[1]->InPPDirective || I[1]->First->HasUnescapedNewline))
+ (!NextLine.InPPDirective || NextLine.First->HasUnescapedNewline)) {
return 0;
+ }
if (Style.ColumnLimit > 0 && Indent > Style.ColumnLimit)
return 0;
@@ -248,30 +262,34 @@ private:
if (TheLine->Last->is(TT_FunctionLBrace) &&
TheLine->First == TheLine->Last &&
!Style.BraceWrapping.SplitEmptyFunction &&
- I[1]->First->is(tok::r_brace))
+ NextLine.First->is(tok::r_brace)) {
return tryMergeSimpleBlock(I, E, Limit);
+ }
- // Handle empty record blocks where the brace has already been wrapped
- if (TheLine->Last->is(tok::l_brace) && TheLine->First == TheLine->Last &&
- I != AnnotatedLines.begin()) {
- bool EmptyBlock = I[1]->First->is(tok::r_brace);
+ const auto *PreviousLine = I != AnnotatedLines.begin() ? I[-1] : nullptr;
+ // Handle empty record blocks where the brace has already been wrapped.
+ if (PreviousLine && TheLine->Last->is(tok::l_brace) &&
+ TheLine->First == TheLine->Last) {
+ bool EmptyBlock = NextLine.First->is(tok::r_brace);
- const FormatToken *Tok = I[-1]->First;
+ const FormatToken *Tok = PreviousLine->First;
if (Tok && Tok->is(tok::comment))
Tok = Tok->getNextNonComment();
- if (Tok && Tok->getNamespaceToken())
+ if (Tok && Tok->getNamespaceToken()) {
return !Style.BraceWrapping.SplitEmptyNamespace && EmptyBlock
? tryMergeSimpleBlock(I, E, Limit)
: 0;
+ }
if (Tok && Tok->is(tok::kw_typedef))
Tok = Tok->getNextNonComment();
if (Tok && Tok->isOneOf(tok::kw_class, tok::kw_struct, tok::kw_union,
- tok::kw_extern, Keywords.kw_interface))
+ tok::kw_extern, Keywords.kw_interface)) {
return !Style.BraceWrapping.SplitEmptyRecord && EmptyBlock
? tryMergeSimpleBlock(I, E, Limit)
: 0;
+ }
if (Tok && Tok->is(tok::kw_template) &&
Style.BraceWrapping.SplitEmptyRecord && EmptyBlock) {
@@ -279,48 +297,67 @@ private:
}
}
- auto ShouldMergeShortFunctions =
- [this, B = AnnotatedLines.begin()](
- SmallVectorImpl<AnnotatedLine *>::const_iterator I) {
- if (Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_All)
- return true;
- if (Style.AllowShortFunctionsOnASingleLine >=
- FormatStyle::SFS_Empty &&
- I[1]->First->is(tok::r_brace))
- return true;
-
- if (Style.AllowShortFunctionsOnASingleLine &
- FormatStyle::SFS_InlineOnly) {
- // Just checking TheLine->Level != 0 is not enough, because it
- // provokes treating functions inside indented namespaces as short.
- if (Style.isJavaScript() && (*I)->Last->is(TT_FunctionLBrace))
- return true;
-
- if ((*I)->Level != 0) {
- if (I == B)
- return false;
-
- // TODO: Use IndentTracker to avoid loop?
- // Find the last line with lower level.
- auto J = I - 1;
- for (; J != B; --J)
- if ((*J)->Level < (*I)->Level)
- break;
-
- // Check if the found line starts a record.
- for (const FormatToken *RecordTok = (*J)->Last; RecordTok;
- RecordTok = RecordTok->Previous)
- if (RecordTok->is(tok::l_brace))
- return RecordTok->is(TT_RecordLBrace);
-
- return false;
+ auto ShouldMergeShortFunctions = [this, &I, &NextLine, PreviousLine,
+ TheLine]() {
+ if (Style.AllowShortFunctionsOnASingleLine == FormatStyle::SFS_All)
+ return true;
+ if (Style.AllowShortFunctionsOnASingleLine >= FormatStyle::SFS_Empty &&
+ NextLine.First->is(tok::r_brace)) {
+ return true;
+ }
+
+ if (Style.AllowShortFunctionsOnASingleLine &
+ FormatStyle::SFS_InlineOnly) {
+ // Just checking TheLine->Level != 0 is not enough, because it
+ // provokes treating functions inside indented namespaces as short.
+ if (Style.isJavaScript() && TheLine->Last->is(TT_FunctionLBrace))
+ return true;
+
+ if (TheLine->Level != 0) {
+ if (!PreviousLine)
+ return false;
+
+ // TODO: Use IndentTracker to avoid loop?
+ // Find the last line with lower level.
+ const AnnotatedLine *Line = nullptr;
+ for (auto J = I - 1; J >= AnnotatedLines.begin(); --J) {
+ assert(*J);
+ if (!(*J)->InPPDirective && !(*J)->isComment() &&
+ (*J)->Level < TheLine->Level) {
+ Line = *J;
+ break;
}
}
- return false;
- };
+ if (!Line)
+ return false;
+
+ // Check if the found line starts a record.
+ const FormatToken *LastNonComment = Line->Last;
+ assert(LastNonComment);
+ if (LastNonComment->is(tok::comment)) {
+ LastNonComment = LastNonComment->getPreviousNonComment();
+ // There must be another token (usually `{`), because we chose a
+ // non-PPDirective and non-comment line that has a smaller level.
+ assert(LastNonComment);
+ }
+ return isRecordLBrace(*LastNonComment);
+ }
+ }
+
+ return false;
+ };
- bool MergeShortFunctions = ShouldMergeShortFunctions(I);
+ bool MergeShortFunctions = ShouldMergeShortFunctions();
+
+ const FormatToken *FirstNonComment = TheLine->First;
+ if (FirstNonComment->is(tok::comment)) {
+ FirstNonComment = FirstNonComment->getNextNonComment();
+ if (!FirstNonComment)
+ return 0;
+ }
+ // FIXME: There are probably cases where we should use FirstNonComment
+ // instead of TheLine->First.
if (Style.CompactNamespaces) {
if (auto nsToken = TheLine->First->getNamespaceToken()) {
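[ShouldMergeShortFunctions is rewritten to capture the neighboring lines directly; for SFS_InlineOnly it now walks upward past preprocessor and comment-only lines to the line opening the enclosing scope, and accepts the merge only if that line ends in a record brace (isRecordLBrace). A hedged sketch of what InlineOnly merges, ours:

    class C {
      int f() { return 42; }   // merged: the body sits inside a record
    };
    int g() {
      return 42;               // not merged: free function
    }]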
@@ -330,8 +367,8 @@ private:
nsToken->TokenText == getNamespaceTokenText(I[i + 1]) &&
closingLine == I[i + 1]->MatchingClosingBlockLineIndex &&
I[i + 1]->Last->TotalLength < Limit;
- i++, closingLine--) {
- // No extra indent for compacted namespaces
+ i++, --closingLine) {
+ // No extra indent for compacted namespaces.
IndentTracker.skipLine(*I[i + 1]);
Limit -= I[i + 1]->Last->TotalLength;
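[The CompactNamespaces loop above is functionally untouched (comment punctuation and a pre-decrement only); it keeps consuming nested namespace lines while they fit within the limit. Illustrative result, assuming CompactNamespaces: true:

    namespace out { namespace in {
    int x;
    }}]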
@@ -346,128 +383,143 @@ private:
nsToken->TokenText ==
getMatchingNamespaceTokenText(I[i + 1], AnnotatedLines) &&
openingLine == I[i + 1]->MatchingOpeningBlockLineIndex;
- i++, openingLine--) {
- // No space between consecutive braces
+ i++, --openingLine) {
+ // No space between consecutive braces.
I[i + 1]->First->SpacesRequiredBefore = !I[i]->Last->is(tok::r_brace);
- // Indent like the outer-most namespace
+ // Indent like the outer-most namespace.
IndentTracker.nextLine(*I[i + 1]);
}
return i;
}
}
- // Try to merge a function block with left brace unwrapped
- if (TheLine->Last->is(TT_FunctionLBrace) &&
- TheLine->First != TheLine->Last) {
+ // Try to merge a function block with left brace unwrapped.
+ if (TheLine->Last->is(TT_FunctionLBrace) && TheLine->First != TheLine->Last)
return MergeShortFunctions ? tryMergeSimpleBlock(I, E, Limit) : 0;
- }
- // Try to merge a control statement block with left brace unwrapped
- if (TheLine->Last->is(tok::l_brace) && TheLine->First != TheLine->Last &&
- TheLine->First->isOneOf(tok::kw_if, tok::kw_while, tok::kw_for,
- TT_ForEachMacro)) {
+ // Try to merge a control statement block with left brace unwrapped.
+ if (TheLine->Last->is(tok::l_brace) && FirstNonComment != TheLine->Last &&
+ FirstNonComment->isOneOf(tok::kw_if, tok::kw_while, tok::kw_for,
+ TT_ForEachMacro)) {
return Style.AllowShortBlocksOnASingleLine != FormatStyle::SBS_Never
? tryMergeSimpleBlock(I, E, Limit)
: 0;
}
- // Try to merge a control statement block with left brace wrapped
- if (I[1]->First->is(tok::l_brace) &&
- (TheLine->First->isOneOf(tok::kw_if, tok::kw_else, tok::kw_while,
- tok::kw_for, tok::kw_switch, tok::kw_try,
- tok::kw_do, TT_ForEachMacro) ||
- (TheLine->First->is(tok::r_brace) && TheLine->First->Next &&
- TheLine->First->Next->isOneOf(tok::kw_else, tok::kw_catch))) &&
- Style.BraceWrapping.AfterControlStatement ==
- FormatStyle::BWACS_MultiLine) {
- // If possible, merge the next line's wrapped left brace with the current
- // line. Otherwise, leave it on the next line, as this is a multi-line
- // control statement.
- return (Style.ColumnLimit == 0 ||
- TheLine->Last->TotalLength <= Style.ColumnLimit)
- ? 1
- : 0;
- } else if (I[1]->First->is(tok::l_brace) &&
- TheLine->First->isOneOf(tok::kw_if, tok::kw_else, tok::kw_while,
- tok::kw_for, TT_ForEachMacro)) {
- return (Style.BraceWrapping.AfterControlStatement ==
- FormatStyle::BWACS_Always)
- ? tryMergeSimpleBlock(I, E, Limit)
- : 0;
- } else if (I[1]->First->is(tok::l_brace) &&
- TheLine->First->isOneOf(tok::kw_else, tok::kw_catch) &&
- Style.BraceWrapping.AfterControlStatement ==
- FormatStyle::BWACS_MultiLine) {
- // This case if different from the upper BWACS_MultiLine processing
- // in that a preceding r_brace is not on the same line as else/catch
- // most likely because of BeforeElse/BeforeCatch set to true.
- // If the line length doesn't fit ColumnLimit, leave l_brace on the
- // next line to respect the BWACS_MultiLine.
- return (Style.ColumnLimit == 0 ||
- TheLine->Last->TotalLength <= Style.ColumnLimit)
- ? 1
- : 0;
+ // Try to merge a control statement block with left brace wrapped.
+ if (NextLine.First->is(tok::l_brace)) {
+ if ((TheLine->First->isOneOf(tok::kw_if, tok::kw_else, tok::kw_while,
+ tok::kw_for, tok::kw_switch, tok::kw_try,
+ tok::kw_do, TT_ForEachMacro) ||
+ (TheLine->First->is(tok::r_brace) && TheLine->First->Next &&
+ TheLine->First->Next->isOneOf(tok::kw_else, tok::kw_catch))) &&
+ Style.BraceWrapping.AfterControlStatement ==
+ FormatStyle::BWACS_MultiLine) {
+ // If possible, merge the next line's wrapped left brace with the
+ // current line. Otherwise, leave it on the next line, as this is a
+ // multi-line control statement.
+ return (Style.ColumnLimit == 0 || TheLine->Level * Style.IndentWidth +
+ TheLine->Last->TotalLength <=
+ Style.ColumnLimit)
+ ? 1
+ : 0;
+ }
+ if (TheLine->First->isOneOf(tok::kw_if, tok::kw_else, tok::kw_while,
+ tok::kw_for, TT_ForEachMacro)) {
+ return (Style.BraceWrapping.AfterControlStatement ==
+ FormatStyle::BWACS_Always)
+ ? tryMergeSimpleBlock(I, E, Limit)
+ : 0;
+ }
+ if (TheLine->First->isOneOf(tok::kw_else, tok::kw_catch) &&
+ Style.BraceWrapping.AfterControlStatement ==
+ FormatStyle::BWACS_MultiLine) {
+      // This case is different from the BWACS_MultiLine handling above in
+      // that the preceding r_brace is not on the same line as else/catch,
+      // most likely because BeforeElse/BeforeCatch is set to true.
+ // If the line length doesn't fit ColumnLimit, leave l_brace on the
+ // next line to respect the BWACS_MultiLine.
+ return (Style.ColumnLimit == 0 ||
+ TheLine->Last->TotalLength <= Style.ColumnLimit)
+ ? 1
+ : 0;
+ }
}
- // Don't merge block with left brace wrapped after ObjC special blocks
- if (TheLine->First->is(tok::l_brace) && I != AnnotatedLines.begin() &&
- I[-1]->First->is(tok::at) && I[-1]->First->Next) {
- tok::ObjCKeywordKind kwId = I[-1]->First->Next->Tok.getObjCKeywordID();
- if (kwId == clang::tok::objc_autoreleasepool ||
- kwId == clang::tok::objc_synchronized)
+ if (PreviousLine && TheLine->First->is(tok::l_brace)) {
+ switch (PreviousLine->First->Tok.getKind()) {
+ case tok::at:
+ // Don't merge block with left brace wrapped after ObjC special blocks.
+ if (PreviousLine->First->Next) {
+ tok::ObjCKeywordKind kwId =
+ PreviousLine->First->Next->Tok.getObjCKeywordID();
+ if (kwId == tok::objc_autoreleasepool ||
+ kwId == tok::objc_synchronized) {
+ return 0;
+ }
+ }
+ break;
+
+ case tok::kw_case:
+ case tok::kw_default:
+ // Don't merge block with left brace wrapped after case labels.
return 0;
+
+ default:
+ break;
+ }
}
- // Don't merge block with left brace wrapped after case labels
- if (TheLine->First->is(tok::l_brace) && I != AnnotatedLines.begin() &&
- I[-1]->First->isOneOf(tok::kw_case, tok::kw_default))
- return 0;
    // Don't merge an empty template class or struct if SplitEmptyRecord
    // is set.
- if (Style.BraceWrapping.SplitEmptyRecord &&
- TheLine->Last->is(tok::l_brace) && I != AnnotatedLines.begin() &&
- I[-1]->Last) {
- const FormatToken *Previous = I[-1]->Last;
+ if (PreviousLine && Style.BraceWrapping.SplitEmptyRecord &&
+ TheLine->Last->is(tok::l_brace) && PreviousLine->Last) {
+ const FormatToken *Previous = PreviousLine->Last;
if (Previous) {
if (Previous->is(tok::comment))
Previous = Previous->getPreviousNonComment();
if (Previous) {
- if (Previous->is(tok::greater) && !I[-1]->InPPDirective)
+ if (Previous->is(tok::greater) && !PreviousLine->InPPDirective)
return 0;
if (Previous->is(tok::identifier)) {
const FormatToken *PreviousPrevious =
Previous->getPreviousNonComment();
if (PreviousPrevious &&
- PreviousPrevious->isOneOf(tok::kw_class, tok::kw_struct))
+ PreviousPrevious->isOneOf(tok::kw_class, tok::kw_struct)) {
return 0;
+ }
}
}
}
}
- // Try to merge a block with left brace unwrapped that wasn't yet covered
if (TheLine->Last->is(tok::l_brace)) {
- const FormatToken *Tok = TheLine->First;
bool ShouldMerge = false;
- if (Tok->is(tok::kw_typedef)) {
- Tok = Tok->getNextNonComment();
- assert(Tok);
- }
- if (Tok->isOneOf(tok::kw_class, tok::kw_struct)) {
+ // Try to merge records.
+ if (TheLine->Last->is(TT_EnumLBrace)) {
+ ShouldMerge = Style.AllowShortEnumsOnASingleLine;
+ } else if (TheLine->Last->isOneOf(TT_ClassLBrace, TT_StructLBrace)) {
+ // NOTE: We use AfterClass (whereas AfterStruct exists) for both classes
+ // and structs, but it seems that wrapping is still handled correctly
+ // elsewhere.
ShouldMerge = !Style.BraceWrapping.AfterClass ||
- (I[1]->First->is(tok::r_brace) &&
+ (NextLine.First->is(tok::r_brace) &&
!Style.BraceWrapping.SplitEmptyRecord);
- } else if (Tok->is(tok::kw_enum)) {
- ShouldMerge = Style.AllowShortEnumsOnASingleLine;
} else {
+ // Try to merge a block with left brace unwrapped that wasn't yet
+ // covered.
+ assert(TheLine->InPPDirective ||
+ !TheLine->First->isOneOf(tok::kw_class, tok::kw_enum,
+ tok::kw_struct));
ShouldMerge = !Style.BraceWrapping.AfterFunction ||
- (I[1]->First->is(tok::r_brace) &&
+ (NextLine.First->is(tok::r_brace) &&
!Style.BraceWrapping.SplitEmptyFunction);
}
return ShouldMerge ? tryMergeSimpleBlock(I, E, Limit) : 0;
}
- // Try to merge a function block with left brace wrapped
- if (I[1]->First->is(TT_FunctionLBrace) &&
+
+ // Try to merge a function block with left brace wrapped.
+ if (NextLine.First->is(TT_FunctionLBrace) &&
Style.BraceWrapping.AfterFunction) {
- if (I[1]->Last->is(TT_LineComment))
+ if (NextLine.Last->is(TT_LineComment))
return 0;
// Check for Limit <= 2 to account for the " {".
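[Record merging above now dispatches on the brace's annotated type instead of re-scanning leading keywords: TT_EnumLBrace defers to AllowShortEnumsOnASingleLine, TT_ClassLBrace/TT_StructLBrace to BraceWrapping.AfterClass, and everything else falls through to the function-brace rules. Sketch of the enum case, ours:

    enum class Color { Red, Green };   // kept on one line while AllowShortEnumsOnASingleLine holds]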
@@ -478,7 +530,7 @@ private:
unsigned MergedLines = 0;
if (MergeShortFunctions ||
(Style.AllowShortFunctionsOnASingleLine >= FormatStyle::SFS_Empty &&
- I[1]->First == I[1]->Last && I + 2 != E &&
+ NextLine.First == NextLine.Last && I + 2 != E &&
I[2]->First->is(tok::r_brace))) {
MergedLines = tryMergeSimpleBlock(I + 1, E, Limit);
// If we managed to merge the block, count the function header, which is
@@ -542,16 +594,19 @@ private:
if (Style.BraceWrapping.AfterControlStatement ==
FormatStyle::BWACS_Always &&
I[1]->First->is(tok::l_brace) &&
- Style.AllowShortBlocksOnASingleLine == FormatStyle::SBS_Never)
+ Style.AllowShortBlocksOnASingleLine == FormatStyle::SBS_Never) {
return 0;
+ }
if (I[1]->InPPDirective != (*I)->InPPDirective ||
- (I[1]->InPPDirective && I[1]->First->HasUnescapedNewline))
+ (I[1]->InPPDirective && I[1]->First->HasUnescapedNewline)) {
return 0;
+ }
Limit = limitConsideringMacros(I + 1, E, Limit);
AnnotatedLine &Line = **I;
if (!Line.First->is(tok::kw_do) && !Line.First->is(tok::kw_else) &&
- !Line.Last->is(tok::kw_else) && Line.Last->isNot(tok::r_paren))
+ !Line.Last->is(tok::kw_else) && Line.Last->isNot(tok::r_paren)) {
return 0;
+ }
// Only merge `do while` if `do` is the only statement on the line.
if (Line.First->is(tok::kw_do) && !Line.Last->is(tok::kw_do))
return 0;
@@ -559,14 +614,16 @@ private:
return 0;
// Don't merge with loops, ifs, a single semicolon or a line comment.
if (I[1]->First->isOneOf(tok::semi, tok::kw_if, tok::kw_for, tok::kw_while,
- TT_ForEachMacro, TT_LineComment))
+ TT_ForEachMacro, TT_LineComment)) {
return 0;
+ }
    // Only inline simple ifs (no nested if or else), unless specified.
if (Style.AllowShortIfStatementsOnASingleLine ==
FormatStyle::SIS_WithoutElse) {
if (I + 2 != E && Line.startsWith(tok::kw_if) &&
- I[2]->First->is(tok::kw_else))
+ I[2]->First->is(tok::kw_else)) {
return 0;
+ }
}
return 1;
}
@@ -576,8 +633,9 @@ private:
SmallVectorImpl<AnnotatedLine *>::const_iterator E,
unsigned Limit) {
if (Limit == 0 || I + 1 == E ||
- I[1]->First->isOneOf(tok::kw_case, tok::kw_default))
+ I[1]->First->isOneOf(tok::kw_case, tok::kw_default)) {
return 0;
+ }
if (I[0]->Last->is(tok::l_brace) || I[1]->First->is(tok::l_brace))
return 0;
unsigned NumStmts = 0;
@@ -595,8 +653,9 @@ private:
break;
if (Line->First->isOneOf(tok::kw_if, tok::kw_for, tok::kw_switch,
tok::kw_while) ||
- EndsWithComment)
+ EndsWithComment) {
return 0;
+ }
if (Line->First->is(tok::comment)) {
if (Level != Line->Level)
return 0;
@@ -625,20 +684,26 @@ private:
tryMergeSimpleBlock(SmallVectorImpl<AnnotatedLine *>::const_iterator I,
SmallVectorImpl<AnnotatedLine *>::const_iterator E,
unsigned Limit) {
+ // Don't merge with a preprocessor directive.
+ if (I[1]->Type == LT_PreprocessorDirective)
+ return 0;
+
AnnotatedLine &Line = **I;
// Don't merge ObjC @ keywords and methods.
// FIXME: If an option to allow short exception handling clauses on a single
// line is added, change this to not return for @try and friends.
if (Style.Language != FormatStyle::LK_Java &&
- Line.First->isOneOf(tok::at, tok::minus, tok::plus))
+ Line.First->isOneOf(tok::at, tok::minus, tok::plus)) {
return 0;
+ }
// Check that the current line allows merging. This depends on whether we
// are in a control flow statements as well as several style flags.
if (Line.First->is(tok::kw_case) ||
- (Line.First->Next && Line.First->Next->is(tok::kw_else)))
+ (Line.First->Next && Line.First->Next->is(tok::kw_else))) {
return 0;
+ }
// default: in switch statement
if (Line.First->is(tok::kw_default)) {
const FormatToken *Tok = Line.First->getNextNonComment();
@@ -652,49 +717,60 @@ private:
if (Style.AllowShortBlocksOnASingleLine == FormatStyle::SBS_Never)
return 0;
if (Style.AllowShortBlocksOnASingleLine == FormatStyle::SBS_Empty &&
- !I[1]->First->is(tok::r_brace))
+ !I[1]->First->is(tok::r_brace)) {
return 0;
+ }
    // Don't merge when we can't, except when the control statement block
    // is empty.
if (!Style.AllowShortIfStatementsOnASingleLine &&
Line.First->isOneOf(tok::kw_if, tok::kw_else) &&
!Style.BraceWrapping.AfterControlStatement &&
- !I[1]->First->is(tok::r_brace))
+ !I[1]->First->is(tok::r_brace)) {
return 0;
+ }
if (!Style.AllowShortIfStatementsOnASingleLine &&
Line.First->isOneOf(tok::kw_if, tok::kw_else) &&
Style.BraceWrapping.AfterControlStatement ==
FormatStyle::BWACS_Always &&
- I + 2 != E && !I[2]->First->is(tok::r_brace))
+ I + 2 != E && !I[2]->First->is(tok::r_brace)) {
return 0;
+ }
if (!Style.AllowShortLoopsOnASingleLine &&
Line.First->isOneOf(tok::kw_while, tok::kw_do, tok::kw_for,
TT_ForEachMacro) &&
!Style.BraceWrapping.AfterControlStatement &&
- !I[1]->First->is(tok::r_brace))
+ !I[1]->First->is(tok::r_brace)) {
return 0;
+ }
if (!Style.AllowShortLoopsOnASingleLine &&
Line.First->isOneOf(tok::kw_while, tok::kw_do, tok::kw_for,
TT_ForEachMacro) &&
Style.BraceWrapping.AfterControlStatement ==
FormatStyle::BWACS_Always &&
- I + 2 != E && !I[2]->First->is(tok::r_brace))
+ I + 2 != E && !I[2]->First->is(tok::r_brace)) {
return 0;
+ }
// FIXME: Consider an option to allow short exception handling clauses on
// a single line.
// FIXME: This isn't covered by tests.
// FIXME: For catch, __except, __finally the first token on the line
// is '}', so this isn't correct here.
if (Line.First->isOneOf(tok::kw_try, tok::kw___try, tok::kw_catch,
- Keywords.kw___except, tok::kw___finally))
+ Keywords.kw___except, tok::kw___finally)) {
return 0;
+ }
}
if (Line.Last->is(tok::l_brace)) {
FormatToken *Tok = I[1]->First;
- if (Tok->is(tok::r_brace) && !Tok->MustBreakBefore &&
- (Tok->getNextNonComment() == nullptr ||
- Tok->getNextNonComment()->is(tok::semi))) {
+ auto ShouldMerge = [Tok]() {
+ if (Tok->isNot(tok::r_brace) || Tok->MustBreakBefore)
+ return false;
+ const FormatToken *Next = Tok->getNextNonComment();
+ return !Next || Next->is(tok::semi);
+ };
+
+ if (ShouldMerge()) {
// We merge empty blocks even if the line exceeds the column limit.
Tok->SpacesRequiredBefore = Style.SpaceInEmptyBlock ? 1 : 0;
Tok->CanBreakBefore = true;
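[The extracted ShouldMerge lambda preserves the old condition: an opening brace merges with a following '}' (optionally trailed by ';') unless a break is mandated, and, as the comment notes, even past the column limit. Roughly, ours:

    void noop() {}   // '{' and '}' joined; SpaceInEmptyBlock picks {} vs { }]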
@@ -702,18 +778,7 @@ private:
} else if (Limit != 0 && !Line.startsWithNamespace() &&
!startsExternCBlock(Line)) {
// We don't merge short records.
- FormatToken *RecordTok = Line.First;
- // Skip record modifiers.
- while (RecordTok->Next &&
- RecordTok->isOneOf(tok::kw_typedef, tok::kw_export,
- Keywords.kw_declare, Keywords.kw_abstract,
- tok::kw_default, Keywords.kw_override,
- tok::kw_public, tok::kw_private,
- tok::kw_protected, Keywords.kw_internal))
- RecordTok = RecordTok->Next;
- if (RecordTok &&
- RecordTok->isOneOf(tok::kw_class, tok::kw_union, tok::kw_struct,
- Keywords.kw_interface))
+ if (isRecordLBrace(*Line.Last))
return 0;
// Check that we still have three lines and they fit into the limit.
@@ -751,8 +816,9 @@ private:
// }
if (Line.First == Line.Last && Line.First->isNot(TT_FunctionLBrace) &&
Style.BraceWrapping.AfterControlStatement ==
- FormatStyle::BWACS_MultiLine)
+ FormatStyle::BWACS_MultiLine) {
return 0;
+ }
return 2;
}
@@ -800,10 +866,9 @@ private:
}
bool containsMustBreak(const AnnotatedLine *Line) {
- for (const FormatToken *Tok = Line->First; Tok; Tok = Tok->Next) {
+ for (const FormatToken *Tok = Line->First; Tok; Tok = Tok->Next)
if (Tok->MustBreakBefore)
return true;
- }
return false;
}
@@ -891,10 +956,11 @@ protected:
const FormatToken *LBrace = State.NextToken->getPreviousNonComment();
FormatToken &Previous = *State.NextToken->Previous;
if (!LBrace || LBrace->isNot(tok::l_brace) || LBrace->isNot(BK_Block) ||
- Previous.Children.size() == 0)
+ Previous.Children.size() == 0) {
// The previous token does not open a block. Nothing to do. We don't
// assert so that we can simply call this function for all tokens.
return true;
+ }
if (NewLine) {
const ParenState &P = State.Stack.back();
@@ -937,8 +1003,9 @@ protected:
// If the child line exceeds the column limit, we wouldn't want to merge it.
// We add +2 for the trailing " }".
if (Style.ColumnLimit > 0 &&
- Child->Last->TotalLength + State.Column + 2 > Style.ColumnLimit)
+ Child->Last->TotalLength + State.Column + 2 > Style.ColumnLimit) {
return false;
+ }
if (!DryRun) {
Whitespaces->replaceWhitespace(
@@ -1096,6 +1163,10 @@ private:
// While not empty, take first element and follow edges.
while (!Queue.empty()) {
+ // Quit if we still haven't found a solution by now.
+ if (Count > 25000000)
+ return 0;
+
Penalty = Queue.top().first.first;
StateNode *Node = Queue.top().second;
if (!Node->State.NextToken) {
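[The optimizing line formatter runs a best-first (Dijkstra-style) search over break decisions; the new guard bails out after 25,000,000 expansions and returns a penalty of 0 rather than effectively hanging on pathological inputs. The loop's shape, heavily simplified and ours:

    while (!Queue.empty()) {
      if (++Count > Limit)    // give up on pathological inputs
        return 0;
      StateNode *Node = Queue.top().second;
      Queue.pop();
      // expand Node: enqueue the break and no-break successors
    }]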
@@ -1110,9 +1181,10 @@ private:
if (Count > 50000)
Node->State.IgnoreStackForComparison = true;
- if (!Seen.insert(&Node->State).second)
+ if (!Seen.insert(&Node->State).second) {
// State already examined with lower penalty.
continue;
+ }
FormatDecision LastFormat = Node->State.NextToken->getDecision();
if (LastFormat == FD_Unformatted || LastFormat == FD_Continue)
@@ -1261,26 +1333,28 @@ unsigned UnwrappedLineFormatter::format(
(!Style.isJavaScript() || !Style.JavaScriptWrapImports)) ||
(Style.isCSharp() &&
TheLine.InPPDirective); // don't split #regions in C#
- if (Style.ColumnLimit == 0)
+ if (Style.ColumnLimit == 0) {
NoColumnLimitLineFormatter(Indenter, Whitespaces, Style, this)
.formatLine(TheLine, NextStartColumn + Indent,
FirstLine ? FirstStartColumn : 0, DryRun);
- else if (FitsIntoOneLine)
+ } else if (FitsIntoOneLine) {
Penalty += NoLineBreakFormatter(Indenter, Whitespaces, Style, this)
.formatLine(TheLine, NextStartColumn + Indent,
FirstLine ? FirstStartColumn : 0, DryRun);
- else
+ } else {
Penalty += OptimizingLineFormatter(Indenter, Whitespaces, Style, this)
.formatLine(TheLine, NextStartColumn + Indent,
FirstLine ? FirstStartColumn : 0, DryRun);
+ }
RangeMinLevel = std::min(RangeMinLevel, TheLine.Level);
} else {
// If no token in the current line is affected, we still need to format
// affected children.
- if (TheLine.ChildrenAffected)
+ if (TheLine.ChildrenAffected) {
for (const FormatToken *Tok = TheLine.First; Tok; Tok = Tok->Next)
if (!Tok->Children.empty())
format(Tok->Children, DryRun);
+ }
// Adapt following lines on the current indent level to the same level
// unless the current \c AnnotatedLine is not at the beginning of a line.
@@ -1293,13 +1367,14 @@ unsigned UnwrappedLineFormatter::format(
StartsNewLine && ((PreviousLine && PreviousLine->Affected) ||
TheLine.LeadingEmptyLinesAffected);
// Format the first token.
- if (ReformatLeadingWhitespace)
+ if (ReformatLeadingWhitespace) {
formatFirstToken(TheLine, PreviousLine, PrevPrevLine, Lines,
TheLine.First->OriginalColumn,
TheLine.First->OriginalColumn);
- else
+ } else {
Whitespaces->addUntouchableToken(*TheLine.First,
TheLine.InPPDirective);
+ }
// Notify the WhitespaceManager about the unchanged whitespace.
for (FormatToken *Tok = TheLine.First->Next; Tok; Tok = Tok->Next)
@@ -1335,8 +1410,9 @@ void UnwrappedLineFormatter::formatFirstToken(
(!RootToken.Next ||
(RootToken.Next->is(tok::semi) && !RootToken.Next->Next)) &&
// Do not remove empty lines before namespace closing "}".
- !getNamespaceToken(&Line, Lines))
+ !getNamespaceToken(&Line, Lines)) {
Newlines = std::min(Newlines, 1u);
+ }
// Remove empty lines at the start of nested blocks (lambdas/arrow functions)
if (PreviousLine == nullptr && Line.Level > 0)
Newlines = std::min(Newlines, 1u);
@@ -1351,8 +1427,9 @@ void UnwrappedLineFormatter::formatFirstToken(
!PreviousLine->startsWithNamespace() &&
!(PrevPrevLine && PrevPrevLine->startsWithNamespace() &&
PreviousLine->startsWith(tok::l_brace)) &&
- !startsExternCBlock(*PreviousLine))
+ !startsExternCBlock(*PreviousLine)) {
Newlines = 1;
+ }
// Insert or remove empty line before access specifiers.
if (PreviousLine && RootToken.isAccessSpecifier()) {
@@ -1408,11 +1485,14 @@ void UnwrappedLineFormatter::formatFirstToken(
if (Newlines)
Indent = NewlineIndent;
- // Preprocessor directives get indented before the hash only if specified
- if (Style.IndentPPDirectives != FormatStyle::PPDIS_BeforeHash &&
+  // Preprocessor directives get indented before the hash only if specified.
+  // In JavaScript, import statements are indented like normal statements.
+ if (!Style.isJavaScript() &&
+ Style.IndentPPDirectives != FormatStyle::PPDIS_BeforeHash &&
(Line.Type == LT_PreprocessorDirective ||
- Line.Type == LT_ImportStatement))
+ Line.Type == LT_ImportStatement)) {
Indent = 0;
+ }
Whitespaces->replaceWhitespace(RootToken, Newlines, Indent, Indent,
/*IsAligned=*/false,
diff --git a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
index 2e2293c6d58b..d3383292f7a3 100644
--- a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
+++ b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.cpp
@@ -20,6 +20,7 @@
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
+#include <utility>
#define DEBUG_TYPE "format-parser"
@@ -41,6 +42,12 @@ public:
// getNextToken().
virtual FormatToken *peekNextToken() = 0;
+ // Returns the token that would be returned after the next N calls to
+ // getNextToken(). N needs to be greater than zero, and small enough that
+ // there are still tokens. Check for tok::eof with N-1 before calling it with
+ // N.
+ virtual FormatToken *peekNextToken(int N) = 0;
+
// Returns whether we are at the end of the file.
// This can be different from whether getNextToken() returned an eof token
// when the FormatTokenSource is a view on a part of the token stream.
@@ -138,6 +145,13 @@ public:
return PreviousTokenSource->peekNextToken();
}
+ FormatToken *peekNextToken(int N) override {
+ assert(N > 0);
+ if (eof())
+ return &FakeEOF;
+ return PreviousTokenSource->peekNextToken(N);
+ }
+
bool isEOF() override { return PreviousTokenSource->isEOF(); }
unsigned getPosition() override { return PreviousTokenSource->getPosition(); }
@@ -184,9 +198,8 @@ public:
}
~ScopedLineState() {
- if (!Parser.Line->Tokens.empty()) {
+ if (!Parser.Line->Tokens.empty())
Parser.addUnwrappedLine();
- }
assert(Parser.Line->Tokens.empty());
Parser.Line = std::move(PreBlockLine);
if (Parser.CurrentLines == &Parser.PreprocessorDirectives)
@@ -259,6 +272,16 @@ public:
return Tokens[Next];
}
+ FormatToken *peekNextToken(int N) override {
+ assert(N > 0);
+ int Next = Position + N;
+ LLVM_DEBUG({
+ llvm::dbgs() << "Peeking (+" << (N - 1) << ") ";
+ dbgToken(Next);
+ });
+ return Tokens[Next];
+ }
+
bool isEOF() override { return Tokens[Position]->is(tok::eof); }
unsigned getPosition() override {
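[peekNextToken(int N) generalizes the single-token lookahead; per the contract above, a caller must confirm it has not hit tok::eof at depth N-1 before peeking at depth N. A hedged usage sketch:

    const FormatToken *Tok1 = Tokens->peekNextToken();     // effectively N == 1
    if (Tok1->isNot(tok::eof)) {
      const FormatToken *Tok2 = Tokens->peekNextToken(2);  // safe: a further token exists
    }]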
@@ -335,10 +358,11 @@ void UnwrappedLineParser::parse() {
// If we found an include guard then all preprocessor directives (other than
// the guard) are over-indented by one.
- if (IncludeGuard == IG_Found)
+ if (IncludeGuard == IG_Found) {
for (auto &Line : Lines)
if (Line.InPPDirective && Line.Level > 0)
--Line.Level;
+ }
// Create line with eof token.
pushToken(FormatTok);
@@ -371,7 +395,7 @@ void UnwrappedLineParser::parseFile() {
if (Style.Language == FormatStyle::LK_TextProto)
parseBracedList();
else
- parseLevel(/*HasOpeningBrace=*/false);
+ parseLevel();
// Make sure to format the remaining tokens.
//
// LK_TextProto is special since its top-level is parsed as the body of a
@@ -383,8 +407,9 @@ void UnwrappedLineParser::parseFile() {
// do not have a chance to be put on a line of their own until this point.
// Here we add this newline before end-of-file comments.
if (Style.Language == FormatStyle::LK_TextProto &&
- !CommentsBeforeNextToken.empty())
+ !CommentsBeforeNextToken.empty()) {
addUnwrappedLine();
+ }
flushComments(true);
addUnwrappedLine();
}
@@ -439,46 +464,50 @@ bool UnwrappedLineParser::precededByCommentOrPPDirective() const {
(Previous->IsMultiline || Previous->NewlinesBefore > 0);
}
-bool UnwrappedLineParser::mightFitOnOneLine() const {
- const auto ColumnLimit = Style.ColumnLimit;
- if (ColumnLimit == 0)
- return true;
-
- if (Lines.empty())
- return true;
-
- const auto &PreviousLine = Lines.back();
- const auto &Tokens = PreviousLine.Tokens;
- assert(!Tokens.empty());
- const auto *LastToken = Tokens.back().Tok;
- assert(LastToken);
- if (!LastToken->isOneOf(tok::semi, tok::comment))
- return true;
-
- AnnotatedLine Line(PreviousLine);
- assert(Line.Last == LastToken);
-
- TokenAnnotator Annotator(Style, Keywords);
- Annotator.annotate(Line);
- Annotator.calculateFormattingInformation(Line);
-
- return Line.Level * Style.IndentWidth + LastToken->TotalLength <= ColumnLimit;
-}
-
-// Returns true if a simple block, or false otherwise. (A simple block has a
-// single statement that fits on a single line.)
-bool UnwrappedLineParser::parseLevel(bool HasOpeningBrace, IfStmtKind *IfKind) {
+/// \brief Parses a level, that is, the lines at one brace nesting depth.
+/// \param OpeningBrace Opening brace (\p nullptr if absent) of that level
+/// \param CanContainBracedList If the content can contain (at any level) a
+/// braced list.
+/// \param NextLBracesType The type for left brace found in this level.
+/// \param IfKind The \p if statement kind in the level.
+/// \param IfLeftBrace The left brace of the \p if block in the level.
+/// \returns true if a simple block of if/else/for/while, or false otherwise.
+/// (A simple block has a single statement.)
+bool UnwrappedLineParser::parseLevel(const FormatToken *OpeningBrace,
+ bool CanContainBracedList,
+ TokenType NextLBracesType,
+ IfStmtKind *IfKind,
+ FormatToken **IfLeftBrace) {
+ auto NextLevelLBracesType = NextLBracesType == TT_CompoundRequirementLBrace
+ ? TT_BracedListLBrace
+ : TT_Unknown;
const bool IsPrecededByCommentOrPPDirective =
!Style.RemoveBracesLLVM || precededByCommentOrPPDirective();
+ FormatToken *IfLBrace = nullptr;
+ bool HasDoWhile = false;
+ bool HasLabel = false;
unsigned StatementCount = 0;
bool SwitchLabelEncountered = false;
+
do {
+ if (FormatTok->getType() == TT_AttributeMacro) {
+ nextToken();
+ continue;
+ }
tok::TokenKind kind = FormatTok->Tok.getKind();
- if (FormatTok->getType() == TT_MacroBlockBegin) {
+ if (FormatTok->getType() == TT_MacroBlockBegin)
kind = tok::l_brace;
- } else if (FormatTok->getType() == TT_MacroBlockEnd) {
+ else if (FormatTok->getType() == TT_MacroBlockEnd)
kind = tok::r_brace;
- }
+
+ auto ParseDefault = [this, OpeningBrace, NextLevelLBracesType, IfKind,
+ &IfLBrace, &HasDoWhile, &HasLabel, &StatementCount] {
+ parseStructuralElement(!OpeningBrace, NextLevelLBracesType, IfKind,
+ &IfLBrace, HasDoWhile ? nullptr : &HasDoWhile,
+ HasLabel ? nullptr : &HasLabel);
+ ++StatementCount;
+ assert(StatementCount > 0 && "StatementCount overflow!");
+ };
switch (kind) {
case tok::comment:
@@ -486,28 +515,44 @@ bool UnwrappedLineParser::parseLevel(bool HasOpeningBrace, IfStmtKind *IfKind) {
addUnwrappedLine();
break;
case tok::l_brace:
- // FIXME: Add parameter whether this can happen - if this happens, we must
- // be in a non-declaration context.
- if (!FormatTok->is(TT_MacroBlockBegin) && tryToParseBracedList())
+ if (NextLBracesType != TT_Unknown) {
+ FormatTok->setFinalizedType(NextLBracesType);
+ } else if (FormatTok->Previous &&
+ FormatTok->Previous->ClosesRequiresClause) {
+ // We need the 'default' case here to correctly parse a function
+ // l_brace.
+ ParseDefault();
continue;
- parseBlock();
+ }
+ if (CanContainBracedList && !FormatTok->is(TT_MacroBlockBegin) &&
+ tryToParseBracedList()) {
+ continue;
+ }
+ parseBlock(/*MustBeDeclaration=*/false, /*AddLevels=*/1u,
+ /*MunchSemi=*/true, /*KeepBraces=*/true, /*IfKind=*/nullptr,
+ /*UnindentWhitesmithsBraces=*/false, CanContainBracedList,
+ NextLBracesType);
++StatementCount;
assert(StatementCount > 0 && "StatementCount overflow!");
addUnwrappedLine();
break;
case tok::r_brace:
- if (HasOpeningBrace) {
- if (!Style.RemoveBracesLLVM)
+ if (OpeningBrace) {
+ if (!Style.RemoveBracesLLVM ||
+ !OpeningBrace->isOneOf(TT_ControlStatementLBrace, TT_ElseLBrace)) {
return false;
- if (FormatTok->isNot(tok::r_brace) || StatementCount != 1 ||
- IsPrecededByCommentOrPPDirective ||
+ }
+ if (FormatTok->isNot(tok::r_brace) || StatementCount != 1 || HasLabel ||
+ HasDoWhile || IsPrecededByCommentOrPPDirective ||
precededByCommentOrPPDirective()) {
return false;
}
const FormatToken *Next = Tokens->peekNextToken();
if (Next->is(tok::comment) && Next->NewlinesBefore == 0)
return false;
- return mightFitOnOneLine();
+ if (IfLeftBrace)
+ *IfLeftBrace = IfLBrace;
+ return true;
}
nextToken();
addUnwrappedLine();
@@ -517,9 +562,10 @@ bool UnwrappedLineParser::parseLevel(bool HasOpeningBrace, IfStmtKind *IfKind) {
FormatToken *Next;
do {
Next = Tokens->getNextToken();
+ assert(Next);
} while (Next->is(tok::comment));
FormatTok = Tokens->setPosition(StoredPosition);
- if (Next && Next->isNot(tok::colon)) {
+ if (Next->isNot(tok::colon)) {
// default not followed by ':' is not a case label; treat it like
// an identifier.
parseStructuralElement();
@@ -535,8 +581,10 @@ bool UnwrappedLineParser::parseLevel(bool HasOpeningBrace, IfStmtKind *IfKind) {
break;
}
if (!SwitchLabelEncountered &&
- (Style.IndentCaseLabels || (Line->InPPDirective && Line->Level == 1)))
+ (Style.IndentCaseLabels ||
+ (Line->InPPDirective && Line->Level == 1))) {
++Line->Level;
+ }
SwitchLabelEncountered = true;
parseStructuralElement();
break;
@@ -546,14 +594,15 @@ bool UnwrappedLineParser::parseLevel(bool HasOpeningBrace, IfStmtKind *IfKind) {
parseCSharpAttribute();
break;
}
+ if (handleCppAttributes())
+ break;
LLVM_FALLTHROUGH;
default:
- parseStructuralElement(IfKind, !HasOpeningBrace);
- ++StatementCount;
- assert(StatementCount > 0 && "StatementCount overflow!");
+ ParseDefault();
break;
}
} while (!eof());
+
return false;
}
@@ -569,20 +618,18 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
// update information about whether an lbrace starts a
// braced init list or a different block during the loop.
SmallVector<FormatToken *, 8> LBraceStack;
- assert(Tok->Tok.is(tok::l_brace));
+ assert(Tok->is(tok::l_brace));
do {
// Get next non-comment token.
FormatToken *NextTok;
- unsigned ReadTokens = 0;
do {
NextTok = Tokens->getNextToken();
- ++ReadTokens;
} while (NextTok->is(tok::comment));
switch (Tok->Tok.getKind()) {
case tok::l_brace:
if (Style.isJavaScript() && PrevTok) {
- if (PrevTok->isOneOf(tok::colon, tok::less))
+ if (PrevTok->isOneOf(tok::colon, tok::less)) {
// A ':' indicates this code is in a type, or a braced list
// following a label in an object literal ({a: {b: 1}}).
// A '<' could be an object used in a comparison, but that is nonsense
@@ -593,9 +640,10 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
// trigger BK_Block. In both cases, this must be parsed as an inline
// braced init.
Tok->setBlockKind(BK_BracedInit);
- else if (PrevTok->is(tok::r_paren))
+ } else if (PrevTok->is(tok::r_paren)) {
// `) { }` can only occur in function or method declarations in JS.
Tok->setBlockKind(BK_Block);
+ }
} else {
Tok->setBlockKind(BK_Unknown);
}
@@ -616,7 +664,6 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
ScopedMacroState MacroState(*Line, Tokens, NextTok);
do {
NextTok = Tokens->getNextToken();
- ++ReadTokens;
} while (NextTok->isNot(tok::eof));
}
@@ -625,32 +672,50 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
bool NextIsObjCMethod = NextTok->isOneOf(tok::plus, tok::minus) &&
NextTok->OriginalColumn == 0;
+          // Try to detect a braced list. Note that regardless of how we mark
+          // inner braces here, we will overwrite the BlockKind later if we
+          // parse a braced list (where all blocks inside are by default braced
+          // lists), or when we explicitly detect blocks (for example while
+          // parsing lambdas).
+
+ // If we already marked the opening brace as braced list, the closing
+ // must also be part of it.
+ ProbablyBracedList = LBraceStack.back()->is(TT_BracedListLBrace);
+
+ ProbablyBracedList = ProbablyBracedList ||
+ (Style.isJavaScript() &&
+ NextTok->isOneOf(Keywords.kw_of, Keywords.kw_in,
+ Keywords.kw_as));
+ ProbablyBracedList = ProbablyBracedList ||
+ (Style.isCpp() && NextTok->is(tok::l_paren));
+
// If there is a comma, semicolon or right paren after the closing
- // brace, we assume this is a braced initializer list. Note that
- // regardless how we mark inner braces here, we will overwrite the
- // BlockKind later if we parse a braced list (where all blocks
- // inside are by default braced lists), or when we explicitly detect
- // blocks (for example while parsing lambdas).
+ // brace, we assume this is a braced initializer list.
// FIXME: Some of these do not apply to JS, e.g. "} {" can never be a
// braced list in JS.
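+          // For example, the trailing comma in "f({a, b}, c)" marks "{a, b}"
+          // as a braced init list rather than a block.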
ProbablyBracedList =
- (Style.isJavaScript() &&
- NextTok->isOneOf(Keywords.kw_of, Keywords.kw_in,
- Keywords.kw_as)) ||
- (Style.isCpp() && NextTok->is(tok::l_paren)) ||
+ ProbablyBracedList ||
NextTok->isOneOf(tok::comma, tok::period, tok::colon,
tok::r_paren, tok::r_square, tok::l_brace,
- tok::ellipsis) ||
+ tok::ellipsis);
+
+ ProbablyBracedList =
+ ProbablyBracedList ||
(NextTok->is(tok::identifier) &&
- !PrevTok->isOneOf(tok::semi, tok::r_brace, tok::l_brace)) ||
- (NextTok->is(tok::semi) &&
- (!ExpectClassBody || LBraceStack.size() != 1)) ||
+ !PrevTok->isOneOf(tok::semi, tok::r_brace, tok::l_brace));
+
+ ProbablyBracedList = ProbablyBracedList ||
+ (NextTok->is(tok::semi) &&
+ (!ExpectClassBody || LBraceStack.size() != 1));
+
+ ProbablyBracedList =
+ ProbablyBracedList ||
(NextTok->isBinaryOperator() && !NextIsObjCMethod);
+
if (!Style.isCSharp() && NextTok->is(tok::l_square)) {
// We can have an array subscript after a braced init
// list, but C++11 attributes are expected after blocks.
NextTok = Tokens->getNextToken();
- ++ReadTokens;
ProbablyBracedList = NextTok->isNot(tok::l_square);
}
}
@@ -684,13 +749,12 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
}
PrevTok = Tok;
Tok = NextTok;
- } while (Tok->Tok.isNot(tok::eof) && !LBraceStack.empty());
+ } while (Tok->isNot(tok::eof) && !LBraceStack.empty());
// Assume other blocks for all unclosed opening braces.
- for (FormatToken *LBrace : LBraceStack) {
+ for (FormatToken *LBrace : LBraceStack)
if (LBrace->is(BK_Unknown))
LBrace->setBlockKind(BK_Block);
- }
FormatTok = Tokens->setPosition(StoredPosition);
}
@@ -710,13 +774,76 @@ size_t UnwrappedLineParser::computePPHash() const {
return h;
}
-UnwrappedLineParser::IfStmtKind
-UnwrappedLineParser::parseBlock(bool MustBeDeclaration, unsigned AddLevels,
- bool MunchSemi,
- bool UnindentWhitesmithsBraces) {
- assert(FormatTok->isOneOf(tok::l_brace, TT_MacroBlockBegin) &&
+// Checks whether \p ParsedLine might fit on a single line. If \p OpeningBrace
+// is not null, subtracts its length (plus the preceding space) when computing
+// the length of \p ParsedLine. We must clone the tokens of \p ParsedLine before
+// running the token annotator on it so that we can restore them afterward.
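+// (This mirrors brace removal: if "if (a) { return; }" becomes
+// "if (a) return;", the brace and the space before it no longer contribute to
+// the line length, hence the subtraction below.)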
+bool UnwrappedLineParser::mightFitOnOneLine(
+ UnwrappedLine &ParsedLine, const FormatToken *OpeningBrace) const {
+ const auto ColumnLimit = Style.ColumnLimit;
+ if (ColumnLimit == 0)
+ return true;
+
+ auto &Tokens = ParsedLine.Tokens;
+ assert(!Tokens.empty());
+
+ const auto *LastToken = Tokens.back().Tok;
+ assert(LastToken);
+
+ SmallVector<UnwrappedLineNode> SavedTokens(Tokens.size());
+
+ int Index = 0;
+ for (const auto &Token : Tokens) {
+ assert(Token.Tok);
+ auto &SavedToken = SavedTokens[Index++];
+ SavedToken.Tok = new FormatToken;
+ SavedToken.Tok->copyFrom(*Token.Tok);
+ SavedToken.Children = std::move(Token.Children);
+ }
+
+ AnnotatedLine Line(ParsedLine);
+ assert(Line.Last == LastToken);
+
+ TokenAnnotator Annotator(Style, Keywords);
+ Annotator.annotate(Line);
+ Annotator.calculateFormattingInformation(Line);
+
+ auto Length = LastToken->TotalLength;
+ if (OpeningBrace) {
+ assert(OpeningBrace != Tokens.front().Tok);
+ Length -= OpeningBrace->TokenText.size() + 1;
+ }
+
+ Index = 0;
+ for (auto &Token : Tokens) {
+ const auto &SavedToken = SavedTokens[Index++];
+ Token.Tok->copyFrom(*SavedToken.Tok);
+ Token.Children = std::move(SavedToken.Children);
+ delete SavedToken.Tok;
+ }
+
+ return Line.Level * Style.IndentWidth + Length <= ColumnLimit;
+}
+
+FormatToken *UnwrappedLineParser::parseBlock(
+ bool MustBeDeclaration, unsigned AddLevels, bool MunchSemi, bool KeepBraces,
+ IfStmtKind *IfKind, bool UnindentWhitesmithsBraces,
+ bool CanContainBracedList, TokenType NextLBracesType) {
+ auto HandleVerilogBlockLabel = [this]() {
+ // ":" name
+ if (Style.isVerilog() && FormatTok->is(tok::colon)) {
+ nextToken();
+ if (Keywords.isVerilogIdentifier(*FormatTok))
+ nextToken();
+ }
+ };
+
+ assert((FormatTok->isOneOf(tok::l_brace, TT_MacroBlockBegin) ||
+ (Style.isVerilog() && Keywords.isVerilogBegin(*FormatTok))) &&
"'{' or macro block token expected");
FormatToken *Tok = FormatTok;
+ const bool FollowedByComment = Tokens->peekNextToken()->is(tok::comment);
+ auto Index = CurrentLines->size();
const bool MacroBlock = FormatTok->is(TT_MacroBlockBegin);
FormatTok->setBlockKind(BK_Block);
@@ -727,8 +854,13 @@ UnwrappedLineParser::parseBlock(bool MustBeDeclaration, unsigned AddLevels,
size_t PPStartHash = computePPHash();
- unsigned InitialLevel = Line->Level;
+ const unsigned InitialLevel = Line->Level;
nextToken(/*LevelDifference=*/AddLevels);
+ HandleVerilogBlockLabel();
+
+ // Bail out if there are too many levels. Otherwise, the stack might overflow.
+ if (Line->Level > 300)
+ return nullptr;
if (MacroBlock && FormatTok->is(tok::l_paren))
parseParens();
@@ -752,45 +884,77 @@ UnwrappedLineParser::parseBlock(bool MustBeDeclaration, unsigned AddLevels,
if (AddLevels > 0u && Style.BreakBeforeBraces != FormatStyle::BS_Whitesmiths)
Line->Level += AddLevels;
- IfStmtKind IfKind = IfStmtKind::NotIf;
- const bool SimpleBlock = parseLevel(/*HasOpeningBrace=*/true, &IfKind);
+ FormatToken *IfLBrace = nullptr;
+ const bool SimpleBlock =
+ parseLevel(Tok, CanContainBracedList, NextLBracesType, IfKind, &IfLBrace);
if (eof())
- return IfKind;
+ return IfLBrace;
if (MacroBlock ? !FormatTok->is(TT_MacroBlockEnd)
: !FormatTok->is(tok::r_brace)) {
Line->Level = InitialLevel;
FormatTok->setBlockKind(BK_Block);
- return IfKind;
+ return IfLBrace;
}
- if (SimpleBlock && Tok->is(tok::l_brace)) {
+ auto RemoveBraces = [=]() mutable {
+ if (!SimpleBlock)
+ return false;
+ assert(Tok->isOneOf(TT_ControlStatementLBrace, TT_ElseLBrace));
assert(FormatTok->is(tok::r_brace));
- const FormatToken *Previous = Tokens->getPreviousToken();
- assert(Previous);
- if (Previous->isNot(tok::r_brace) || Previous->Optional) {
- Tok->MatchingParen = FormatTok;
- FormatTok->MatchingParen = Tok;
+ const bool WrappedOpeningBrace = !Tok->Previous;
+ if (WrappedOpeningBrace && FollowedByComment)
+ return false;
+ const bool HasRequiredIfBraces = IfLBrace && !IfLBrace->Optional;
+ if (KeepBraces && !HasRequiredIfBraces)
+ return false;
+ if (Tok->isNot(TT_ElseLBrace) || !HasRequiredIfBraces) {
+ const FormatToken *Previous = Tokens->getPreviousToken();
+ assert(Previous);
+ if (Previous->is(tok::r_brace) && !Previous->Optional)
+ return false;
+ }
+ assert(!CurrentLines->empty());
+ auto &LastLine = CurrentLines->back();
+ if (LastLine.Level == InitialLevel + 1 && !mightFitOnOneLine(LastLine))
+ return false;
+ if (Tok->is(TT_ElseLBrace))
+ return true;
+ if (WrappedOpeningBrace) {
+ assert(Index > 0);
+ --Index; // The line above the wrapped l_brace.
+ Tok = nullptr;
}
+ return mightFitOnOneLine((*CurrentLines)[Index], Tok);
+ };
+ if (RemoveBraces()) {
+ Tok->MatchingParen = FormatTok;
+ FormatTok->MatchingParen = Tok;
}
size_t PPEndHash = computePPHash();
// Munch the closing brace.
nextToken(/*LevelDifference=*/-AddLevels);
+ HandleVerilogBlockLabel();
if (MacroBlock && FormatTok->is(tok::l_paren))
parseParens();
+ if (FormatTok->is(tok::kw_noexcept)) {
+ // A noexcept in a requires expression.
+ nextToken();
+ }
+
if (FormatTok->is(tok::arrow)) {
- // Following the } we can find a trailing return type arrow
+ // Following the } or noexcept we can find a trailing return type arrow
// as part of an implicit conversion constraint.
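+    // e.g. "{ x } noexcept -> std::same_as<bool>;" in a requires expression.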
nextToken();
parseStructuralElement();
}
- if (MunchSemi && FormatTok->Tok.is(tok::semi))
+ if (MunchSemi && FormatTok->is(tok::semi))
nextToken();
Line->Level = InitialLevel;
@@ -804,7 +968,7 @@ UnwrappedLineParser::parseBlock(bool MustBeDeclaration, unsigned AddLevels,
}
}
- return IfKind;
+ return IfLBrace;
}
static bool isGoogScope(const UnwrappedLine &Line) {
@@ -845,21 +1009,31 @@ static bool isIIFE(const UnwrappedLine &Line,
static bool ShouldBreakBeforeBrace(const FormatStyle &Style,
const FormatToken &InitialToken) {
- if (InitialToken.isOneOf(tok::kw_namespace, TT_NamespaceMacro))
+ tok::TokenKind Kind = InitialToken.Tok.getKind();
+ if (InitialToken.is(TT_NamespaceMacro))
+ Kind = tok::kw_namespace;
+
+ switch (Kind) {
+ case tok::kw_namespace:
return Style.BraceWrapping.AfterNamespace;
- if (InitialToken.is(tok::kw_class))
+ case tok::kw_class:
return Style.BraceWrapping.AfterClass;
- if (InitialToken.is(tok::kw_union))
+ case tok::kw_union:
return Style.BraceWrapping.AfterUnion;
- if (InitialToken.is(tok::kw_struct))
+ case tok::kw_struct:
return Style.BraceWrapping.AfterStruct;
- if (InitialToken.is(tok::kw_enum))
+ case tok::kw_enum:
return Style.BraceWrapping.AfterEnum;
- return false;
+ default:
+ return false;
+ }
}
-void UnwrappedLineParser::parseChildBlock() {
+void UnwrappedLineParser::parseChildBlock(
+ bool CanContainBracedList, clang::format::TokenType NextLBracesType) {
+ assert(FormatTok->is(tok::l_brace));
FormatTok->setBlockKind(BK_Block);
+ const FormatToken *OpeningBrace = FormatTok;
nextToken();
{
bool SkipIndent = (Style.isJavaScript() &&
@@ -868,7 +1042,7 @@ void UnwrappedLineParser::parseChildBlock() {
ScopedDeclarationState DeclarationState(*Line, DeclarationScopeStack,
/*MustBeDeclaration=*/false);
Line->Level += SkipIndent ? 0 : 1;
- parseLevel(/*HasOpeningBrace=*/true);
+ parseLevel(OpeningBrace, CanContainBracedList, NextLBracesType);
flushComments(isOnNewLine(*FormatTok));
Line->Level -= SkipIndent ? 0 : 1;
}
@@ -876,7 +1050,7 @@ void UnwrappedLineParser::parseChildBlock() {
}
void UnwrappedLineParser::parsePPDirective() {
- assert(FormatTok->Tok.is(tok::hash) && "'#' expected");
+ assert(FormatTok->is(tok::hash) && "'#' expected");
ScopedMacroState MacroState(*Line, Tokens, FormatTok);
nextToken();
@@ -920,10 +1094,11 @@ void UnwrappedLineParser::conditionalCompilationCondition(bool Unreachable) {
Line += Lines.size();
if (Unreachable ||
- (!PPStack.empty() && PPStack.back().Kind == PP_Unreachable))
+ (!PPStack.empty() && PPStack.back().Kind == PP_Unreachable)) {
PPStack.push_back({PP_Unreachable, Line});
- else
+ } else {
PPStack.push_back({PP_Conditional, Line});
+ }
}
void UnwrappedLineParser::conditionalCompilationStart(bool Unreachable) {
@@ -952,9 +1127,8 @@ void UnwrappedLineParser::conditionalCompilationAlternative() {
void UnwrappedLineParser::conditionalCompilationEnd() {
assert(PPBranchLevel < (int)PPLevelBranchIndex.size());
if (PPBranchLevel >= 0 && !PPChainBranchIndex.empty()) {
- if (PPChainBranchIndex.top() + 1 > PPLevelBranchCount[PPBranchLevel]) {
+ if (PPChainBranchIndex.top() + 1 > PPLevelBranchCount[PPBranchLevel])
PPLevelBranchCount[PPBranchLevel] = PPChainBranchIndex.top() + 1;
- }
}
// Guard against #endif's without #if.
if (PPBranchLevel > -1)
@@ -978,7 +1152,7 @@ void UnwrappedLineParser::parsePPIf(bool IfDef) {
// If there's a #ifndef on the first line, and the only lines before it are
// comments, it could be an include guard.
bool MaybeIncludeGuard = IfNDef;
- if (IncludeGuard == IG_Inited && MaybeIncludeGuard)
+ if (IncludeGuard == IG_Inited && MaybeIncludeGuard) {
for (auto &Line : Lines) {
if (!Line.Tokens.front().Tok->is(tok::comment)) {
MaybeIncludeGuard = false;
@@ -986,6 +1160,7 @@ void UnwrappedLineParser::parsePPIf(bool IfDef) {
break;
}
}
+ }
--PPBranchLevel;
parsePPUnknown();
++PPBranchLevel;
@@ -1014,8 +1189,9 @@ void UnwrappedLineParser::parsePPEndIf() {
// If the #endif of a potential include guard is the last thing in the file,
// then we found an include guard.
if (IncludeGuard == IG_Defined && PPBranchLevel == -1 && Tokens->isEOF() &&
- Style.IndentPPDirectives != FormatStyle::PPDIS_None)
+ Style.IndentPPDirectives != FormatStyle::PPDIS_None) {
IncludeGuard = IG_Found;
+ }
}
void UnwrappedLineParser::parsePPDefine() {
@@ -1040,6 +1216,13 @@ void UnwrappedLineParser::parsePPDefine() {
}
}
+ // In the context of a define, even keywords should be treated as normal
+ // identifiers. Setting the kind to identifier is not enough, because we need
+ // to treat additional keywords like __except as well, which are already
+ // identifiers. Setting the identifier info to null interferes with include
+ // guard processing above, and changes preprocessing nesting.
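+  // e.g. a macro named 'try' or a parameter named '__except' (illustrative)
+  // must be formatted as a plain identifier here.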
+ FormatTok->Tok.setKind(tok::identifier);
+ FormatTok->Tok.setIdentifierInfo(Keywords.kw_internal_ident_after_define);
nextToken();
if (FormatTok->Tok.getKind() == tok::l_paren &&
!FormatTok->hasWhitespaceBefore()) {
@@ -1167,8 +1350,9 @@ static bool isC78ParameterDecl(const FormatToken *Tok, const FormatToken *Next,
return false;
if (!isC78Type(*Tok) &&
- !Tok->isOneOf(tok::kw_register, tok::kw_struct, tok::kw_union))
+ !Tok->isOneOf(tok::kw_register, tok::kw_struct, tok::kw_union)) {
return false;
+ }
if (Next->isNot(tok::star) && !Next->Tok.getIdentifierInfo())
return false;
@@ -1188,7 +1372,7 @@ void UnwrappedLineParser::parseModuleImport() {
nextToken();
while (!eof()) {
if (FormatTok->is(tok::colon)) {
- FormatTok->setType(TT_ModulePartitionColon);
+ FormatTok->setFinalizedType(TT_ModulePartitionColon);
}
// Handle import <foo/bar.h> as we would an include statement.
else if (FormatTok->is(tok::less)) {
@@ -1197,8 +1381,9 @@ void UnwrappedLineParser::parseModuleImport() {
// Mark tokens up to the trailing line comments as implicit string
// literals.
if (FormatTok->isNot(tok::comment) &&
- !FormatTok->TokenText.startswith("//"))
- FormatTok->setType(TT_ImplicitStringLiteral);
+ !FormatTok->TokenText.startswith("//")) {
+ FormatTok->setFinalizedType(TT_ImplicitStringLiteral);
+ }
nextToken();
}
}
@@ -1251,15 +1436,18 @@ void UnwrappedLineParser::readTokenWithJavaScriptASI() {
if (NextMustBeValue && !NextEndsTemplateExpr && !PreviousStartsTemplateExpr &&
(PreviousMustBeValue ||
Previous->isOneOf(tok::r_square, tok::r_paren, tok::plusplus,
- tok::minusminus)))
+ tok::minusminus))) {
return addUnwrappedLine();
+ }
if ((PreviousMustBeValue || Previous->is(tok::r_paren)) &&
- isJSDeclOrStmt(Keywords, Next))
+ isJSDeclOrStmt(Keywords, Next)) {
return addUnwrappedLine();
+ }
}
-void UnwrappedLineParser::parseStructuralElement(IfStmtKind *IfKind,
- bool IsTopLevel) {
+void UnwrappedLineParser::parseStructuralElement(
+ bool IsTopLevel, TokenType NextLBracesType, IfStmtKind *IfKind,
+ FormatToken **IfLeftBrace, bool *HasDoWhile, bool *HasLabel) {
if (Style.Language == FormatStyle::LK_TableGen &&
FormatTok->is(tok::pp_include)) {
nextToken();
@@ -1272,11 +1460,11 @@ void UnwrappedLineParser::parseStructuralElement(IfStmtKind *IfKind,
case tok::kw_asm:
nextToken();
if (FormatTok->is(tok::l_brace)) {
- FormatTok->setType(TT_InlineASMBrace);
+ FormatTok->setFinalizedType(TT_InlineASMBrace);
nextToken();
while (FormatTok && FormatTok->isNot(tok::eof)) {
if (FormatTok->is(tok::r_brace)) {
- FormatTok->setType(TT_InlineASMBrace);
+ FormatTok->setFinalizedType(TT_InlineASMBrace);
nextToken();
addUnwrappedLine();
break;
@@ -1293,40 +1481,51 @@ void UnwrappedLineParser::parseStructuralElement(IfStmtKind *IfKind,
case tok::kw_protected:
case tok::kw_private:
if (Style.Language == FormatStyle::LK_Java || Style.isJavaScript() ||
- Style.isCSharp())
+ Style.isCSharp()) {
nextToken();
- else
+ } else {
parseAccessSpecifier();
+ }
return;
- case tok::kw_if:
- if (Style.isJavaScript() && Line->MustBeDeclaration)
+ case tok::kw_if: {
+ if (Style.isJavaScript() && Line->MustBeDeclaration) {
// field/method declaration.
break;
- parseIfThenElse(IfKind);
+ }
+ FormatToken *Tok = parseIfThenElse(IfKind);
+ if (IfLeftBrace)
+ *IfLeftBrace = Tok;
return;
+ }
case tok::kw_for:
case tok::kw_while:
- if (Style.isJavaScript() && Line->MustBeDeclaration)
+ if (Style.isJavaScript() && Line->MustBeDeclaration) {
// field/method declaration.
break;
+ }
parseForOrWhileLoop();
return;
case tok::kw_do:
- if (Style.isJavaScript() && Line->MustBeDeclaration)
+ if (Style.isJavaScript() && Line->MustBeDeclaration) {
// field/method declaration.
break;
+ }
parseDoWhile();
+ if (HasDoWhile)
+ *HasDoWhile = true;
return;
case tok::kw_switch:
- if (Style.isJavaScript() && Line->MustBeDeclaration)
+ if (Style.isJavaScript() && Line->MustBeDeclaration) {
// 'switch: string' field declaration.
break;
+ }
parseSwitch();
return;
case tok::kw_default:
- if (Style.isJavaScript() && Line->MustBeDeclaration)
+ if (Style.isJavaScript() && Line->MustBeDeclaration) {
// 'default: string' field declaration.
break;
+ }
nextToken();
if (FormatTok->is(tok::colon)) {
parseLabel();
@@ -1335,23 +1534,26 @@ void UnwrappedLineParser::parseStructuralElement(IfStmtKind *IfKind,
// e.g. "default void f() {}" in a Java interface.
break;
case tok::kw_case:
- if (Style.isJavaScript() && Line->MustBeDeclaration)
+ if (Style.isJavaScript() && Line->MustBeDeclaration) {
// 'case: string' field declaration.
+ nextToken();
break;
+ }
parseCaseLabel();
return;
case tok::kw_try:
case tok::kw___try:
- if (Style.isJavaScript() && Line->MustBeDeclaration)
+ if (Style.isJavaScript() && Line->MustBeDeclaration) {
// field/method declaration.
break;
+ }
parseTryCatch();
return;
case tok::kw_extern:
nextToken();
- if (FormatTok->Tok.is(tok::string_literal)) {
+ if (FormatTok->is(tok::string_literal)) {
nextToken();
- if (FormatTok->Tok.is(tok::l_brace)) {
+ if (FormatTok->is(tok::l_brace)) {
if (Style.BraceWrapping.AfterExternBlock)
addUnwrappedLine();
// Either we indent or for backwards compatibility we follow the
@@ -1380,7 +1582,7 @@ void UnwrappedLineParser::parseStructuralElement(IfStmtKind *IfKind,
LLVM_FALLTHROUGH;
case tok::kw_inline:
nextToken();
- if (FormatTok->Tok.is(tok::kw_namespace)) {
+ if (FormatTok->is(tok::kw_namespace)) {
parseNamespace();
return;
}
@@ -1445,7 +1647,7 @@ void UnwrappedLineParser::parseStructuralElement(IfStmtKind *IfKind,
switch (FormatTok->Tok.getKind()) {
case tok::at:
nextToken();
- if (FormatTok->Tok.is(tok::l_brace)) {
+ if (FormatTok->is(tok::l_brace)) {
nextToken();
parseBracedList();
break;
@@ -1476,23 +1678,26 @@ void UnwrappedLineParser::parseStructuralElement(IfStmtKind *IfKind,
return;
case tok::objc_autoreleasepool:
nextToken();
- if (FormatTok->Tok.is(tok::l_brace)) {
+ if (FormatTok->is(tok::l_brace)) {
if (Style.BraceWrapping.AfterControlStatement ==
- FormatStyle::BWACS_Always)
+ FormatStyle::BWACS_Always) {
addUnwrappedLine();
+ }
parseBlock();
}
addUnwrappedLine();
return;
case tok::objc_synchronized:
nextToken();
- if (FormatTok->Tok.is(tok::l_paren))
+ if (FormatTok->is(tok::l_paren)) {
// Skip synchronization object
parseParens();
- if (FormatTok->Tok.is(tok::l_brace)) {
+ }
+ if (FormatTok->is(tok::l_brace)) {
if (Style.BraceWrapping.AfterControlStatement ==
- FormatStyle::BWACS_Always)
+ FormatStyle::BWACS_Always) {
addUnwrappedLine();
+ }
parseBlock();
}
addUnwrappedLine();
@@ -1509,9 +1714,16 @@ void UnwrappedLineParser::parseStructuralElement(IfStmtKind *IfKind,
case tok::kw_concept:
parseConcept();
return;
- case tok::kw_requires:
- parseRequires();
- return;
+ case tok::kw_requires: {
+ if (Style.isCpp()) {
+ bool ParsedClause = parseRequires();
+ if (ParsedClause)
+ return;
+ } else {
+ nextToken();
+ }
+ break;
+ }
case tok::kw_enum:
// Ignore if this is part of "template <enum ...".
if (Previous && Previous->is(tok::less)) {
@@ -1534,27 +1746,29 @@ void UnwrappedLineParser::parseStructuralElement(IfStmtKind *IfKind,
if (FormatTok->isOneOf(Keywords.kw_NS_ENUM, Keywords.kw_NS_OPTIONS,
Keywords.kw_CF_ENUM, Keywords.kw_CF_OPTIONS,
Keywords.kw_CF_CLOSED_ENUM,
- Keywords.kw_NS_CLOSED_ENUM))
+ Keywords.kw_NS_CLOSED_ENUM)) {
parseEnum();
+ }
break;
case tok::kw_struct:
case tok::kw_union:
case tok::kw_class:
- if (parseStructLike()) {
+ if (parseStructLike())
return;
- }
break;
case tok::period:
nextToken();
// In Java, classes have an implicit static member "class".
if (Style.Language == FormatStyle::LK_Java && FormatTok &&
- FormatTok->is(tok::kw_class))
+ FormatTok->is(tok::kw_class)) {
nextToken();
+ }
if (Style.isJavaScript() && FormatTok &&
- FormatTok->Tok.getIdentifierInfo())
+ FormatTok->Tok.getIdentifierInfo()) {
// JavaScript only has pseudo keywords, all keywords are allowed to
// appear in "IdentifierName" positions. See http://es5.github.io/#x7.6
nextToken();
+ }
break;
case tok::semi:
nextToken();
@@ -1583,14 +1797,17 @@ void UnwrappedLineParser::parseStructuralElement(IfStmtKind *IfKind,
case tok::caret:
nextToken();
if (FormatTok->Tok.isAnyIdentifier() ||
- FormatTok->isSimpleTypeSpecifier())
+ FormatTok->isSimpleTypeSpecifier()) {
nextToken();
+ }
if (FormatTok->is(tok::l_paren))
parseParens();
if (FormatTok->is(tok::l_brace))
parseChildBlock();
break;
case tok::l_brace:
+ if (NextLBracesType != TT_Unknown)
+ FormatTok->setFinalizedType(NextLBracesType);
if (!tryToParsePropertyAccessor() && !tryToParseBracedList()) {
// A block outside of parentheses must be the last part of a
// structural element.
@@ -1601,12 +1818,14 @@ void UnwrappedLineParser::parseStructuralElement(IfStmtKind *IfKind,
// If necessary, we could set the type to something different than
// TT_FunctionLBrace.
if (Style.BraceWrapping.AfterControlStatement ==
- FormatStyle::BWACS_Always)
+ FormatStyle::BWACS_Always) {
addUnwrappedLine();
+ }
} else if (Style.BraceWrapping.AfterFunction) {
addUnwrappedLine();
}
- FormatTok->setType(TT_FunctionLBrace);
+ if (!Line->InPPDirective)
+ FormatTok->setFinalizedType(TT_FunctionLBrace);
parseBlock();
addUnwrappedLine();
return;
@@ -1669,9 +1888,8 @@ void UnwrappedLineParser::parseStructuralElement(IfStmtKind *IfKind,
}
if (FormatTok->is(Keywords.kw_interface)) {
- if (parseStructLike()) {
+ if (parseStructLike())
return;
- }
break;
}
@@ -1691,12 +1909,23 @@ void UnwrappedLineParser::parseStructuralElement(IfStmtKind *IfKind,
if (Style.isJavaScript())
break;
- TokenCount = Line->Tokens.size();
- if (TokenCount == 1 ||
- (TokenCount == 2 && Line->Tokens.front().Tok->is(tok::comment))) {
- if (FormatTok->Tok.is(tok::colon) && !Line->MustBeDeclaration) {
+ auto OneTokenSoFar = [&]() {
+ const UnwrappedLineNode *Tok = &Line->Tokens.front(),
+ *End = Tok + Line->Tokens.size();
+ while (Tok != End && Tok->Tok->is(tok::comment))
+ ++Tok;
+ // In Verilog, macro invocations start with a backtick which the code
+ // treats as a hash. Skip it.
+ if (Style.isVerilog() && Tok != End && Tok->Tok->is(tok::hash))
+ ++Tok;
+ return End - Tok == 1;
+ };
+ if (OneTokenSoFar()) {
+ if (FormatTok->is(tok::colon) && !Line->MustBeDeclaration) {
Line->Tokens.begin()->Tok->MustBreakBefore = true;
parseLabel(!Style.IndentGotoLabels);
+ if (HasLabel)
+ *HasLabel = true;
return;
}
// Recognize function-like macro usages without trailing semicolon as
@@ -1712,7 +1941,7 @@ void UnwrappedLineParser::parseStructuralElement(IfStmtKind *IfKind,
if (FollowedByNewline && (Text.size() >= 5 || FunctionLike) &&
tokenCanStartNewLine(*FormatTok) && Text == Text.upper()) {
- PreviousToken->setType(TT_FunctionLikeOrFreestandingMacro);
+ PreviousToken->setFinalizedType(TT_FunctionLikeOrFreestandingMacro);
addUnwrappedLine();
return;
}
@@ -1727,7 +1956,7 @@ void UnwrappedLineParser::parseStructuralElement(IfStmtKind *IfKind,
}
nextToken();
- if (FormatTok->Tok.is(tok::l_brace)) {
+ if (FormatTok->is(tok::l_brace)) {
// Block kind should probably be set to BK_BracedInit for any language.
// C# needs this change to ensure that array initialisers and object
// initialisers are indented the same way.
@@ -1736,7 +1965,7 @@ void UnwrappedLineParser::parseStructuralElement(IfStmtKind *IfKind,
nextToken();
parseBracedList();
} else if (Style.Language == FormatStyle::LK_Proto &&
- FormatTok->Tok.is(tok::less)) {
+ FormatTok->is(tok::less)) {
nextToken();
parseBracedList(/*ContinueOnSemicolons=*/false, /*IsEnum=*/false,
/*ClosingBraceKind=*/tok::greater);
@@ -1748,6 +1977,14 @@ void UnwrappedLineParser::parseStructuralElement(IfStmtKind *IfKind,
case tok::kw_new:
parseNew();
break;
+ case tok::kw_case:
+ if (Style.isJavaScript() && Line->MustBeDeclaration) {
+ // 'case: string' field declaration.
+ nextToken();
+ break;
+ }
+ parseCaseLabel();
+ break;
default:
nextToken();
break;
@@ -1772,16 +2009,16 @@ bool UnwrappedLineParser::tryToParsePropertyAccessor() {
FormatToken *Tok = Tokens->getNextToken();
// A trivial property accessor is of the form:
- // { [ACCESS_SPECIFIER] [get]; [ACCESS_SPECIFIER] [set] }
+ // { [ACCESS_SPECIFIER] [get]; [ACCESS_SPECIFIER] [set|init] }
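+  //   e.g. "public int X { get; private set; }" or "{ get; init; }"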
// Track these as they do not require line breaks to be introduced.
- bool HasGetOrSet = false;
+ bool HasSpecialAccessor = false;
bool IsTrivialPropertyAccessor = true;
while (!eof()) {
if (Tok->isOneOf(tok::semi, tok::kw_public, tok::kw_private,
tok::kw_protected, Keywords.kw_internal, Keywords.kw_get,
- Keywords.kw_set)) {
- if (Tok->isOneOf(Keywords.kw_get, Keywords.kw_set))
- HasGetOrSet = true;
+ Keywords.kw_init, Keywords.kw_set)) {
+ if (Tok->isOneOf(Keywords.kw_get, Keywords.kw_init, Keywords.kw_set))
+ HasSpecialAccessor = true;
Tok = Tokens->getNextToken();
continue;
}
@@ -1790,7 +2027,7 @@ bool UnwrappedLineParser::tryToParsePropertyAccessor() {
break;
}
- if (!HasGetOrSet) {
+ if (!HasSpecialAccessor) {
Tokens->setPosition(StoredPosition);
return false;
}
@@ -1832,7 +2069,8 @@ bool UnwrappedLineParser::tryToParsePropertyAccessor() {
nextToken();
break;
default:
- if (FormatTok->isOneOf(Keywords.kw_get, Keywords.kw_set) &&
+ if (FormatTok->isOneOf(Keywords.kw_get, Keywords.kw_init,
+ Keywords.kw_set) &&
!IsTrivialPropertyAccessor) {
// Non-trivial get/set needs to be on its own line.
addUnwrappedLine();
@@ -1846,11 +2084,11 @@ bool UnwrappedLineParser::tryToParsePropertyAccessor() {
}
bool UnwrappedLineParser::tryToParseLambda() {
+ assert(FormatTok->is(tok::l_square));
if (!Style.isCpp()) {
nextToken();
return false;
}
- assert(FormatTok->is(tok::l_square));
FormatToken &LSquare = *FormatTok;
if (!tryToParseLambdaIntroducer())
return false;
@@ -1936,7 +2174,7 @@ bool UnwrappedLineParser::tryToParseLambda() {
// This might or might not actually be a lambda arrow (this could be an
// ObjC method invocation followed by a dereferencing arrow). We might
// reset this back to TT_Unknown in TokenAnnotator.
- FormatTok->setType(TT_LambdaArrow);
+ FormatTok->setFinalizedType(TT_LambdaArrow);
SeenArrow = true;
nextToken();
break;
@@ -1944,25 +2182,29 @@ bool UnwrappedLineParser::tryToParseLambda() {
return true;
}
}
- FormatTok->setType(TT_LambdaLBrace);
- LSquare.setType(TT_LambdaLSquare);
+ FormatTok->setFinalizedType(TT_LambdaLBrace);
+ LSquare.setFinalizedType(TT_LambdaLSquare);
parseChildBlock();
return true;
}
bool UnwrappedLineParser::tryToParseLambdaIntroducer() {
const FormatToken *Previous = FormatTok->Previous;
+ const FormatToken *LeftSquare = FormatTok;
+ nextToken();
if (Previous &&
(Previous->isOneOf(tok::identifier, tok::kw_operator, tok::kw_new,
tok::kw_delete, tok::l_square) ||
- FormatTok->isCppStructuredBinding(Style) || Previous->closesScope() ||
+ LeftSquare->isCppStructuredBinding(Style) || Previous->closesScope() ||
Previous->isSimpleTypeSpecifier())) {
- nextToken();
return false;
}
- nextToken();
- if (FormatTok->is(tok::l_square)) {
+ if (FormatTok->is(tok::l_square))
return false;
+ if (FormatTok->is(tok::r_square)) {
+ const FormatToken *Next = Tokens->peekNextToken();
+ if (Next->is(tok::greater))
+ return false;
}
parseSquare(/*LambdaIntroducer=*/true);
return true;
@@ -1978,7 +2220,7 @@ void UnwrappedLineParser::tryToParseJSFunction() {
// Consume * (generator function). Treat it like C++'s overloaded operators.
if (FormatTok->is(tok::star)) {
- FormatTok->setType(TT_OverloadedOperator);
+ FormatTok->setFinalizedType(TT_OverloadedOperator);
nextToken();
}
@@ -2044,8 +2286,9 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
// replace this by using parseAssignmentExpression() inside.
do {
if (Style.isCSharp() && FormatTok->is(TT_FatArrow) &&
- tryToParseChildBlock())
+ tryToParseChildBlock()) {
continue;
+ }
if (Style.isJavaScript()) {
if (FormatTok->is(Keywords.kw_function) ||
FormatTok->startsSequence(Keywords.kw_async, Keywords.kw_function)) {
@@ -2090,7 +2333,8 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
parseBracedList();
break;
case tok::less:
- if (Style.Language == FormatStyle::LK_Proto) {
+ if (Style.Language == FormatStyle::LK_Proto ||
+ ClosingBraceKind == tok::greater) {
nextToken();
parseBracedList(/*ContinueOnSemicolons=*/false, /*IsEnum=*/false,
/*ClosingBraceKind=*/tok::greater);
@@ -2125,8 +2369,11 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
return false;
}
-void UnwrappedLineParser::parseParens() {
- assert(FormatTok->Tok.is(tok::l_paren) && "'(' expected.");
+/// \brief Parses a pair of parentheses (and everything between them).
+/// \param AmpAmpTokenType If different from TT_Unknown, sets this type for all
+/// double ampersands. This only applies to the current parens scope.
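+/// (For instance, this lets the '&&' in "requires (C1<T> && C2<T>)" be typed
+/// as part of the constraint rather than as an rvalue reference.)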
+void UnwrappedLineParser::parseParens(TokenType AmpAmpTokenType) {
+ assert(FormatTok->is(tok::l_paren) && "'(' expected.");
nextToken();
do {
switch (FormatTok->Tok.getKind()) {
@@ -2150,7 +2397,7 @@ void UnwrappedLineParser::parseParens() {
break;
case tok::at:
nextToken();
- if (FormatTok->Tok.is(tok::l_brace)) {
+ if (FormatTok->is(tok::l_brace)) {
nextToken();
parseBracedList();
}
@@ -2170,11 +2417,23 @@ void UnwrappedLineParser::parseParens() {
case tok::identifier:
if (Style.isJavaScript() &&
(FormatTok->is(Keywords.kw_function) ||
- FormatTok->startsSequence(Keywords.kw_async, Keywords.kw_function)))
+ FormatTok->startsSequence(Keywords.kw_async,
+ Keywords.kw_function))) {
tryToParseJSFunction();
- else
+ } else {
nextToken();
+ }
+ break;
+ case tok::kw_requires: {
+ auto RequiresToken = FormatTok;
+ nextToken();
+ parseRequiresExpression(RequiresToken);
break;
+ }
+ case tok::ampamp:
+ if (AmpAmpTokenType != TT_Unknown)
+ FormatTok->setFinalizedType(AmpAmpTokenType);
+ LLVM_FALLTHROUGH;
default:
nextToken();
break;
@@ -2184,7 +2443,7 @@ void UnwrappedLineParser::parseParens() {
void UnwrappedLineParser::parseSquare(bool LambdaIntroducer) {
if (!LambdaIntroducer) {
- assert(FormatTok->Tok.is(tok::l_square) && "'[' expected.");
+ assert(FormatTok->is(tok::l_square) && "'[' expected.");
if (tryToParseLambda())
return;
}
@@ -2209,7 +2468,7 @@ void UnwrappedLineParser::parseSquare(bool LambdaIntroducer) {
}
case tok::at:
nextToken();
- if (FormatTok->Tok.is(tok::l_brace)) {
+ if (FormatTok->is(tok::l_brace)) {
nextToken();
parseBracedList();
}
@@ -2232,6 +2491,52 @@ void UnwrappedLineParser::keepAncestorBraces() {
NestedTooDeep.push_back(false);
}
+static FormatToken *getLastNonComment(const UnwrappedLine &Line) {
+ for (const auto &Token : llvm::reverse(Line.Tokens))
+ if (Token.Tok->isNot(tok::comment))
+ return Token.Tok;
+
+ return nullptr;
+}
+
+void UnwrappedLineParser::parseUnbracedBody(bool CheckEOF) {
+ FormatToken *Tok = nullptr;
+
+ if (Style.InsertBraces && !Line->InPPDirective && !Line->Tokens.empty() &&
+ PreprocessorDirectives.empty()) {
+ Tok = getLastNonComment(*Line);
+ assert(Tok);
+ if (Tok->BraceCount < 0) {
+ assert(Tok->BraceCount == -1);
+ Tok = nullptr;
+ } else {
+ Tok->BraceCount = -1;
+ }
+ }
+
+ addUnwrappedLine();
+ ++Line->Level;
+ parseStructuralElement();
+
+ if (Tok) {
+ assert(!Line->InPPDirective);
+ Tok = nullptr;
+ for (const auto &L : llvm::reverse(*CurrentLines)) {
+ if (!L.InPPDirective && getLastNonComment(L)) {
+ Tok = L.Tokens.back().Tok;
+ break;
+ }
+ }
+ assert(Tok);
+ ++Tok->BraceCount;
+ }
+
+ if (CheckEOF && FormatTok->is(tok::eof))
+ addUnwrappedLine();
+
+ --Line->Level;
+}
+
static void markOptionalBraces(FormatToken *LeftBrace) {
if (!LeftBrace)
return;
@@ -2252,24 +2557,40 @@ static void markOptionalBraces(FormatToken *LeftBrace) {
RightBrace->Optional = true;
}
+void UnwrappedLineParser::handleAttributes() {
+ // Handle AttributeMacro, e.g. `if (x) UNLIKELY`.
+ if (FormatTok->is(TT_AttributeMacro))
+ nextToken();
+ handleCppAttributes();
+}
+
+bool UnwrappedLineParser::handleCppAttributes() {
+ // Handle [[likely]] / [[unlikely]] attributes.
+ if (FormatTok->is(tok::l_square) && tryToParseSimpleAttribute()) {
+ parseSquare();
+ return true;
+ }
+ return false;
+}
+
FormatToken *UnwrappedLineParser::parseIfThenElse(IfStmtKind *IfKind,
bool KeepBraces) {
- auto HandleAttributes = [this]() {
- // Handle AttributeMacro, e.g. `if (x) UNLIKELY`.
- if (FormatTok->is(TT_AttributeMacro))
- nextToken();
- // Handle [[likely]] / [[unlikely]] attributes.
- if (FormatTok->is(tok::l_square) && tryToParseSimpleAttribute())
- parseSquare();
- };
-
- assert(FormatTok->Tok.is(tok::kw_if) && "'if' expected");
+ assert(FormatTok->is(tok::kw_if) && "'if' expected");
nextToken();
- if (FormatTok->Tok.isOneOf(tok::kw_constexpr, tok::identifier))
+ if (FormatTok->is(tok::exclaim))
nextToken();
- if (FormatTok->Tok.is(tok::l_paren))
- parseParens();
- HandleAttributes();
+
+ bool KeepIfBraces = true;
+ if (FormatTok->is(tok::kw_consteval)) {
+ nextToken();
+ } else {
+ KeepIfBraces = !Style.RemoveBracesLLVM || KeepBraces;
+ if (FormatTok->isOneOf(tok::kw_constexpr, tok::identifier))
+ nextToken();
+ if (FormatTok->is(tok::l_paren))
+ parseParens();
+ }
+ handleAttributes();
bool NeedsUnwrappedLine = false;
keepAncestorBraces();
@@ -2277,48 +2598,62 @@ FormatToken *UnwrappedLineParser::parseIfThenElse(IfStmtKind *IfKind,
FormatToken *IfLeftBrace = nullptr;
IfStmtKind IfBlockKind = IfStmtKind::NotIf;
- if (FormatTok->Tok.is(tok::l_brace)) {
+ if (Keywords.isBlockBegin(*FormatTok, Style)) {
+ FormatTok->setFinalizedType(TT_ControlStatementLBrace);
IfLeftBrace = FormatTok;
CompoundStatementIndenter Indenter(this, Style, Line->Level);
- IfBlockKind = parseBlock();
+ parseBlock(/*MustBeDeclaration=*/false, /*AddLevels=*/1u,
+ /*MunchSemi=*/true, KeepIfBraces, &IfBlockKind);
if (Style.BraceWrapping.BeforeElse)
addUnwrappedLine();
else
NeedsUnwrappedLine = true;
} else {
- addUnwrappedLine();
- ++Line->Level;
- parseStructuralElement();
- --Line->Level;
+ parseUnbracedBody();
}
- bool KeepIfBraces = false;
if (Style.RemoveBracesLLVM) {
assert(!NestedTooDeep.empty());
- KeepIfBraces = (IfLeftBrace && !IfLeftBrace->MatchingParen) ||
+ KeepIfBraces = KeepIfBraces ||
+ (IfLeftBrace && !IfLeftBrace->MatchingParen) ||
NestedTooDeep.back() || IfBlockKind == IfStmtKind::IfOnly ||
IfBlockKind == IfStmtKind::IfElseIf;
}
+ bool KeepElseBraces = KeepIfBraces;
FormatToken *ElseLeftBrace = nullptr;
IfStmtKind Kind = IfStmtKind::IfOnly;
- if (FormatTok->Tok.is(tok::kw_else)) {
+ if (FormatTok->is(tok::kw_else)) {
if (Style.RemoveBracesLLVM) {
NestedTooDeep.back() = false;
Kind = IfStmtKind::IfElse;
}
nextToken();
- HandleAttributes();
- if (FormatTok->Tok.is(tok::l_brace)) {
+ handleAttributes();
+ if (Keywords.isBlockBegin(*FormatTok, Style)) {
+ const bool FollowedByIf = Tokens->peekNextToken()->is(tok::kw_if);
+ FormatTok->setFinalizedType(TT_ElseLBrace);
ElseLeftBrace = FormatTok;
CompoundStatementIndenter Indenter(this, Style, Line->Level);
- if (parseBlock() == IfStmtKind::IfOnly)
- Kind = IfStmtKind::IfElseIf;
+ IfStmtKind ElseBlockKind = IfStmtKind::NotIf;
+ FormatToken *IfLBrace =
+ parseBlock(/*MustBeDeclaration=*/false, /*AddLevels=*/1u,
+ /*MunchSemi=*/true, KeepElseBraces, &ElseBlockKind);
+ if (FormatTok->is(tok::kw_else)) {
+ KeepElseBraces = KeepElseBraces ||
+ ElseBlockKind == IfStmtKind::IfOnly ||
+ ElseBlockKind == IfStmtKind::IfElseIf;
+ } else if (FollowedByIf && IfLBrace && !IfLBrace->Optional) {
+ KeepElseBraces = true;
+ assert(ElseLeftBrace->MatchingParen);
+ markOptionalBraces(ElseLeftBrace);
+ }
addUnwrappedLine();
- } else if (FormatTok->Tok.is(tok::kw_if)) {
- FormatToken *Previous = Tokens->getPreviousToken();
- const bool IsPrecededByComment = Previous && Previous->is(tok::comment);
+ } else if (FormatTok->is(tok::kw_if)) {
+ const FormatToken *Previous = Tokens->getPreviousToken();
+ assert(Previous);
+ const bool IsPrecededByComment = Previous->is(tok::comment);
if (IsPrecededByComment) {
addUnwrappedLine();
++Line->Level;
@@ -2328,23 +2663,16 @@ FormatToken *UnwrappedLineParser::parseIfThenElse(IfStmtKind *IfKind,
Kind = IfStmtKind::IfElseIf;
TooDeep = NestedTooDeep.pop_back_val();
}
- ElseLeftBrace =
- parseIfThenElse(/*IfKind=*/nullptr, KeepBraces || KeepIfBraces);
+ ElseLeftBrace = parseIfThenElse(/*IfKind=*/nullptr, KeepIfBraces);
if (Style.RemoveBracesLLVM)
NestedTooDeep.push_back(TooDeep);
if (IsPrecededByComment)
--Line->Level;
} else {
- addUnwrappedLine();
- ++Line->Level;
- parseStructuralElement();
- if (FormatTok->is(tok::eof))
- addUnwrappedLine();
- --Line->Level;
+ parseUnbracedBody(/*CheckEOF=*/true);
}
} else {
- if (Style.RemoveBracesLLVM)
- KeepIfBraces = KeepIfBraces || IfBlockKind == IfStmtKind::IfElse;
+ KeepIfBraces = KeepIfBraces || IfBlockKind == IfStmtKind::IfElse;
if (NeedsUnwrappedLine)
addUnwrappedLine();
}
@@ -2353,12 +2681,13 @@ FormatToken *UnwrappedLineParser::parseIfThenElse(IfStmtKind *IfKind,
return nullptr;
assert(!NestedTooDeep.empty());
- const bool KeepElseBraces =
- (ElseLeftBrace && !ElseLeftBrace->MatchingParen) || NestedTooDeep.back();
+ KeepElseBraces = KeepElseBraces ||
+ (ElseLeftBrace && !ElseLeftBrace->MatchingParen) ||
+ NestedTooDeep.back();
NestedTooDeep.pop_back();
- if (!KeepBraces && !KeepIfBraces && !KeepElseBraces) {
+ if (!KeepIfBraces && !KeepElseBraces) {
markOptionalBraces(IfLeftBrace);
markOptionalBraces(ElseLeftBrace);
} else if (IfLeftBrace) {
@@ -2410,20 +2739,18 @@ void UnwrappedLineParser::parseTryCatch() {
}
}
// Parse try with resource.
- if (Style.Language == FormatStyle::LK_Java && FormatTok->is(tok::l_paren)) {
+ if (Style.Language == FormatStyle::LK_Java && FormatTok->is(tok::l_paren))
parseParens();
- }
keepAncestorBraces();
if (FormatTok->is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Style, Line->Level);
parseBlock();
- if (Style.BraceWrapping.BeforeCatch) {
+ if (Style.BraceWrapping.BeforeCatch)
addUnwrappedLine();
- } else {
+ else
NeedsUnwrappedLine = true;
- }
} else if (!FormatTok->is(tok::kw_catch)) {
// The C++ standard requires a compound-statement after a try.
// If there's none, we try to assume there's a structuralElement
@@ -2440,9 +2767,10 @@ void UnwrappedLineParser::parseTryCatch() {
tok::kw___finally) ||
((Style.Language == FormatStyle::LK_Java || Style.isJavaScript()) &&
FormatTok->is(Keywords.kw_finally)) ||
- (FormatTok->Tok.isObjCAtKeyword(tok::objc_catch) ||
- FormatTok->Tok.isObjCAtKeyword(tok::objc_finally))))
+ (FormatTok->isObjCAtKeyword(tok::objc_catch) ||
+ FormatTok->isObjCAtKeyword(tok::objc_finally)))) {
break;
+ }
nextToken();
while (FormatTok->isNot(tok::l_brace)) {
if (FormatTok->is(tok::l_paren)) {
@@ -2457,6 +2785,7 @@ void UnwrappedLineParser::parseTryCatch() {
nextToken();
}
NeedsUnwrappedLine = false;
+ Line->MustBeDeclaration = false;
CompoundStatementIndenter Indenter(this, Style, Line->Level);
parseBlock();
if (Style.BraceWrapping.BeforeCatch)
@@ -2482,15 +2811,17 @@ void UnwrappedLineParser::parseNamespace() {
parseParens();
} else {
while (FormatTok->isOneOf(tok::identifier, tok::coloncolon, tok::kw_inline,
- tok::l_square, tok::period) ||
+ tok::l_square, tok::period, tok::l_paren) ||
(Style.isCSharp() && FormatTok->is(tok::kw_union))) {
if (FormatTok->is(tok::l_square))
parseSquare();
+ else if (FormatTok->is(tok::l_paren))
+ parseParens();
else
nextToken();
}
}
- if (FormatTok->Tok.is(tok::l_brace)) {
+ if (FormatTok->is(tok::l_brace)) {
if (ShouldBreakBeforeBrace(Style, InitialToken))
addUnwrappedLine();
@@ -2509,13 +2840,13 @@ void UnwrappedLineParser::parseNamespace() {
if (ManageWhitesmithsBraces)
++Line->Level;
- parseBlock(/*MustBeDeclaration=*/true, AddLevels,
- /*MunchSemi=*/true,
- /*UnindentWhitesmithsBraces=*/ManageWhitesmithsBraces);
+ parseBlock(/*MustBeDeclaration=*/true, AddLevels, /*MunchSemi=*/true,
+ /*KeepBraces=*/true, /*IfKind=*/nullptr,
+ ManageWhitesmithsBraces);
// Munch the semicolon after a namespace. This is more common than one would
// think. Putting the semicolon on its own line is very ugly.
- if (FormatTok->Tok.is(tok::semi))
+ if (FormatTok->is(tok::semi))
nextToken();
addUnwrappedLine(AddLevels > 0 ? LineLevel::Remove : LineLevel::Keep);
@@ -2564,64 +2895,58 @@ void UnwrappedLineParser::parseNew() {
} while (!eof());
}
-void UnwrappedLineParser::parseForOrWhileLoop() {
- assert(FormatTok->isOneOf(tok::kw_for, tok::kw_while, TT_ForEachMacro) &&
- "'for', 'while' or foreach macro expected");
- nextToken();
- // JS' for await ( ...
- if (Style.isJavaScript() && FormatTok->is(Keywords.kw_await))
- nextToken();
- if (Style.isCpp() && FormatTok->is(tok::kw_co_await))
- nextToken();
- if (FormatTok->Tok.is(tok::l_paren))
- parseParens();
-
+void UnwrappedLineParser::parseLoopBody(bool KeepBraces, bool WrapRightBrace) {
keepAncestorBraces();
- if (FormatTok->Tok.is(tok::l_brace)) {
+ if (Keywords.isBlockBegin(*FormatTok, Style)) {
+ if (!KeepBraces)
+ FormatTok->setFinalizedType(TT_ControlStatementLBrace);
FormatToken *LeftBrace = FormatTok;
CompoundStatementIndenter Indenter(this, Style, Line->Level);
- parseBlock();
- if (Style.RemoveBracesLLVM) {
+ parseBlock(/*MustBeDeclaration=*/false, /*AddLevels=*/1u,
+ /*MunchSemi=*/true, KeepBraces);
+ if (!KeepBraces) {
assert(!NestedTooDeep.empty());
if (!NestedTooDeep.back())
markOptionalBraces(LeftBrace);
}
- addUnwrappedLine();
+ if (WrapRightBrace)
+ addUnwrappedLine();
} else {
- addUnwrappedLine();
- ++Line->Level;
- parseStructuralElement();
- --Line->Level;
+ parseUnbracedBody();
}
- if (Style.RemoveBracesLLVM)
+ if (!KeepBraces)
NestedTooDeep.pop_back();
}
-void UnwrappedLineParser::parseDoWhile() {
- assert(FormatTok->Tok.is(tok::kw_do) && "'do' expected");
+void UnwrappedLineParser::parseForOrWhileLoop() {
+ assert(FormatTok->isOneOf(tok::kw_for, tok::kw_while, TT_ForEachMacro) &&
+ "'for', 'while' or foreach macro expected");
+ const bool KeepBraces = !Style.RemoveBracesLLVM ||
+ !FormatTok->isOneOf(tok::kw_for, tok::kw_while);
+
nextToken();
+ // JS' for await ( ...
+ if (Style.isJavaScript() && FormatTok->is(Keywords.kw_await))
+ nextToken();
+ if (Style.isCpp() && FormatTok->is(tok::kw_co_await))
+ nextToken();
+ if (FormatTok->is(tok::l_paren))
+ parseParens();
- keepAncestorBraces();
+ handleAttributes();
+ parseLoopBody(KeepBraces, /*WrapRightBrace=*/true);
+}
- if (FormatTok->Tok.is(tok::l_brace)) {
- CompoundStatementIndenter Indenter(this, Style, Line->Level);
- parseBlock();
- if (Style.BraceWrapping.BeforeWhile)
- addUnwrappedLine();
- } else {
- addUnwrappedLine();
- ++Line->Level;
- parseStructuralElement();
- --Line->Level;
- }
+void UnwrappedLineParser::parseDoWhile() {
+ assert(FormatTok->is(tok::kw_do) && "'do' expected");
+ nextToken();
- if (Style.RemoveBracesLLVM)
- NestedTooDeep.pop_back();
+ parseLoopBody(/*KeepBraces=*/true, Style.BraceWrapping.BeforeWhile);
// FIXME: Add error handling.
- if (!FormatTok->Tok.is(tok::kw_while)) {
+ if (!FormatTok->is(tok::kw_while)) {
addUnwrappedLine();
return;
}
@@ -2644,13 +2969,13 @@ void UnwrappedLineParser::parseLabel(bool LeftAlignLabel) {
Line->Level = 0;
if (!Style.IndentCaseBlocks && CommentsBeforeNextToken.empty() &&
- FormatTok->Tok.is(tok::l_brace)) {
+ FormatTok->is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Line->Level,
Style.BraceWrapping.AfterCaseLabel,
Style.BraceWrapping.IndentBraces);
parseBlock();
- if (FormatTok->Tok.is(tok::kw_break)) {
+ if (FormatTok->is(tok::kw_break)) {
if (Style.BraceWrapping.AfterControlStatement ==
FormatStyle::BWACS_Always) {
addUnwrappedLine();
@@ -2675,24 +3000,24 @@ void UnwrappedLineParser::parseLabel(bool LeftAlignLabel) {
}
void UnwrappedLineParser::parseCaseLabel() {
- assert(FormatTok->Tok.is(tok::kw_case) && "'case' expected");
+ assert(FormatTok->is(tok::kw_case) && "'case' expected");
// FIXME: fix handling of complex expressions here.
do {
nextToken();
- } while (!eof() && !FormatTok->Tok.is(tok::colon));
+ } while (!eof() && !FormatTok->is(tok::colon));
parseLabel();
}
void UnwrappedLineParser::parseSwitch() {
- assert(FormatTok->Tok.is(tok::kw_switch) && "'switch' expected");
+ assert(FormatTok->is(tok::kw_switch) && "'switch' expected");
nextToken();
- if (FormatTok->Tok.is(tok::l_paren))
+ if (FormatTok->is(tok::l_paren))
parseParens();
keepAncestorBraces();
- if (FormatTok->Tok.is(tok::l_brace)) {
+ if (FormatTok->is(tok::l_brace)) {
CompoundStatementIndenter Indenter(this, Style, Line->Level);
parseBlock();
addUnwrappedLine();
@@ -2707,6 +3032,57 @@ void UnwrappedLineParser::parseSwitch() {
NestedTooDeep.pop_back();
}
+// Operators that can follow a C variable.
+static bool isCOperatorFollowingVar(tok::TokenKind kind) {
+ switch (kind) {
+ case tok::ampamp:
+ case tok::ampequal:
+ case tok::arrow:
+ case tok::caret:
+ case tok::caretequal:
+ case tok::comma:
+ case tok::ellipsis:
+ case tok::equal:
+ case tok::equalequal:
+ case tok::exclaim:
+ case tok::exclaimequal:
+ case tok::greater:
+ case tok::greaterequal:
+ case tok::greatergreater:
+ case tok::greatergreaterequal:
+ case tok::l_paren:
+ case tok::l_square:
+ case tok::less:
+ case tok::lessequal:
+ case tok::lessless:
+ case tok::lesslessequal:
+ case tok::minus:
+ case tok::minusequal:
+ case tok::minusminus:
+ case tok::percent:
+ case tok::percentequal:
+ case tok::period:
+ case tok::pipe:
+ case tok::pipeequal:
+ case tok::pipepipe:
+ case tok::plus:
+ case tok::plusequal:
+ case tok::plusplus:
+ case tok::question:
+ case tok::r_brace:
+ case tok::r_paren:
+ case tok::r_square:
+ case tok::semi:
+ case tok::slash:
+ case tok::slashequal:
+ case tok::star:
+ case tok::starequal:
+ return true;
+ default:
+ return false;
+ }
+}
+
void UnwrappedLineParser::parseAccessSpecifier() {
FormatToken *AccessSpecifierCandidate = FormatTok;
nextToken();
@@ -2714,13 +3090,11 @@ void UnwrappedLineParser::parseAccessSpecifier() {
if (FormatTok->isOneOf(Keywords.kw_slots, Keywords.kw_qslots))
nextToken();
// Otherwise, we don't know what it is, and we'd better keep the next token.
- if (FormatTok->Tok.is(tok::colon)) {
+ if (FormatTok->is(tok::colon)) {
nextToken();
addUnwrappedLine();
- } else if (!FormatTok->Tok.is(tok::coloncolon) &&
- !std::binary_search(COperatorsFollowingVar.begin(),
- COperatorsFollowingVar.end(),
- FormatTok->Tok.getKind())) {
+ } else if (!FormatTok->is(tok::coloncolon) &&
+ !isCOperatorFollowingVar(FormatTok->Tok.getKind())) {
// Not a variable name nor namespace name.
addUnwrappedLine();
} else if (AccessSpecifierCandidate) {
@@ -2729,122 +3103,367 @@ void UnwrappedLineParser::parseAccessSpecifier() {
}
}
+/// \brief Parses a concept definition.
+/// \pre The current token has to be the concept keyword.
+///
+/// Returns once the concept has been completely parsed, or once it detects
+/// that the concept definition is incorrect.
void UnwrappedLineParser::parseConcept() {
- assert(FormatTok->Tok.is(tok::kw_concept) && "'concept' expected");
+ assert(FormatTok->is(tok::kw_concept) && "'concept' expected");
nextToken();
- if (!FormatTok->Tok.is(tok::identifier))
+ if (!FormatTok->is(tok::identifier))
return;
nextToken();
- if (!FormatTok->Tok.is(tok::equal))
+ if (!FormatTok->is(tok::equal))
return;
nextToken();
- if (FormatTok->Tok.is(tok::kw_requires)) {
+ parseConstraintExpression();
+ if (FormatTok->is(tok::semi))
nextToken();
- parseRequiresExpression(Line->Level);
- } else {
- parseConstraintExpression(Line->Level);
- }
+ addUnwrappedLine();
}
-void UnwrappedLineParser::parseRequiresExpression(unsigned int OriginalLevel) {
- // requires (R range)
- if (FormatTok->Tok.is(tok::l_paren)) {
- parseParens();
- if (Style.IndentRequires && OriginalLevel != Line->Level) {
- addUnwrappedLine();
- --Line->Level;
- }
+/// \brief Parses a requires, decides if it is a clause or an expression.
+/// \pre The current token has to be the requires keyword.
+/// \returns true if it parsed a clause.
+bool clang::format::UnwrappedLineParser::parseRequires() {
+ assert(FormatTok->is(tok::kw_requires) && "'requires' expected");
+ auto RequiresToken = FormatTok;
+
+ // We try to guess if it is a requires clause, or a requires expression. For
+ // that we first consume the keyword and check the next token.
+ nextToken();
+
+ switch (FormatTok->Tok.getKind()) {
+ case tok::l_brace:
+ // This can only be an expression, never a clause.
+ parseRequiresExpression(RequiresToken);
+ return false;
+ case tok::l_paren:
+    // Both clauses and expressions can start with a paren; it's unclear which
+    // one we have here.
+ break;
+ default:
+ // All other tokens can only be a clause.
+ parseRequiresClause(RequiresToken);
+ return true;
}
- if (FormatTok->Tok.is(tok::l_brace)) {
- if (Style.BraceWrapping.AfterFunction)
- addUnwrappedLine();
- FormatTok->setType(TT_FunctionLBrace);
- parseBlock();
- addUnwrappedLine();
- } else {
- parseConstraintExpression(OriginalLevel);
+  // Looking forward, we would have to decide if there are
+  // function-declaration-like arguments to the requires expression:
+ // requires (T t) {
+ // Or there is a constraint expression for the requires clause:
+ // requires (C<T> && ...
+
+ // But first let's look behind.
+ auto *PreviousNonComment = RequiresToken->getPreviousNonComment();
+
+ if (!PreviousNonComment ||
+ PreviousNonComment->is(TT_RequiresExpressionLBrace)) {
+ // If there is no token, or an expression left brace, we are a requires
+ // clause within a requires expression.
+ parseRequiresClause(RequiresToken);
+ return true;
}
-}
-void UnwrappedLineParser::parseConstraintExpression(
- unsigned int OriginalLevel) {
- // requires Id<T> && Id<T> || Id<T>
- while (
- FormatTok->isOneOf(tok::identifier, tok::kw_requires, tok::coloncolon)) {
- nextToken();
- while (FormatTok->isOneOf(tok::identifier, tok::coloncolon, tok::less,
- tok::greater, tok::comma, tok::ellipsis)) {
- if (FormatTok->Tok.is(tok::less)) {
- parseBracedList(/*ContinueOnSemicolons=*/false, /*IsEnum=*/false,
- /*ClosingBraceKind=*/tok::greater);
- continue;
- }
- nextToken();
- }
- if (FormatTok->Tok.is(tok::kw_requires)) {
- parseRequiresExpression(OriginalLevel);
+ switch (PreviousNonComment->Tok.getKind()) {
+ case tok::greater:
+ case tok::r_paren:
+ case tok::kw_noexcept:
+ case tok::kw_const:
+ // This is a requires clause.
+ parseRequiresClause(RequiresToken);
+ return true;
+ case tok::amp:
+ case tok::ampamp: {
+ // This can be either:
+ // if (... && requires (T t) ...)
+ // Or
+ // void member(...) && requires (C<T> ...
+ // We check the one token before that for a const:
+ // void member(...) const && requires (C<T> ...
+ auto PrevPrev = PreviousNonComment->getPreviousNonComment();
+ if (PrevPrev && PrevPrev->is(tok::kw_const)) {
+ parseRequiresClause(RequiresToken);
+ return true;
}
- if (FormatTok->Tok.is(tok::less)) {
- parseBracedList(/*ContinueOnSemicolons=*/false, /*IsEnum=*/false,
- /*ClosingBraceKind=*/tok::greater);
+ break;
+ }
+ default:
+ if (PreviousNonComment->isTypeOrIdentifier()) {
+ // This is a requires clause.
+ parseRequiresClause(RequiresToken);
+ return true;
}
+ // It's an expression.
+ parseRequiresExpression(RequiresToken);
+ return false;
+ }
- if (FormatTok->Tok.is(tok::l_paren)) {
- parseParens();
- }
- if (FormatTok->Tok.is(tok::l_brace)) {
- if (Style.BraceWrapping.AfterFunction)
- addUnwrappedLine();
- FormatTok->setType(TT_FunctionLBrace);
- parseBlock();
- }
- if (FormatTok->Tok.is(tok::semi)) {
- // Eat any trailing semi.
- nextToken();
- addUnwrappedLine();
- }
- if (FormatTok->Tok.is(tok::colon)) {
- return;
- }
- if (!FormatTok->Tok.isOneOf(tok::ampamp, tok::pipepipe)) {
- if (FormatTok->Previous &&
- !FormatTok->Previous->isOneOf(tok::identifier, tok::kw_requires,
- tok::coloncolon)) {
- addUnwrappedLine();
+ // Now we look forward and try to check if the paren content is a parameter
+ // list. The parameters can be cv-qualified and contain references or
+ // pointers.
+ // So basically we want to check for TYPE NAME, but TYPE can contain all
+ // kinds of stuff: typename, const, *, &, &&, ::, identifiers.
+
+ int NextTokenOffset = 1;
+ auto NextToken = Tokens->peekNextToken(NextTokenOffset);
+ auto PeekNext = [&NextTokenOffset, &NextToken, this] {
+ ++NextTokenOffset;
+ NextToken = Tokens->peekNextToken(NextTokenOffset);
+ };
+
+ bool FoundType = false;
+ bool LastWasColonColon = false;
+ int OpenAngles = 0;
+
+ for (; NextTokenOffset < 50; PeekNext()) {
+ switch (NextToken->Tok.getKind()) {
+ case tok::kw_volatile:
+ case tok::kw_const:
+ case tok::comma:
+ parseRequiresExpression(RequiresToken);
+ return false;
+ case tok::r_paren:
+ case tok::pipepipe:
+ parseRequiresClause(RequiresToken);
+ return true;
+ case tok::eof:
+ // Break out of the loop.
+ NextTokenOffset = 50;
+ break;
+ case tok::coloncolon:
+ LastWasColonColon = true;
+ break;
+ case tok::identifier:
+ if (FoundType && !LastWasColonColon && OpenAngles == 0) {
+ parseRequiresExpression(RequiresToken);
+ return false;
}
- if (Style.IndentRequires && OriginalLevel != Line->Level) {
- --Line->Level;
+ FoundType = true;
+ LastWasColonColon = false;
+ break;
+ case tok::less:
+ ++OpenAngles;
+ break;
+ case tok::greater:
+ --OpenAngles;
+ break;
+ default:
+ if (NextToken->isSimpleTypeSpecifier()) {
+ parseRequiresExpression(RequiresToken);
+ return false;
}
break;
- } else {
- FormatTok->setType(TT_ConstraintJunctions);
}
+ }
- nextToken();
+ // This seems to be a complicated expression; just assume it's a clause.
+ parseRequiresClause(RequiresToken);
+ return true;
+}
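
[Editorial illustration, not part of the diff: both forms below begin with
"requires (", which is the ambiguous case this heuristic resolves.]

    template <typename T>
    void f(T t) requires (Foo<T>);                 // clause: the look-ahead hits ')'
                                                   // before a TYPE NAME pair

    template <typename T>
    concept Usable = requires (T t) { t.use(); };  // expression: 'T t' is a
                                                   // parameter-list TYPE NAME pair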
+
+/// \brief Parses a requires clause.
+/// \param RequiresToken The requires keyword token, which starts this clause.
+/// \pre We need to be on the next token after the requires keyword.
+/// \sa parseRequiresExpression
+///
+/// Returns when it has either finished parsing the clause or detected that
+/// the clause is incorrect.
+void UnwrappedLineParser::parseRequiresClause(FormatToken *RequiresToken) {
+ assert(FormatTok->getPreviousNonComment() == RequiresToken);
+ assert(RequiresToken->is(tok::kw_requires) && "'requires' expected");
+
+ // If there is no previous token, we are within a requires expression;
+ // otherwise we will always have the template or function declaration in
+ // front of it.
+ bool InRequiresExpression =
+ !RequiresToken->Previous ||
+ RequiresToken->Previous->is(TT_RequiresExpressionLBrace);
+
+ RequiresToken->setFinalizedType(InRequiresExpression
+ ? TT_RequiresClauseInARequiresExpression
+ : TT_RequiresClause);
+
+ parseConstraintExpression();
+
+ if (!InRequiresExpression)
+ FormatTok->Previous->ClosesRequiresClause = true;
+}
+
+/// \brief Parses a requires expression.
+/// \param RequiresToken The requires keyword token, which starts this
+/// expression.
+/// \pre We need to be on the next token after the requires keyword.
+/// \sa parseRequiresClause
+///
+/// Returns when it has either finished parsing the expression or detected
+/// that the expression is incorrect.
+void UnwrappedLineParser::parseRequiresExpression(FormatToken *RequiresToken) {
+ assert(FormatTok->getPreviousNonComment() == RequiresToken);
+ assert(RequiresToken->is(tok::kw_requires) && "'requires' expected");
+
+ RequiresToken->setFinalizedType(TT_RequiresExpression);
+
+ if (FormatTok->is(tok::l_paren)) {
+ FormatTok->setFinalizedType(TT_RequiresExpressionLParen);
+ parseParens();
+ }
+
+ if (FormatTok->is(tok::l_brace)) {
+ FormatTok->setFinalizedType(TT_RequiresExpressionLBrace);
+ parseChildBlock(/*CanContainBracedList=*/false,
+ /*NextLBracesType=*/TT_CompoundRequirementLBrace);
}
}
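
[Editorial sketch, not part of the diff: where the token types finalized here
land on a typical requires expression.]

    requires (T t)                            // '(' -> TT_RequiresExpressionLParen
    {                                         // '{' -> TT_RequiresExpressionLBrace
      { t.begin() } -> std::input_iterator;   // inner '{' -> TT_CompoundRequirementLBrace
    }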
-void UnwrappedLineParser::parseRequires() {
- assert(FormatTok->Tok.is(tok::kw_requires) && "'requires' expected");
+/// \brief Parses a constraint expression.
+///
+/// This is either the definition of a concept, or the body of a requires
+/// clause. It returns when parsing is complete or the expression is
+/// incorrect.
+void UnwrappedLineParser::parseConstraintExpression() {
+ // The special handling for lambdas is needed since tryToParseLambda() eats a
+ // token. If a requires expression is the last part of a requires clause and
+ // is followed by an attribute like [[nodiscard]], ClosesRequiresClause would
+ // otherwise not be set on the correct token. Thus we need to be aware whether
+ // a lambda is even possible here, e.g.:
+ // template <typename T> requires requires { ... } [[nodiscard]] ...;
+ bool LambdaNextTimeAllowed = true;
+ do {
+ bool LambdaThisTimeAllowed = std::exchange(LambdaNextTimeAllowed, false);
- unsigned OriginalLevel = Line->Level;
- if (FormatTok->Previous && FormatTok->Previous->is(tok::greater)) {
- addUnwrappedLine();
- if (Style.IndentRequires) {
- ++Line->Level;
+ switch (FormatTok->Tok.getKind()) {
+ case tok::kw_requires: {
+ auto RequiresToken = FormatTok;
+ nextToken();
+ parseRequiresExpression(RequiresToken);
+ break;
}
- }
- nextToken();
- parseRequiresExpression(OriginalLevel);
+ case tok::l_paren:
+ parseParens(/*AmpAmpTokenType=*/TT_BinaryOperator);
+ break;
+
+ case tok::l_square:
+ if (!LambdaThisTimeAllowed || !tryToParseLambda())
+ return;
+ break;
+
+ case tok::kw_const:
+ case tok::semi:
+ case tok::kw_class:
+ case tok::kw_struct:
+ case tok::kw_union:
+ return;
+
+ case tok::l_brace:
+ // Potential function body.
+ return;
+
+ case tok::ampamp:
+ case tok::pipepipe:
+ FormatTok->setFinalizedType(TT_BinaryOperator);
+ nextToken();
+ LambdaNextTimeAllowed = true;
+ break;
+
+ case tok::comma:
+ case tok::comment:
+ LambdaNextTimeAllowed = LambdaThisTimeAllowed;
+ nextToken();
+ break;
+
+ case tok::kw_sizeof:
+ case tok::greater:
+ case tok::greaterequal:
+ case tok::greatergreater:
+ case tok::less:
+ case tok::lessequal:
+ case tok::lessless:
+ case tok::equalequal:
+ case tok::exclaim:
+ case tok::exclaimequal:
+ case tok::plus:
+ case tok::minus:
+ case tok::star:
+ case tok::slash:
+ case tok::kw_decltype:
+ LambdaNextTimeAllowed = true;
+ // Just eat them.
+ nextToken();
+ break;
+
+ case tok::numeric_constant:
+ case tok::coloncolon:
+ case tok::kw_true:
+ case tok::kw_false:
+ // Just eat them.
+ nextToken();
+ break;
+
+ case tok::kw_static_cast:
+ case tok::kw_const_cast:
+ case tok::kw_reinterpret_cast:
+ case tok::kw_dynamic_cast:
+ nextToken();
+ if (!FormatTok->is(tok::less))
+ return;
+
+ nextToken();
+ parseBracedList(/*ContinueOnSemicolons=*/false, /*IsEnum=*/false,
+ /*ClosingBraceKind=*/tok::greater);
+ break;
+
+ case tok::kw_bool:
+ // bool is only allowed if it is directly followed by a paren for a cast:
+ // concept C = bool(...);
+ // and bool is the only type allowed here; casts to all other types must be
+ // wrapped in a cast to bool and are thus handled by the other cases.
+ nextToken();
+ if (FormatTok->isNot(tok::l_paren))
+ return;
+ parseParens();
+ break;
+
+ default:
+ if (!FormatTok->Tok.getIdentifierInfo()) {
+ // Identifiers are part of the default case; we check for more than
+ // tok::identifier to handle builtin type traits.
+ return;
+ }
+
+ // We need to tell apart identifiers that start a template deduction guide,
+ // a variable, or a function return type (in those cases the constraint
+ // expression has already ended) from basically all other cases. But it's
+ // easier to check the other way around.
+ assert(FormatTok->Previous);
+ switch (FormatTok->Previous->Tok.getKind()) {
+ case tok::coloncolon: // Nested identifier.
+ case tok::ampamp: // Start of a function or variable for the
+ case tok::pipepipe: // constraint expression.
+ case tok::kw_requires: // Initial identifier of a requires clause.
+ case tok::equal: // Initial identifier of a concept declaration.
+ break;
+ default:
+ return;
+ }
+
+ // Read identifier with optional template declaration.
+ nextToken();
+ if (FormatTok->is(tok::less)) {
+ nextToken();
+ parseBracedList(/*ContinueOnSemicolons=*/false, /*IsEnum=*/false,
+ /*ClosingBraceKind=*/tok::greater);
+ }
+ break;
+ }
+ } while (!eof());
}
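
[Editorial example, not part of the diff: a concept definition exercising
several of the cases above -- a bool cast, binary operators, a nested requires
expression, and an identifier with a template argument list.]

    template <typename T>
    concept C = bool(sizeof(T) == 4) &&
                requires { typename T::type; } &&
                std::same_as<T, int>;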
bool UnwrappedLineParser::parseEnum() {
const FormatToken &InitialToken = *FormatTok;
// Won't be 'enum' for NS_ENUMs.
- if (FormatTok->Tok.is(tok::kw_enum))
+ if (FormatTok->is(tok::kw_enum))
nextToken();
// In TypeScript, "enum" can also be used as property name, e.g. in interface
@@ -2858,16 +3477,23 @@ bool UnwrappedLineParser::parseEnum() {
return false;
// Eat up enum class ...
- if (FormatTok->Tok.is(tok::kw_class) || FormatTok->Tok.is(tok::kw_struct))
+ if (FormatTok->isOneOf(tok::kw_class, tok::kw_struct))
nextToken();
while (FormatTok->Tok.getIdentifierInfo() ||
FormatTok->isOneOf(tok::colon, tok::coloncolon, tok::less,
- tok::greater, tok::comma, tok::question)) {
+ tok::greater, tok::comma, tok::question,
+ tok::l_square, tok::r_square)) {
nextToken();
// We can have macros or attributes in between 'enum' and the enum name.
if (FormatTok->is(tok::l_paren))
parseParens();
+ if (FormatTok->is(TT_AttributeSquare)) {
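+ // E.g. an attribute between 'enum' and the name: enum class [[nodiscard]] E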
+ parseSquare();
+ // Consume the closing TT_AttributeSquare.
+ if (FormatTok->Next && FormatTok->is(TT_AttributeSquare))
+ nextToken();
+ }
if (FormatTok->is(tok::identifier)) {
nextToken();
// If there are two identifiers in a row, this is likely an elaborate
@@ -2880,7 +3506,7 @@ bool UnwrappedLineParser::parseEnum() {
// Just a declaration or something is wrong.
if (FormatTok->isNot(tok::l_brace))
return true;
- FormatTok->setType(TT_RecordLBrace);
+ FormatTok->setFinalizedType(TT_EnumLBrace);
FormatTok->setBlockKind(BK_Block);
if (Style.Language == FormatStyle::LK_Java) {
@@ -2894,8 +3520,9 @@ bool UnwrappedLineParser::parseEnum() {
}
if (!Style.AllowShortEnumsOnASingleLine &&
- ShouldBreakBeforeBrace(Style, InitialToken))
+ ShouldBreakBeforeBrace(Style, InitialToken)) {
addUnwrappedLine();
+ }
// Parse enum body.
nextToken();
if (!Style.AllowShortEnumsOnASingleLine) {
@@ -2956,31 +3583,30 @@ bool UnwrappedLineParser::tryToParseSimpleAttribute() {
ScopedTokenPosition AutoPosition(Tokens);
FormatToken *Tok = Tokens->getNextToken();
// We already read the first [ check for the second.
- if (!Tok->is(tok::l_square)) {
+ if (!Tok->is(tok::l_square))
return false;
- }
// Double check that the attribute is just something
// fairly simple.
while (Tok->isNot(tok::eof)) {
- if (Tok->is(tok::r_square)) {
+ if (Tok->is(tok::r_square))
break;
- }
Tok = Tokens->getNextToken();
}
if (Tok->is(tok::eof))
return false;
Tok = Tokens->getNextToken();
- if (!Tok->is(tok::r_square)) {
+ if (!Tok->is(tok::r_square))
return false;
- }
Tok = Tokens->getNextToken();
- if (Tok->is(tok::semi)) {
+ if (Tok->is(tok::semi))
return false;
- }
return true;
}
void UnwrappedLineParser::parseJavaEnumBody() {
+ assert(FormatTok->is(tok::l_brace));
+ const FormatToken *OpeningBrace = FormatTok;
+
// Determine whether the enum is simple, i.e. does not have a semicolon or
// constants with class bodies. Simple enums can be formatted like braced
// lists, contracted to a single line, etc.
@@ -3014,7 +3640,7 @@ void UnwrappedLineParser::parseJavaEnumBody() {
++Line->Level;
// Parse the enum constants.
- while (FormatTok) {
+ while (FormatTok->isNot(tok::eof)) {
if (FormatTok->is(tok::l_brace)) {
// Parse the constant's class body.
parseBlock(/*MustBeDeclaration=*/true, /*AddLevels=*/1u,
@@ -3037,7 +3663,7 @@ void UnwrappedLineParser::parseJavaEnumBody() {
}
// Parse the class body after the enum's ";" if any.
- parseLevel(/*HasOpeningBrace=*/true);
+ parseLevel(OpeningBrace);
nextToken();
--Line->Level;
addUnwrappedLine();
@@ -3072,7 +3698,7 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
nextToken();
// We can have macros or attributes in between 'class' and the class name.
if (!IsNonMacroIdentifier) {
- if (FormatTok->Tok.is(tok::l_paren)) {
+ if (FormatTok->is(tok::l_paren)) {
parseParens();
} else if (FormatTok->is(TT_AttributeSquare)) {
parseSquare();
@@ -3094,7 +3720,7 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
// (this would still leave us with an ambiguity between template function
// and class declarations).
if (FormatTok->isOneOf(tok::colon, tok::less)) {
- while (!eof()) {
+ do {
if (FormatTok->is(tok::l_brace)) {
calculateBraceTypes(/*ExpectClassBody=*/true);
if (!tryToParseBracedList())
@@ -3108,9 +3734,12 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
// it was probably a pointer to an array: int (*)[].
if (!tryToParseLambda())
break;
+ } else {
+ parseSquare();
+ continue;
}
}
- if (FormatTok->Tok.is(tok::semi))
+ if (FormatTok->is(tok::semi))
return;
if (Style.isCSharp() && FormatTok->is(Keywords.kw_where)) {
addUnwrappedLine();
@@ -3119,10 +3748,24 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
break;
}
nextToken();
- }
+ } while (!eof());
}
- if (FormatTok->Tok.is(tok::l_brace)) {
- FormatTok->setType(TT_RecordLBrace);
+
+ auto GetBraceType = [](const FormatToken &RecordTok) {
+ switch (RecordTok.Tok.getKind()) {
+ case tok::kw_class:
+ return TT_ClassLBrace;
+ case tok::kw_struct:
+ return TT_StructLBrace;
+ case tok::kw_union:
+ return TT_UnionLBrace;
+ default:
+ // Useful for e.g. interface.
+ return TT_RecordLBrace;
+ }
+ };
+ if (FormatTok->is(tok::l_brace)) {
+ FormatTok->setFinalizedType(GetBraceType(InitialToken));
if (ParseAsExpr) {
parseChildBlock();
} else {
@@ -3139,14 +3782,14 @@ void UnwrappedLineParser::parseRecord(bool ParseAsExpr) {
}
void UnwrappedLineParser::parseObjCMethod() {
- assert(FormatTok->Tok.isOneOf(tok::l_paren, tok::identifier) &&
+ assert(FormatTok->isOneOf(tok::l_paren, tok::identifier) &&
"'(' or identifier expected.");
do {
- if (FormatTok->Tok.is(tok::semi)) {
+ if (FormatTok->is(tok::semi)) {
nextToken();
addUnwrappedLine();
return;
- } else if (FormatTok->Tok.is(tok::l_brace)) {
+ } else if (FormatTok->is(tok::l_brace)) {
if (Style.BraceWrapping.AfterFunction)
addUnwrappedLine();
parseBlock();
@@ -3159,20 +3802,21 @@ void UnwrappedLineParser::parseObjCMethod() {
}
void UnwrappedLineParser::parseObjCProtocolList() {
- assert(FormatTok->Tok.is(tok::less) && "'<' expected.");
+ assert(FormatTok->is(tok::less) && "'<' expected.");
do {
nextToken();
// Early exit in case someone forgot a close angle.
if (FormatTok->isOneOf(tok::semi, tok::l_brace) ||
- FormatTok->Tok.isObjCAtKeyword(tok::objc_end))
+ FormatTok->isObjCAtKeyword(tok::objc_end)) {
return;
- } while (!eof() && FormatTok->Tok.isNot(tok::greater));
+ }
+ } while (!eof() && FormatTok->isNot(tok::greater));
nextToken(); // Skip '>'.
}
void UnwrappedLineParser::parseObjCUntilAtEnd() {
do {
- if (FormatTok->Tok.isObjCAtKeyword(tok::objc_end)) {
+ if (FormatTok->isObjCAtKeyword(tok::objc_end)) {
nextToken();
addUnwrappedLine();
break;
@@ -3202,24 +3846,23 @@ void UnwrappedLineParser::parseObjCInterfaceOrImplementation() {
// @interface can be followed by a lightweight generic
// specialization list, then either a base class or a category.
- if (FormatTok->Tok.is(tok::less)) {
+ if (FormatTok->is(tok::less))
parseObjCLightweightGenerics();
- }
- if (FormatTok->Tok.is(tok::colon)) {
+ if (FormatTok->is(tok::colon)) {
nextToken();
nextToken(); // base class name
// The base class can also have lightweight generics applied to it.
- if (FormatTok->Tok.is(tok::less)) {
+ if (FormatTok->is(tok::less))
parseObjCLightweightGenerics();
- }
- } else if (FormatTok->Tok.is(tok::l_paren))
+ } else if (FormatTok->is(tok::l_paren)) {
// Skip category, if present.
parseParens();
+ }
- if (FormatTok->Tok.is(tok::less))
+ if (FormatTok->is(tok::less))
parseObjCProtocolList();
- if (FormatTok->Tok.is(tok::l_brace)) {
+ if (FormatTok->is(tok::l_brace)) {
if (Style.BraceWrapping.AfterObjCDeclaration)
addUnwrappedLine();
parseBlock(/*MustBeDeclaration=*/true);
@@ -3233,7 +3876,7 @@ void UnwrappedLineParser::parseObjCInterfaceOrImplementation() {
}
void UnwrappedLineParser::parseObjCLightweightGenerics() {
- assert(FormatTok->Tok.is(tok::less));
+ assert(FormatTok->is(tok::less));
// Unlike protocol lists, generic parameterizations support
// nested angles:
//
@@ -3246,11 +3889,12 @@ void UnwrappedLineParser::parseObjCLightweightGenerics() {
nextToken();
// Early exit in case someone forgot a close angle.
if (FormatTok->isOneOf(tok::semi, tok::l_brace) ||
- FormatTok->Tok.isObjCAtKeyword(tok::objc_end))
+ FormatTok->isObjCAtKeyword(tok::objc_end)) {
break;
- if (FormatTok->Tok.is(tok::less))
+ }
+ if (FormatTok->is(tok::less)) {
++NumOpenAngles;
- else if (FormatTok->Tok.is(tok::greater)) {
+ } else if (FormatTok->is(tok::greater)) {
assert(NumOpenAngles > 0 && "'>' makes NumOpenAngles negative");
--NumOpenAngles;
}
@@ -3264,9 +3908,10 @@ bool UnwrappedLineParser::parseObjCProtocol() {
assert(FormatTok->Tok.getObjCKeywordID() == tok::objc_protocol);
nextToken();
- if (FormatTok->is(tok::l_paren))
+ if (FormatTok->is(tok::l_paren)) {
// The expression form of @protocol, e.g. "Protocol* p = @protocol(foo);".
return false;
+ }
// The definition/declaration form,
// @protocol Foo
@@ -3275,11 +3920,11 @@ bool UnwrappedLineParser::parseObjCProtocol() {
nextToken(); // protocol name
- if (FormatTok->Tok.is(tok::less))
+ if (FormatTok->is(tok::less))
parseObjCProtocolList();
// Check for protocol declaration.
- if (FormatTok->Tok.is(tok::semi)) {
+ if (FormatTok->is(tok::semi)) {
nextToken();
addUnwrappedLine();
return true;
@@ -3314,8 +3959,9 @@ void UnwrappedLineParser::parseJavaScriptEs6ImportExport() {
// parsing the structural element, i.e. the declaration or expression for
// `export default`.
if (!IsImport && !FormatTok->isOneOf(tok::l_brace, tok::star) &&
- !FormatTok->isStringLiteral())
+ !FormatTok->isStringLiteral()) {
return;
+ }
while (!eof()) {
if (FormatTok->is(tok::semi))
@@ -3394,7 +4040,7 @@ void UnwrappedLineParser::addUnwrappedLine(LineLevel AdjustLevel) {
FormatTok->Previous = nullptr;
}
-bool UnwrappedLineParser::eof() const { return FormatTok->Tok.is(tok::eof); }
+bool UnwrappedLineParser::eof() const { return FormatTok->is(tok::eof); }
bool UnwrappedLineParser::isOnNewLine(const FormatToken &FormatTok) {
return (Line->InPPDirective || FormatTok.HasUnescapedNewline) &&
@@ -3412,8 +4058,9 @@ continuesLineCommentSection(const FormatToken &FormatTok,
StringRef IndentContent = FormatTok.TokenText;
if (FormatTok.TokenText.startswith("//") ||
- FormatTok.TokenText.startswith("/*"))
+ FormatTok.TokenText.startswith("/*")) {
IndentContent = FormatTok.TokenText.substr(2);
+ }
if (CommentPragmasRegex.match(IndentContent))
return false;
@@ -3497,13 +4144,11 @@ continuesLineCommentSection(const FormatToken &FormatTok,
PreviousToken = Node.Tok;
// Grab the last newline preceding a token in this unwrapped line.
- if (Node.Tok->NewlinesBefore > 0) {
+ if (Node.Tok->NewlinesBefore > 0)
MinColumnToken = Node.Tok;
- }
}
- if (PreviousToken && PreviousToken->is(tok::l_brace)) {
+ if (PreviousToken && PreviousToken->is(tok::l_brace))
MinColumnToken = PreviousToken;
- }
return continuesLineComment(FormatTok, /*Previous=*/Line.Tokens.back().Tok,
MinColumnToken);
@@ -3542,6 +4187,16 @@ void UnwrappedLineParser::nextToken(int LevelDifference) {
else
readTokenWithJavaScriptASI();
FormatTok->Previous = Previous;
+ if (Style.isVerilog()) {
+ // Blocks in Verilog can have `begin` and `end` instead of braces. For
+ // keywords like `begin`, we can't treat them the same as left braces
+ // because some contexts require one of them. For example structs use
+ // braces and if blocks use keywords, and a left brace can occur in an if
+ // statement, but it is not a block. For keywords like `end`, we simply
+ // treat them the same as right braces.
+ if (Keywords.isVerilogEnd(*FormatTok))
+ FormatTok->Tok.setKind(tok::r_brace);
+ }
}
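
[Editorial illustration of the Verilog handling above, not part of the diff;
sample input:]

    if (x) begin   // 'begin' stays a keyword: whether it opens a block
      y = 1;       //   depends on context
    end            // 'end' is remapped to tok::r_brace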
void UnwrappedLineParser::distributeComments(
@@ -3591,35 +4246,55 @@ void UnwrappedLineParser::distributeComments(
(isOnNewLine(*FormatTok) || FormatTok->IsFirst)) {
ShouldPushCommentsInCurrentLine = false;
}
- if (ShouldPushCommentsInCurrentLine) {
+ if (ShouldPushCommentsInCurrentLine)
pushToken(FormatTok);
- } else {
+ else
CommentsBeforeNextToken.push_back(FormatTok);
- }
}
}
void UnwrappedLineParser::readToken(int LevelDifference) {
SmallVector<FormatToken *, 1> Comments;
+ bool PreviousWasComment = false;
+ bool FirstNonCommentOnLine = false;
do {
FormatTok = Tokens->getNextToken();
assert(FormatTok);
while (FormatTok->getType() == TT_ConflictStart ||
FormatTok->getType() == TT_ConflictEnd ||
FormatTok->getType() == TT_ConflictAlternative) {
- if (FormatTok->getType() == TT_ConflictStart) {
+ if (FormatTok->getType() == TT_ConflictStart)
conditionalCompilationStart(/*Unreachable=*/false);
- } else if (FormatTok->getType() == TT_ConflictAlternative) {
+ else if (FormatTok->getType() == TT_ConflictAlternative)
conditionalCompilationAlternative();
- } else if (FormatTok->getType() == TT_ConflictEnd) {
+ else if (FormatTok->getType() == TT_ConflictEnd)
conditionalCompilationEnd();
- }
FormatTok = Tokens->getNextToken();
FormatTok->MustBreakBefore = true;
}
- while (!Line->InPPDirective && FormatTok->Tok.is(tok::hash) &&
- (FormatTok->HasUnescapedNewline || FormatTok->IsFirst)) {
+ auto IsFirstNonCommentOnLine = [](bool FirstNonCommentOnLine,
+ const FormatToken &Tok,
+ bool PreviousWasComment) {
+ auto IsFirstOnLine = [](const FormatToken &Tok) {
+ return Tok.HasUnescapedNewline || Tok.IsFirst;
+ };
+
+ // Consider preprocessor directives preceded by block comments as first
+ // on line.
+ if (PreviousWasComment)
+ return FirstNonCommentOnLine || IsFirstOnLine(Tok);
+ return IsFirstOnLine(Tok);
+ };
+
+ FirstNonCommentOnLine = IsFirstNonCommentOnLine(
+ FirstNonCommentOnLine, *FormatTok, PreviousWasComment);
+ PreviousWasComment = FormatTok->is(tok::comment);
+
+ while (!Line->InPPDirective && FormatTok->is(tok::hash) &&
+ (!Style.isVerilog() ||
+ Keywords.isVerilogPPDirective(*Tokens->peekNextToken())) &&
+ FirstNonCommentOnLine) {
distributeComments(Comments, FormatTok);
Comments.clear();
// If there is an unfinished unwrapped line, we flush the preprocessor
@@ -3634,10 +4309,14 @@ void UnwrappedLineParser::readToken(int LevelDifference) {
// before the preprocessor directive, at the same level as the
// preprocessor directive, as we consider them to apply to the directive.
if (Style.IndentPPDirectives == FormatStyle::PPDIS_BeforeHash &&
- PPBranchLevel > 0)
+ PPBranchLevel > 0) {
Line->Level += PPBranchLevel;
+ }
flushComments(isOnNewLine(*FormatTok));
parsePPDirective();
+ PreviousWasComment = FormatTok->is(tok::comment);
+ FirstNonCommentOnLine = IsFirstNonCommentOnLine(
+ FirstNonCommentOnLine, *FormatTok, PreviousWasComment);
}
if (!PPStack.empty() && (PPStack.back().Kind == PP_Unreachable) &&
@@ -3645,7 +4324,7 @@ void UnwrappedLineParser::readToken(int LevelDifference) {
continue;
}
- if (!FormatTok->Tok.is(tok::comment)) {
+ if (!FormatTok->is(tok::comment)) {
distributeComments(Comments, FormatTok);
Comments.clear();
return;
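
[Editorial note on the FirstNonCommentOnLine change above, not part of the
diff: a directive preceded only by a block comment on the same line is now
still treated as a preprocessor line, e.g.:]

    /* header guard */ #ifndef FOO_H
    #define FOO_H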
diff --git a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h
index 3f64d57c7bff..8f63870412d0 100644
--- a/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h
+++ b/contrib/llvm-project/clang/lib/Format/UnwrappedLineParser.h
@@ -92,12 +92,21 @@ private:
void reset();
void parseFile();
bool precededByCommentOrPPDirective() const;
- bool mightFitOnOneLine() const;
- bool parseLevel(bool HasOpeningBrace, IfStmtKind *IfKind = nullptr);
- IfStmtKind parseBlock(bool MustBeDeclaration = false, unsigned AddLevels = 1u,
- bool MunchSemi = true,
- bool UnindentWhitesmithsBraces = false);
- void parseChildBlock();
+ bool parseLevel(const FormatToken *OpeningBrace = nullptr,
+ bool CanContainBracedList = true,
+ TokenType NextLBracesType = TT_Unknown,
+ IfStmtKind *IfKind = nullptr,
+ FormatToken **IfLeftBrace = nullptr);
+ bool mightFitOnOneLine(UnwrappedLine &Line,
+ const FormatToken *OpeningBrace = nullptr) const;
+ FormatToken *parseBlock(bool MustBeDeclaration = false,
+ unsigned AddLevels = 1u, bool MunchSemi = true,
+ bool KeepBraces = true, IfStmtKind *IfKind = nullptr,
+ bool UnindentWhitesmithsBraces = false,
+ bool CanContainBracedList = true,
+ TokenType NextLBracesType = TT_Unknown);
+ void parseChildBlock(bool CanContainBracedList = true,
+ TokenType NextLBracesType = TT_Unknown);
void parsePPDirective();
void parsePPDefine();
void parsePPIf(bool IfDef);
@@ -106,16 +115,24 @@ private:
void parsePPEndIf();
void parsePPUnknown();
void readTokenWithJavaScriptASI();
- void parseStructuralElement(IfStmtKind *IfKind = nullptr,
- bool IsTopLevel = false);
+ void parseStructuralElement(bool IsTopLevel = false,
+ TokenType NextLBracesType = TT_Unknown,
+ IfStmtKind *IfKind = nullptr,
+ FormatToken **IfLeftBrace = nullptr,
+ bool *HasDoWhile = nullptr,
+ bool *HasLabel = nullptr);
bool tryToParseBracedList();
bool parseBracedList(bool ContinueOnSemicolons = false, bool IsEnum = false,
tok::TokenKind ClosingBraceKind = tok::r_brace);
- void parseParens();
+ void parseParens(TokenType AmpAmpTokenType = TT_Unknown);
void parseSquare(bool LambdaIntroducer = false);
void keepAncestorBraces();
+ void parseUnbracedBody(bool CheckEOF = false);
+ void handleAttributes();
+ bool handleCppAttributes();
FormatToken *parseIfThenElse(IfStmtKind *IfKind, bool KeepBraces = false);
void parseTryCatch();
+ void parseLoopBody(bool KeepBraces, bool WrapRightBrace);
void parseForOrWhileLoop();
void parseDoWhile();
void parseLabel(bool LeftAlignLabel = false);
@@ -128,9 +145,10 @@ private:
bool parseEnum();
bool parseStructLike();
void parseConcept();
- void parseRequires();
- void parseRequiresExpression(unsigned int OriginalLevel);
- void parseConstraintExpression(unsigned int OriginalLevel);
+ bool parseRequires();
+ void parseRequiresClause(FormatToken *RequiresToken);
+ void parseRequiresExpression(FormatToken *RequiresToken);
+ void parseConstraintExpression();
void parseJavaEnumBody();
// Parses a record (aka class) as a top level element. If ParseAsExpr is true,
// parses the record as a child block, i.e. if the class declaration is an
diff --git a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp
index f5a0b9963b5d..6ec788ad23c6 100644
--- a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp
+++ b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.cpp
@@ -167,11 +167,12 @@ void WhitespaceManager::calculateLineBreakInformation() {
// If there are multiple changes in this token, sum up all the changes until
// the end of the line.
- if (Changes[i - 1].IsInsideToken && Changes[i - 1].NewlinesBefore == 0)
+ if (Changes[i - 1].IsInsideToken && Changes[i - 1].NewlinesBefore == 0) {
LastOutsideTokenChange->TokenLength +=
Changes[i - 1].TokenLength + Changes[i - 1].Spaces;
- else
+ } else {
LastOutsideTokenChange = &Changes[i - 1];
+ }
Changes[i].PreviousEndOfTokenColumn =
Changes[i - 1].StartOfTokenColumn + Changes[i - 1].TokenLength;
@@ -227,13 +228,12 @@ void WhitespaceManager::calculateLineBreakInformation() {
Change.StartOfBlockComment = nullptr;
Change.IndentationOffset = 0;
if (Change.Tok->is(tok::comment)) {
- if (Change.Tok->is(TT_LineComment) || !Change.IsInsideToken)
+ if (Change.Tok->is(TT_LineComment) || !Change.IsInsideToken) {
LastBlockComment = &Change;
- else {
- if ((Change.StartOfBlockComment = LastBlockComment))
- Change.IndentationOffset =
- Change.StartOfTokenColumn -
- Change.StartOfBlockComment->StartOfTokenColumn;
+ } else if ((Change.StartOfBlockComment = LastBlockComment)) {
+ Change.IndentationOffset =
+ Change.StartOfTokenColumn -
+ Change.StartOfBlockComment->StartOfTokenColumn;
}
} else {
LastBlockComment = nullptr;
@@ -260,19 +260,20 @@ void WhitespaceManager::calculateLineBreakInformation() {
Change.ConditionalsLevel = ConditionalsLevel;
- for (unsigned i = Change.Tok->FakeRParens; i > 0 && ScopeStack.size();
- --i) {
+ for (unsigned i = Change.Tok->FakeRParens; i > 0 && ScopeStack.size(); --i)
if (ScopeStack.pop_back_val())
--ConditionalsLevel;
- }
}
}
// Align a single sequence of tokens, see AlignTokens below.
+// Column - The token for which Matches returns true is moved to this column.
+// RightJustify - Whether it is the token's right end or left end that gets
+// moved to that column.
template <typename F>
static void
AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
- unsigned Column, F &&Matches,
+ unsigned Column, bool RightJustify, F &&Matches,
SmallVector<WhitespaceManager::Change, 16> &Changes) {
bool FoundMatchOnLine = false;
int Shift = 0;
@@ -302,18 +303,21 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
for (unsigned i = Start; i != End; ++i) {
if (ScopeStack.size() != 0 &&
Changes[i].indentAndNestingLevel() <
- Changes[ScopeStack.back()].indentAndNestingLevel())
+ Changes[ScopeStack.back()].indentAndNestingLevel()) {
ScopeStack.pop_back();
+ }
// Compare current token to previous non-comment token to ensure whether
// it is in a deeper scope or not.
unsigned PreviousNonComment = i - 1;
while (PreviousNonComment > Start &&
- Changes[PreviousNonComment].Tok->is(tok::comment))
+ Changes[PreviousNonComment].Tok->is(tok::comment)) {
--PreviousNonComment;
+ }
if (i != Start && Changes[i].indentAndNestingLevel() >
- Changes[PreviousNonComment].indentAndNestingLevel())
+ Changes[PreviousNonComment].indentAndNestingLevel()) {
ScopeStack.push_back(i);
+ }
bool InsideNestedScope = ScopeStack.size() != 0;
bool ContinuedStringLiteral = i > Start &&
@@ -331,8 +335,16 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
// shifted by the same amount
if (!FoundMatchOnLine && !SkipMatchCheck && Matches(Changes[i])) {
FoundMatchOnLine = true;
- Shift = Column - Changes[i].StartOfTokenColumn;
+ Shift = Column - (RightJustify ? Changes[i].TokenLength : 0) -
+ Changes[i].StartOfTokenColumn;
Changes[i].Spaces += Shift;
+ // FIXME: This is a workaround that should be removed when we fix
+ // http://llvm.org/PR53699. An assertion later below verifies this.
+ if (Changes[i].NewlinesBefore == 0) {
+ Changes[i].Spaces =
+ std::max(Changes[i].Spaces,
+ static_cast<int>(Changes[i].Tok->SpacesRequiredBefore));
+ }
}
// This is for function parameters that are split across multiple lines,
@@ -350,8 +362,9 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
// Continued function declaration
if (ScopeStart > Start + 1 &&
- Changes[ScopeStart - 2].Tok->is(TT_FunctionDeclarationName))
+ Changes[ScopeStart - 2].Tok->is(TT_FunctionDeclarationName)) {
return true;
+ }
// Continued function call
if (ScopeStart > Start + 1 &&
@@ -359,8 +372,15 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
Changes[ScopeStart - 1].Tok->is(tok::l_paren) &&
Changes[ScopeStart].Tok->isNot(TT_LambdaLSquare)) {
if (Changes[i].Tok->MatchingParen &&
- Changes[i].Tok->MatchingParen->is(TT_LambdaLBrace))
+ Changes[i].Tok->MatchingParen->is(TT_LambdaLBrace)) {
+ return false;
+ }
+ if (Changes[ScopeStart].NewlinesBefore > 0)
return false;
+ if (Changes[i].Tok->is(tok::l_brace) &&
+ Changes[i].Tok->is(BK_BracedInit)) {
+ return true;
+ }
return Style.BinPackArguments;
}
@@ -374,8 +394,18 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
// Continued ternary operator
if (Changes[i].Tok->Previous &&
- Changes[i].Tok->Previous->is(TT_ConditionalExpr))
+ Changes[i].Tok->Previous->is(TT_ConditionalExpr)) {
return true;
+ }
+
+ // Continued direct-list-initialization using braced list.
+ if (ScopeStart > Start + 1 &&
+ Changes[ScopeStart - 2].Tok->is(tok::identifier) &&
+ Changes[ScopeStart - 1].Tok->is(tok::l_brace) &&
+ Changes[i].Tok->is(tok::l_brace) &&
+ Changes[i].Tok->is(BK_BracedInit)) {
+ return true;
+ }
// Continued braced list.
if (ScopeStart > Start + 1 &&
@@ -385,9 +415,12 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
for (unsigned OuterScopeStart : llvm::reverse(ScopeStack)) {
// Lambda.
if (OuterScopeStart > Start &&
- Changes[OuterScopeStart - 1].Tok->is(TT_LambdaLBrace))
+ Changes[OuterScopeStart - 1].Tok->is(TT_LambdaLBrace)) {
return false;
+ }
}
+ if (Changes[ScopeStart].NewlinesBefore > 0)
+ return false;
return true;
}
@@ -401,6 +434,12 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
if (ContinuedStringLiteral)
Changes[i].Spaces += Shift;
+ // We should not remove required spaces unless we break the line before.
+ assert(Shift >= 0 || Changes[i].NewlinesBefore > 0 ||
+ Changes[i].Spaces >=
+ static_cast<int>(Changes[i].Tok->SpacesRequiredBefore) ||
+ Changes[i].Tok->is(tok::eof));
+
Changes[i].StartOfTokenColumn += Shift;
if (i + 1 != Changes.size())
Changes[i + 1].PreviousEndOfTokenColumn += Shift;
@@ -447,13 +486,31 @@ AlignTokenSequence(const FormatStyle &Style, unsigned Start, unsigned End,
// However, the special exception is that we do NOT skip function parameters
// that are split across multiple lines. See the test case in FormatTest.cpp
// that mentions "split function parameter alignment" for an example of this.
+// When the parameter RightJustify is true, the operator will be
+// right-justified. It is used to align compound assignments like `+=` and `=`.
+// When RightJustify and ACS.PadOperators are true, operators in each block to
+// be aligned will be padded on the left to the same length before aligning.
template <typename F>
-static unsigned AlignTokens(
- const FormatStyle &Style, F &&Matches,
- SmallVector<WhitespaceManager::Change, 16> &Changes, unsigned StartAt,
- const FormatStyle::AlignConsecutiveStyle &ACS = FormatStyle::ACS_None) {
- unsigned MinColumn = 0;
- unsigned MaxColumn = UINT_MAX;
+static unsigned AlignTokens(const FormatStyle &Style, F &&Matches,
+ SmallVector<WhitespaceManager::Change, 16> &Changes,
+ unsigned StartAt,
+ const FormatStyle::AlignConsecutiveStyle &ACS = {},
+ bool RightJustify = false) {
+ // We arrange each line in 3 parts: the operator to be aligned (the anchor),
+ // and the text to its left and right. In the aligned text the width of each
+ // part will be the maximum of that part over the block being aligned.
+ // WidthLeft tracks the maximum width of the left part so far. When
+ // RightJustify is true and ACS.PadOperators is false, that part runs from
+ // the start of the line to the right end of the anchor; otherwise it is only
+ // the part to the left of the anchor, including the space that exists on its
+ // left from the start but not the padding added on the left to right-justify
+ // the anchor.
+ unsigned WidthLeft = 0;
+ // The width of the anchor when RightJustify and ACS.PadOperators are both
+ // true; 0 otherwise (see the assignment to ChangeWidthAnchor below).
+ unsigned WidthAnchor = 0;
+ // Width to the right of the anchor, plus the width of the anchor itself
+ // when RightJustify is false.
+ unsigned WidthRight = 0;
// Line number of the start and the end of the current token sequence.
unsigned StartOfSequence = 0;
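
[Editorial sketch of the three widths, not part of the diff; assuming
AlignConsecutiveAssignments with AlignCompound and PadOperators enabled, the
anchors' right ends line up and shorter operators are padded on the left:]

    a   &= 2;   // left = "a", anchor = "&=", right = " 2;"
    bbb  = 2;   // "=" is right-justified against "&="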
@@ -485,11 +542,14 @@ static unsigned AlignTokens(
// We need to adjust the StartOfTokenColumn of each Change that is on a line
// containing any matching token to be aligned and located after such token.
auto AlignCurrentSequence = [&] {
- if (StartOfSequence > 0 && StartOfSequence < EndOfSequence)
- AlignTokenSequence(Style, StartOfSequence, EndOfSequence, MinColumn,
- Matches, Changes);
- MinColumn = 0;
- MaxColumn = UINT_MAX;
+ if (StartOfSequence > 0 && StartOfSequence < EndOfSequence) {
+ AlignTokenSequence(Style, StartOfSequence, EndOfSequence,
+ WidthLeft + WidthAnchor, RightJustify, Matches,
+ Changes);
+ }
+ WidthLeft = 0;
+ WidthAnchor = 0;
+ WidthRight = 0;
StartOfSequence = 0;
EndOfSequence = 0;
};
@@ -505,17 +565,12 @@ static unsigned AlignTokens(
// Whether to break the alignment sequence because of an empty line.
bool EmptyLineBreak =
- (Changes[i].NewlinesBefore > 1) &&
- (ACS != FormatStyle::ACS_AcrossEmptyLines) &&
- (ACS != FormatStyle::ACS_AcrossEmptyLinesAndComments);
+ (Changes[i].NewlinesBefore > 1) && !ACS.AcrossEmptyLines;
// Whether to break the alignment sequence because of a line without a
// match.
bool NoMatchBreak =
- !FoundMatchOnLine &&
- !(LineIsComment &&
- ((ACS == FormatStyle::ACS_AcrossComments) ||
- (ACS == FormatStyle::ACS_AcrossEmptyLinesAndComments)));
+ !FoundMatchOnLine && !(LineIsComment && ACS.AcrossComments);
if (EmptyLineBreak || NoMatchBreak)
AlignCurrentSequence();
@@ -523,14 +578,14 @@ static unsigned AlignTokens(
// A new line starts, re-initialize line status tracking bools.
// Keep the match state if a string literal is continued on this line.
if (i == 0 || !Changes[i].Tok->is(tok::string_literal) ||
- !Changes[i - 1].Tok->is(tok::string_literal))
+ !Changes[i - 1].Tok->is(tok::string_literal)) {
FoundMatchOnLine = false;
+ }
LineIsComment = true;
}
- if (!Changes[i].Tok->is(tok::comment)) {
+ if (!Changes[i].Tok->is(tok::comment))
LineIsComment = false;
- }
if (Changes[i].Tok->is(tok::comma)) {
++CommasBeforeMatch;
@@ -555,29 +610,44 @@ static unsigned AlignTokens(
if (StartOfSequence == 0)
StartOfSequence = i;
- unsigned ChangeMinColumn = Changes[i].StartOfTokenColumn;
- int LineLengthAfter = Changes[i].TokenLength;
+ unsigned ChangeWidthLeft = Changes[i].StartOfTokenColumn;
+ unsigned ChangeWidthAnchor = 0;
+ unsigned ChangeWidthRight = 0;
+ if (RightJustify)
+ if (ACS.PadOperators)
+ ChangeWidthAnchor = Changes[i].TokenLength;
+ else
+ ChangeWidthLeft += Changes[i].TokenLength;
+ else
+ ChangeWidthRight = Changes[i].TokenLength;
for (unsigned j = i + 1; j != e && Changes[j].NewlinesBefore == 0; ++j) {
- LineLengthAfter += Changes[j].Spaces;
+ ChangeWidthRight += Changes[j].Spaces;
// Changes are generally 1:1 with the tokens, but a change could also be
// inside of a token, in which case it's counted more than once: once for
// the whitespace surrounding the token (!IsInsideToken) and once for
// each whitespace change within it (IsInsideToken).
// Therefore, changes inside of a token should only count the space.
if (!Changes[j].IsInsideToken)
- LineLengthAfter += Changes[j].TokenLength;
+ ChangeWidthRight += Changes[j].TokenLength;
}
- unsigned ChangeMaxColumn = Style.ColumnLimit - LineLengthAfter;
// If we are restricted by the maximum column width, end the sequence.
- if (ChangeMinColumn > MaxColumn || ChangeMaxColumn < MinColumn ||
- CommasBeforeLastMatch != CommasBeforeMatch) {
+ unsigned NewLeft = std::max(ChangeWidthLeft, WidthLeft);
+ unsigned NewAnchor = std::max(ChangeWidthAnchor, WidthAnchor);
+ unsigned NewRight = std::max(ChangeWidthRight, WidthRight);
+ // `ColumnLimit == 0` means there is no column limit.
+ if (Style.ColumnLimit != 0 &&
+ Style.ColumnLimit < NewLeft + NewAnchor + NewRight) {
AlignCurrentSequence();
StartOfSequence = i;
+ WidthLeft = ChangeWidthLeft;
+ WidthAnchor = ChangeWidthAnchor;
+ WidthRight = ChangeWidthRight;
+ } else {
+ WidthLeft = NewLeft;
+ WidthAnchor = NewAnchor;
+ WidthRight = NewRight;
}
-
- MinColumn = std::max(MinColumn, ChangeMinColumn);
- MaxColumn = std::min(MaxColumn, ChangeMaxColumn);
}
EndOfSequence = i;
@@ -631,7 +701,7 @@ static void AlignMacroSequence(
}
void WhitespaceManager::alignConsecutiveMacros() {
- if (Style.AlignConsecutiveMacros == FormatStyle::ACS_None)
+ if (!Style.AlignConsecutiveMacros.Enabled)
return;
auto AlignMacrosMatches = [](const Change &C) {
@@ -682,33 +752,27 @@ void WhitespaceManager::alignConsecutiveMacros() {
EndOfSequence = I;
// Whether to break the alignment sequence because of an empty line.
- bool EmptyLineBreak =
- (Changes[I].NewlinesBefore > 1) &&
- (Style.AlignConsecutiveMacros != FormatStyle::ACS_AcrossEmptyLines) &&
- (Style.AlignConsecutiveMacros !=
- FormatStyle::ACS_AcrossEmptyLinesAndComments);
+ bool EmptyLineBreak = (Changes[I].NewlinesBefore > 1) &&
+ !Style.AlignConsecutiveMacros.AcrossEmptyLines;
// Whether to break the alignment sequence because of a line without a
// match.
bool NoMatchBreak =
!FoundMatchOnLine &&
- !(LineIsComment && ((Style.AlignConsecutiveMacros ==
- FormatStyle::ACS_AcrossComments) ||
- (Style.AlignConsecutiveMacros ==
- FormatStyle::ACS_AcrossEmptyLinesAndComments)));
+ !(LineIsComment && Style.AlignConsecutiveMacros.AcrossComments);
- if (EmptyLineBreak || NoMatchBreak)
+ if (EmptyLineBreak || NoMatchBreak) {
AlignMacroSequence(StartOfSequence, EndOfSequence, MinColumn, MaxColumn,
FoundMatchOnLine, AlignMacrosMatches, Changes);
+ }
// A new line starts, re-initialize line status tracking bools.
FoundMatchOnLine = false;
LineIsComment = true;
}
- if (!Changes[I].Tok->is(tok::comment)) {
+ if (!Changes[I].Tok->is(tok::comment))
LineIsComment = false;
- }
if (!AlignMacrosMatches(Changes[I]))
continue;
@@ -734,7 +798,7 @@ void WhitespaceManager::alignConsecutiveMacros() {
}
void WhitespaceManager::alignConsecutiveAssignments() {
- if (Style.AlignConsecutiveAssignments == FormatStyle::ACS_None)
+ if (!Style.AlignConsecutiveAssignments.Enabled)
return;
AlignTokens(
@@ -753,13 +817,16 @@ void WhitespaceManager::alignConsecutiveAssignments() {
if (Previous && Previous->is(tok::kw_operator))
return false;
- return C.Tok->is(tok::equal);
+ return Style.AlignConsecutiveAssignments.AlignCompound
+ ? C.Tok->getPrecedence() == prec::Assignment
+ : C.Tok->is(tok::equal);
},
- Changes, /*StartAt=*/0, Style.AlignConsecutiveAssignments);
+ Changes, /*StartAt=*/0, Style.AlignConsecutiveAssignments,
+ /*RightJustify=*/true);
}
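
[Editorial sketch of driving the new struct-valued option through the library
API, not part of the diff; the field names are taken from this patch, and the
entry point is the usual clang::format one:]

    clang::format::FormatStyle Style = clang::format::getLLVMStyle();
    Style.AlignConsecutiveAssignments.Enabled = true;
    Style.AlignConsecutiveAssignments.AlignCompound = true;    // also match +=, &=, ...
    Style.AlignConsecutiveAssignments.PadOperators = true;     // right-justify '='
    Style.AlignConsecutiveAssignments.AcrossEmptyLines = false;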
void WhitespaceManager::alignConsecutiveBitFields() {
- if (Style.AlignConsecutiveBitFields == FormatStyle::ACS_None)
+ if (!Style.AlignConsecutiveBitFields.Enabled)
return;
AlignTokens(
@@ -779,7 +846,7 @@ void WhitespaceManager::alignConsecutiveBitFields() {
}
void WhitespaceManager::alignConsecutiveDeclarations() {
- if (Style.AlignConsecutiveDeclarations == FormatStyle::ACS_None)
+ if (!Style.AlignConsecutiveDeclarations.Enabled)
return;
AlignTokens(
@@ -803,8 +870,9 @@ void WhitespaceManager::alignConsecutiveDeclarations() {
if (!Next->Tok.getIdentifierInfo())
break;
if (Next->isOneOf(TT_StartOfName, TT_FunctionDeclarationName,
- tok::kw_operator))
+ tok::kw_operator)) {
return false;
+ }
}
return true;
},
@@ -835,10 +903,9 @@ void WhitespaceManager::alignChainedConditionals() {
// Ensure we keep alignment of wrapped operands with non-wrapped operands
// Since we actually align the operators, the wrapped operands need the
// extra offset to be properly aligned.
- for (Change &C : Changes) {
+ for (Change &C : Changes)
if (AlignWrappedOperand(C))
C.StartOfTokenColumn -= 2;
- }
AlignTokens(
Style,
[this](Change const &C) {
@@ -940,9 +1007,8 @@ void WhitespaceManager::alignTrailingComments(unsigned Start, unsigned End,
unsigned Column) {
for (unsigned i = Start; i != End; ++i) {
int Shift = 0;
- if (Changes[i].IsTrailingComment) {
+ if (Changes[i].IsTrailingComment)
Shift = Column - Changes[i].StartOfTokenColumn;
- }
if (Changes[i].StartOfBlockComment) {
Shift = Changes[i].IndentationOffset +
Changes[i].StartOfBlockComment->StartOfTokenColumn -
@@ -1027,11 +1093,13 @@ void WhitespaceManager::alignArrayInitializers(unsigned Start, unsigned End) {
void WhitespaceManager::alignArrayInitializersRightJustified(
CellDescriptions &&CellDescs) {
- auto &Cells = CellDescs.Cells;
+ if (!CellDescs.isRectangular())
+ return;
+ auto &Cells = CellDescs.Cells;
// Now go through and fixup the spaces.
auto *CellIter = Cells.begin();
- for (auto i = 0U; i < CellDescs.CellCount; ++i, ++CellIter) {
+ for (auto i = 0U; i < CellDescs.CellCounts[0]; ++i, ++CellIter) {
unsigned NetWidth = 0U;
if (isSplitCell(*CellIter))
NetWidth = getNetWidth(Cells.begin(), CellIter, CellDescs.InitialSpaces);
@@ -1041,28 +1109,30 @@ void WhitespaceManager::alignArrayInitializersRightJustified(
// So in here we want to see if there is a brace that falls
// on a line that was split. If so on that line we make sure that
// the spaces in front of the brace are enough.
- Changes[CellIter->Index].NewlinesBefore = 0;
- Changes[CellIter->Index].Spaces = 0;
- for (const auto *Next = CellIter->NextColumnElement; Next != nullptr;
- Next = Next->NextColumnElement) {
- Changes[Next->Index].Spaces = 0;
- Changes[Next->Index].NewlinesBefore = 0;
- }
+ const auto *Next = CellIter;
+ do {
+ const FormatToken *Previous = Changes[Next->Index].Tok->Previous;
+ if (Previous && Previous->isNot(TT_LineComment)) {
+ Changes[Next->Index].Spaces = 0;
+ Changes[Next->Index].NewlinesBefore = 0;
+ }
+ Next = Next->NextColumnElement;
+ } while (Next);
// Unless the array is empty, we need the position of all the
// immediately adjacent cells
if (CellIter != Cells.begin()) {
auto ThisNetWidth =
getNetWidth(Cells.begin(), CellIter, CellDescs.InitialSpaces);
- auto MaxNetWidth =
- getMaximumNetWidth(Cells.begin(), CellIter, CellDescs.InitialSpaces,
- CellDescs.CellCount);
+ auto MaxNetWidth = getMaximumNetWidth(
+ Cells.begin(), CellIter, CellDescs.InitialSpaces,
+ CellDescs.CellCounts[0], CellDescs.CellCounts.size());
if (ThisNetWidth < MaxNetWidth)
Changes[CellIter->Index].Spaces = (MaxNetWidth - ThisNetWidth);
auto RowCount = 1U;
auto Offset = std::distance(Cells.begin(), CellIter);
for (const auto *Next = CellIter->NextColumnElement; Next != nullptr;
Next = Next->NextColumnElement) {
- auto *Start = (Cells.begin() + RowCount * CellDescs.CellCount);
+ auto *Start = (Cells.begin() + RowCount * CellDescs.CellCounts[0]);
auto *End = Start + Offset;
ThisNetWidth = getNetWidth(Start, End, CellDescs.InitialSpaces);
if (ThisNetWidth < MaxNetWidth)
@@ -1095,8 +1165,11 @@ void WhitespaceManager::alignArrayInitializersRightJustified(
void WhitespaceManager::alignArrayInitializersLeftJustified(
CellDescriptions &&CellDescs) {
- auto &Cells = CellDescs.Cells;
+ if (!CellDescs.isRectangular())
+ return;
+
+ auto &Cells = CellDescs.Cells;
// Now go through and fixup the spaces.
auto *CellIter = Cells.begin();
// The first cell needs to be against the left brace.
@@ -1105,9 +1178,10 @@ void WhitespaceManager::alignArrayInitializersLeftJustified(
else
Changes[CellIter->Index].Spaces = CellDescs.InitialSpaces;
++CellIter;
- for (auto i = 1U; i < CellDescs.CellCount; i++, ++CellIter) {
+ for (auto i = 1U; i < CellDescs.CellCounts[0]; i++, ++CellIter) {
auto MaxNetWidth = getMaximumNetWidth(
- Cells.begin(), CellIter, CellDescs.InitialSpaces, CellDescs.CellCount);
+ Cells.begin(), CellIter, CellDescs.InitialSpaces,
+ CellDescs.CellCounts[0], CellDescs.CellCounts.size());
auto ThisNetWidth =
getNetWidth(Cells.begin(), CellIter, CellDescs.InitialSpaces);
if (Changes[CellIter->Index].NewlinesBefore == 0) {
@@ -1119,7 +1193,9 @@ void WhitespaceManager::alignArrayInitializersLeftJustified(
auto Offset = std::distance(Cells.begin(), CellIter);
for (const auto *Next = CellIter->NextColumnElement; Next != nullptr;
Next = Next->NextColumnElement) {
- auto *Start = (Cells.begin() + RowCount * CellDescs.CellCount);
+ if (RowCount > CellDescs.CellCounts.size())
+ break;
+ auto *Start = (Cells.begin() + RowCount * CellDescs.CellCounts[0]);
auto *End = Start + Offset;
auto ThisNetWidth = getNetWidth(Start, End, CellDescs.InitialSpaces);
if (Changes[Next->Index].NewlinesBefore == 0) {
@@ -1148,7 +1224,7 @@ WhitespaceManager::CellDescriptions WhitespaceManager::getCells(unsigned Start,
unsigned Depth = 0;
unsigned Cell = 0;
- unsigned CellCount = 0;
+ SmallVector<unsigned> CellCounts;
unsigned InitialSpaces = 0;
unsigned InitialTokenLength = 0;
unsigned EndSpaces = 0;
@@ -1188,7 +1264,8 @@ WhitespaceManager::CellDescriptions WhitespaceManager::getCells(unsigned Start,
if (!Cells.empty())
Cells.back().EndIndex = i;
Cells.push_back(CellDescription{i, ++Cell, i + 1, false, nullptr});
- CellCount = C.Tok->Previous->isNot(tok::comma) ? Cell + 1 : Cell;
+ CellCounts.push_back(C.Tok->Previous->isNot(tok::comma) ? Cell + 1
+ : Cell);
// Go to the next non-comment and ensure there is a break in front
const auto *NextNonComment = C.Tok->getNextNonComment();
while (NextNonComment->is(tok::comma))
@@ -1258,7 +1335,7 @@ WhitespaceManager::CellDescriptions WhitespaceManager::getCells(unsigned Start,
}
}
- return linkCells({Cells, CellCount, InitialSpaces});
+ return linkCells({Cells, CellCounts, InitialSpaces});
}
unsigned WhitespaceManager::calculateCellWidth(unsigned Start, unsigned End,
@@ -1278,10 +1355,9 @@ void WhitespaceManager::alignToStartOfCell(unsigned Start, unsigned End) {
return;
// If the line is broken anywhere in there make sure everything
// is aligned to the parent
- for (auto i = Start + 1; i < End; i++) {
+ for (auto i = Start + 1; i < End; i++)
if (Changes[i].NewlinesBefore > 0)
Changes[i].Spaces = Changes[Start].Spaces;
- }
}
WhitespaceManager::CellDescriptions
@@ -1311,12 +1387,13 @@ void WhitespaceManager::generateChanges() {
}
if (C.CreateReplacement) {
std::string ReplacementText = C.PreviousLinePostfix;
- if (C.ContinuesPPDirective)
+ if (C.ContinuesPPDirective) {
appendEscapedNewlineText(ReplacementText, C.NewlinesBefore,
C.PreviousEndOfTokenColumn,
C.EscapedNewlineColumn);
- else
+ } else {
appendNewlineText(ReplacementText, C.NewlinesBefore);
+ }
// FIXME: This assert should hold if we computed the column correctly.
// assert((int)C.StartOfTokenColumn >= C.Spaces);
appendIndentText(
@@ -1334,8 +1411,9 @@ void WhitespaceManager::storeReplacement(SourceRange Range, StringRef Text) {
SourceMgr.getFileOffset(Range.getBegin());
// Don't create a replacement, if it does not change anything.
if (StringRef(SourceMgr.getCharacterData(Range.getBegin()),
- WhitespaceLength) == Text)
+ WhitespaceLength) == Text) {
return;
+ }
auto Err = Replaces.add(tooling::Replacement(
SourceMgr, CharSourceRange::getCharRange(Range), Text));
// FIXME: better error handling. For now, just print an error message in the
diff --git a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.h b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.h
index 42f958f68ba6..2be62338bc9a 100644
--- a/contrib/llvm-project/clang/lib/Format/WhitespaceManager.h
+++ b/contrib/llvm-project/clang/lib/Format/WhitespaceManager.h
@@ -196,8 +196,20 @@ private:
struct CellDescriptions {
SmallVector<CellDescription> Cells;
- unsigned CellCount = 0;
+ SmallVector<unsigned> CellCounts;
unsigned InitialSpaces = 0;
+
+ // Determine if every row in the array
+ // has the same number of columns.
+ bool isRectangular() const {
+ if (CellCounts.empty())
+ return false;
+
+ for (auto NumberOfColumns : CellCounts)
+ if (NumberOfColumns != CellCounts[0])
+ return false;
+ return true;
+ }
};
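
[Editorial example of the rectangularity guard, not part of the diff:]

    int ok[2][2] = {{1, 2}, {3, 4}};   // every row has the same column count: aligned
    int no[2][2] = {{1, 2}, {3}};      // ragged rows: isRectangular() is false,
                                       //   so alignment is skipped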
/// Calculate \c IsTrailingComment, \c TokenLength for the last tokens
@@ -295,13 +307,15 @@ private:
/// Get The maximum width of all columns to a given cell.
template <typename I>
unsigned getMaximumNetWidth(const I &CellStart, const I &CellStop,
- unsigned InitialSpaces,
- unsigned CellCount) const {
+ unsigned InitialSpaces, unsigned CellCount,
+ unsigned MaxRowCount) const {
auto MaxNetWidth = getNetWidth(CellStart, CellStop, InitialSpaces);
auto RowCount = 1U;
auto Offset = std::distance(CellStart, CellStop);
for (const auto *Next = CellStop->NextColumnElement; Next != nullptr;
Next = Next->NextColumnElement) {
+ if (RowCount > MaxRowCount)
+ break;
auto Start = (CellStart + RowCount * CellCount);
auto End = Start + Offset;
MaxNetWidth =
diff --git a/contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp b/contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp
index 5f587cc1c023..5a157d665a9d 100644
--- a/contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/ASTUnit.cpp
@@ -759,7 +759,8 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
WhatToLoad ToLoad, IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
const FileSystemOptions &FileSystemOpts, bool UseDebugInfo,
bool OnlyLocalDecls, CaptureDiagsKind CaptureDiagnostics,
- bool AllowASTWithCompilerErrors, bool UserFilesAreVolatile) {
+ bool AllowASTWithCompilerErrors, bool UserFilesAreVolatile,
+ IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS) {
std::unique_ptr<ASTUnit> AST(new ASTUnit(true));
// Recover resources if we crash before exiting this method.
@@ -775,8 +776,6 @@ std::unique_ptr<ASTUnit> ASTUnit::LoadFromASTFile(
AST->OnlyLocalDecls = OnlyLocalDecls;
AST->CaptureDiagnostics = CaptureDiagnostics;
AST->Diagnostics = Diags;
- IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS =
- llvm::vfs::getRealFileSystem();
AST->FileMgr = new FileManager(FileSystemOpts, VFS);
AST->UserFilesAreVolatile = UserFilesAreVolatile;
AST->SourceMgr = new SourceManager(AST->getDiagnostics(),
@@ -1729,8 +1728,12 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
CaptureDroppedDiagnostics Capture(CaptureDiagnostics, *Diags,
&StoredDiagnostics, nullptr);
- CI = createInvocationFromCommandLine(
- llvm::makeArrayRef(ArgBegin, ArgEnd), Diags, VFS);
+ CreateInvocationOptions CIOpts;
+ CIOpts.VFS = VFS;
+ CIOpts.Diags = Diags;
+ CIOpts.ProbePrecompiled = true; // FIXME: historical default. Needed?
+ CI = createInvocation(llvm::makeArrayRef(ArgBegin, ArgEnd),
+ std::move(CIOpts));
if (!CI)
return nullptr;
}
@@ -1753,8 +1756,7 @@ ASTUnit *ASTUnit::LoadFromCommandLine(
SkipFunctionBodies == SkipFunctionBodiesScope::PreambleAndMainFile;
if (ModuleFormat)
- CI->getHeaderSearchOpts().ModuleFormat =
- std::string(ModuleFormat.getValue());
+ CI->getHeaderSearchOpts().ModuleFormat = std::string(*ModuleFormat);
// Create the AST unit.
std::unique_ptr<ASTUnit> AST;
diff --git a/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp b/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp
index 2465a7e2453b..b982ca72c78c 100644
--- a/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/CompilerInstance.cpp
@@ -123,12 +123,12 @@ bool CompilerInstance::createTarget() {
}
if (!getTarget().hasStrictFP() && !getLangOpts().ExpStrictFP) {
- if (getLangOpts().getFPRoundingMode() !=
- llvm::RoundingMode::NearestTiesToEven) {
+ if (getLangOpts().RoundingMath) {
getDiagnostics().Report(diag::warn_fe_backend_unsupported_fp_rounding);
- getLangOpts().setFPRoundingMode(llvm::RoundingMode::NearestTiesToEven);
+ getLangOpts().RoundingMath = false;
}
- if (getLangOpts().getFPExceptionMode() != LangOptions::FPE_Ignore) {
+ auto FPExc = getLangOpts().getFPExceptionMode();
+ if (FPExc != LangOptions::FPE_Default && FPExc != LangOptions::FPE_Ignore) {
getDiagnostics().Report(diag::warn_fe_backend_unsupported_fp_exceptions);
getLangOpts().setFPExceptionMode(LangOptions::FPE_Ignore);
}
@@ -232,7 +232,7 @@ static void collectIncludePCH(CompilerInstance &CI,
StringRef PCHInclude = PPOpts.ImplicitPCHInclude;
FileManager &FileMgr = CI.getFileManager();
- auto PCHDir = FileMgr.getDirectory(PCHInclude);
+ auto PCHDir = FileMgr.getOptionalDirectoryRef(PCHInclude);
if (!PCHDir) {
MDC->addFile(PCHInclude);
return;
@@ -240,7 +240,7 @@ static void collectIncludePCH(CompilerInstance &CI,
std::error_code EC;
SmallString<128> DirNative;
- llvm::sys::path::native((*PCHDir)->getName(), DirNative);
+ llvm::sys::path::native(PCHDir->getName(), DirNative);
llvm::vfs::FileSystem &FS = FileMgr.getVirtualFileSystem();
SimpleASTReaderListener Validator(CI.getPreprocessor());
for (llvm::vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC), DirEnd;
@@ -710,13 +710,10 @@ static bool EnableCodeCompletion(Preprocessor &PP,
void CompilerInstance::createCodeCompletionConsumer() {
const ParsedSourceLocation &Loc = getFrontendOpts().CodeCompletionAt;
if (!CompletionConsumer) {
- setCodeCompletionConsumer(
- createCodeCompletionConsumer(getPreprocessor(),
- Loc.FileName, Loc.Line, Loc.Column,
- getFrontendOpts().CodeCompleteOpts,
- llvm::outs()));
- if (!CompletionConsumer)
- return;
+ setCodeCompletionConsumer(createCodeCompletionConsumer(
+ getPreprocessor(), Loc.FileName, Loc.Line, Loc.Column,
+ getFrontendOpts().CodeCompleteOpts, llvm::outs()));
+ return;
} else if (EnableCodeCompletion(getPreprocessor(), Loc.FileName,
Loc.Line, Loc.Column)) {
setCodeCompletionConsumer(nullptr);
diff --git a/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp b/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp
index 7f1ce3da7e7e..abef4cf65496 100644
--- a/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/CompilerInvocation.cpp
@@ -94,6 +94,7 @@
#include <cassert>
#include <cstddef>
#include <cstring>
+#include <fstream>
#include <memory>
#include <string>
#include <tuple>
@@ -107,6 +108,20 @@ using namespace options;
using namespace llvm::opt;
//===----------------------------------------------------------------------===//
+// Helpers.
+//===----------------------------------------------------------------------===//
+
+// Parse misexpect tolerance argument value.
+// Valid option values are integers in the range [0, 100)
+inline Expected<Optional<uint64_t>> parseToleranceOption(StringRef Arg) {
+ int64_t Val;
+ if (Arg.getAsInteger(10, Val))
+ return llvm::createStringError(llvm::inconvertibleErrorCode(),
+ "Not an integer: %s", Arg.data());
+ return Val;
+}
+
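A minimal sketch of how the Expected<Optional<uint64_t>> returned above is meant to be consumed (hypothetical caller, not part of the patch; it mirrors the misexpect-tolerance hunk in ParseCodeGenArgs further down):

    llvm::Expected<llvm::Optional<uint64_t>> ValOrErr = parseToleranceOption("25");
    if (!ValOrErr) {
      // The error must be consumed; an unhandled llvm::Error aborts at runtime.
      llvm::errs() << llvm::toString(ValOrErr.takeError()) << "\n";
    } else {
      // Success still yields an Optional, assigned wholesale to the option.
      llvm::Optional<uint64_t> Tolerance = *ValOrErr;
      (void)Tolerance;
    }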
+//===----------------------------------------------------------------------===//
// Initialization.
//===----------------------------------------------------------------------===//
@@ -503,6 +518,22 @@ static bool FixupInvocation(CompilerInvocation &Invocation,
Diags.Report(diag::warn_ignored_hip_only_option)
<< Args.getLastArg(OPT_gpu_max_threads_per_block_EQ)->getAsString(Args);
+ // When these options are used, the compiler is allowed to apply
+ // optimizations that may affect the final result. For example,
+ // (x+y)+z may be transformed to x+(y+z), which does not always give the
+ // same final result; the transformation is not value-safe.
+ // Another example is simplifying x/x to 1.0, even though x could be 0.0,
+ // INF, or NaN, in which case the final result differs. An error is issued
+ // when the eval method is set together with one of these options (see the
+ // illustration below).
+ if (Args.hasArg(OPT_ffp_eval_method_EQ)) {
+ if (LangOpts.ApproxFunc)
+ Diags.Report(diag::err_incompatible_fp_eval_method_options) << 0;
+ if (LangOpts.AllowFPReassoc)
+ Diags.Report(diag::err_incompatible_fp_eval_method_options) << 1;
+ if (LangOpts.AllowRecip)
+ Diags.Report(diag::err_incompatible_fp_eval_method_options) << 2;
+ }
+
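A plain C++ illustration of the value-safety problem described in the comment above (not part of the patch; the literals are chosen only to make the rounding visible):

    double x = 1e308, y = -1e308, z = 1.0;
    double a = (x + y) + z; // exact cancellation happens first: a == 1.0
    double b = x + (y + z); // y + z rounds back to -1e308, so b == 0.0
    // Reassociation may silently turn the first form into the second.
    // Likewise x / x is 1.0 only for finite non-zero x; for 0.0, INF or
    // NaN the true result is NaN.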
// -cl-strict-aliasing needs to emit diagnostic in the case where CL > 1.0.
// This option should be deprecated for CL > 1.0 because
// this option was added for compatibility with OpenCL 1.0.
@@ -526,11 +557,6 @@ static bool FixupInvocation(CompilerInvocation &Invocation,
<< A->getSpelling() << T.getTriple();
}
- if (!CodeGenOpts.ProfileRemappingFile.empty() && CodeGenOpts.LegacyPassManager)
- Diags.Report(diag::err_drv_argument_only_allowed_with)
- << Args.getLastArg(OPT_fprofile_remapping_file_EQ)->getAsString(Args)
- << "-fno-legacy-pass-manager";
-
return Diags.getNumErrors() == NumErrorsBefore;
}
@@ -790,18 +816,6 @@ static void GenerateAnalyzerArgs(AnalyzerOptions &Opts,
#include "clang/Driver/Options.inc"
#undef ANALYZER_OPTION_WITH_MARSHALLING
- if (Opts.AnalysisStoreOpt != RegionStoreModel) {
- switch (Opts.AnalysisStoreOpt) {
-#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATFN) \
- case NAME##Model: \
- GenerateArg(Args, OPT_analyzer_store, CMDFLAG, SA); \
- break;
-#include "clang/StaticAnalyzer/Core/Analyses.def"
- default:
- llvm_unreachable("Tried to generate unknown analysis store.");
- }
- }
-
if (Opts.AnalysisConstraintsOpt != RangeConstraintsModel) {
switch (Opts.AnalysisConstraintsOpt) {
#define ANALYSIS_CONSTRAINTS(NAME, CMDFLAG, DESC, CREATFN) \
@@ -889,20 +903,13 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
#include "clang/Driver/Options.inc"
#undef ANALYZER_OPTION_WITH_MARSHALLING
- if (Arg *A = Args.getLastArg(OPT_analyzer_store)) {
- StringRef Name = A->getValue();
- AnalysisStores Value = llvm::StringSwitch<AnalysisStores>(Name)
-#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATFN) \
- .Case(CMDFLAG, NAME##Model)
-#include "clang/StaticAnalyzer/Core/Analyses.def"
- .Default(NumStores);
- if (Value == NumStores) {
- Diags.Report(diag::err_drv_invalid_value)
- << A->getAsString(Args) << Name;
- } else {
- Opts.AnalysisStoreOpt = Value;
- }
- }
+ if (Args.hasArg(OPT_analyzer_store))
+ Diags.Report(diag::warn_analyzer_deprecated_option) << "-analyzer-store"
+ << "clang-16";
+ if (Args.hasArg(OPT_analyzer_opt_analyze_nested_blocks))
+ Diags.Report(diag::warn_analyzer_deprecated_option)
+ << "-analyzer-opt-analyze-nested-blocks"
+ << "clang-16";
if (Arg *A = Args.getLastArg(OPT_analyzer_constraints)) {
StringRef Name = A->getValue();
@@ -915,6 +922,11 @@ static bool ParseAnalyzerArgs(AnalyzerOptions &Opts, ArgList &Args,
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << Name;
} else {
+#ifndef LLVM_WITH_Z3
+ if (Value == AnalysisConstraints::Z3ConstraintsModel) {
+ Diags.Report(diag::err_analyzer_not_built_with_z3);
+ }
+#endif // LLVM_WITH_Z3
Opts.AnalysisConstraintsOpt = Value;
}
}
@@ -1062,7 +1074,7 @@ static void initOption(AnalyzerOptions::ConfigTable &Config,
else
OptionField = DefaultVal;
} else
- OptionField = PossiblyInvalidVal.getValue();
+ OptionField = *PossiblyInvalidVal;
}
static void initOption(AnalyzerOptions::ConfigTable &Config,
@@ -1081,25 +1093,22 @@ static void initOption(AnalyzerOptions::ConfigTable &Config,
static void parseAnalyzerConfigs(AnalyzerOptions &AnOpts,
DiagnosticsEngine *Diags) {
// TODO: There's no need to store the entire configtable, it'd be plenty
- // enough tostore checker options.
+ // enough to store checker options.
#define ANALYZER_OPTION(TYPE, NAME, CMDFLAG, DESC, DEFAULT_VAL) \
initOption(AnOpts.Config, Diags, AnOpts.NAME, CMDFLAG, DEFAULT_VAL);
+#define ANALYZER_OPTION_DEPENDS_ON_USER_MODE(...)
+#include "clang/StaticAnalyzer/Core/AnalyzerOptions.def"
-#define ANALYZER_OPTION_DEPENDS_ON_USER_MODE(TYPE, NAME, CMDFLAG, DESC, \
- SHALLOW_VAL, DEEP_VAL) \
- switch (AnOpts.getUserMode()) { \
- case UMK_Shallow: \
- initOption(AnOpts.Config, Diags, AnOpts.NAME, CMDFLAG, SHALLOW_VAL); \
- break; \
- case UMK_Deep: \
- initOption(AnOpts.Config, Diags, AnOpts.NAME, CMDFLAG, DEEP_VAL); \
- break; \
- } \
+ assert(AnOpts.UserMode == "shallow" || AnOpts.UserMode == "deep");
+ const bool InShallowMode = AnOpts.UserMode == "shallow";
+#define ANALYZER_OPTION(...)
+#define ANALYZER_OPTION_DEPENDS_ON_USER_MODE(TYPE, NAME, CMDFLAG, DESC, \
+ SHALLOW_VAL, DEEP_VAL) \
+ initOption(AnOpts.Config, Diags, AnOpts.NAME, CMDFLAG, \
+ InShallowMode ? SHALLOW_VAL : DEEP_VAL);
#include "clang/StaticAnalyzer/Core/AnalyzerOptions.def"
-#undef ANALYZER_OPTION
-#undef ANALYZER_OPTION_DEPENDS_ON_USER_MODE
// At this point, AnalyzerOptions is configured. Let's validate some options.
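The rewrite above relies on the usual .def-file convention: AnalyzerOptions.def #undefs its macros at the end of each inclusion (which is why the explicit #undefs are dropped here), so the file can be walked twice with one macro stubbed out per pass. A generic sketch of the two-pass X-macro pattern, using a hypothetical Options.def:

    // Hypothetical Options.def entries (the file #undefs both macros at its end):
    //   OPTION(Name, Default)
    //   OPTION_USER_MODE(Name, ShallowVal, DeepVal)

    // Pass 1: expand only the plain options.
    #define OPTION(NAME, DEFAULT) initOption(Config, NAME, DEFAULT);
    #define OPTION_USER_MODE(...)
    #include "Options.def"

    // Pass 2: expand only the user-mode-dependent options.
    #define OPTION(...)
    #define OPTION_USER_MODE(NAME, SHALLOW, DEEP)                              \
      initOption(Config, NAME, InShallowMode ? SHALLOW : DEEP);
    #include "Options.def"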
@@ -1473,6 +1482,9 @@ void CompilerInvocation::GenerateCodeGenArgs(
else if (Opts.CFProtectionBranch)
GenerateArg(Args, OPT_fcf_protection_EQ, "branch", SA);
+ if (Opts.IBTSeal)
+ GenerateArg(Args, OPT_mibt_seal, SA);
+
for (const auto &F : Opts.LinkBitcodeFiles) {
bool Builtint = F.LinkFlags == llvm::Linker::Flags::LinkOnlyNeeded &&
F.PropagateAttrs && F.Internalize;
@@ -1492,7 +1504,8 @@ void CompilerInvocation::GenerateCodeGenArgs(
if (Opts.FPDenormalMode != llvm::DenormalMode::getIEEE())
GenerateArg(Args, OPT_fdenormal_fp_math_EQ, Opts.FPDenormalMode.str(), SA);
- if (Opts.FP32DenormalMode != llvm::DenormalMode::getIEEE())
+ if ((Opts.FPDenormalMode != Opts.FP32DenormalMode) ||
+ (Opts.FP32DenormalMode != llvm::DenormalMode::getIEEE()))
GenerateArg(Args, OPT_fdenormal_fp_math_f32_EQ, Opts.FP32DenormalMode.str(),
SA);
@@ -1530,6 +1543,9 @@ void CompilerInvocation::GenerateCodeGenArgs(
: "auto",
SA);
+ GenerateArg(Args, OPT_fdiagnostics_misexpect_tolerance_EQ,
+ Twine(*Opts.DiagnosticsMisExpectTolerance), SA);
+
for (StringRef Sanitizer : serializeSanitizerKinds(Opts.SanitizeRecover))
GenerateArg(Args, OPT_fsanitize_recover_EQ, Sanitizer, SA);
@@ -1591,23 +1607,22 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
// At O0 we want to fully disable inlining outside of cases marked with
// 'alwaysinline' that are required for correctness.
- Opts.setInlining((Opts.OptimizationLevel == 0)
- ? CodeGenOptions::OnlyAlwaysInlining
- : CodeGenOptions::NormalInlining);
- // Explicit inlining flags can disable some or all inlining even at
- // optimization levels above zero.
- if (Arg *InlineArg = Args.getLastArg(
- options::OPT_finline_functions, options::OPT_finline_hint_functions,
- options::OPT_fno_inline_functions, options::OPT_fno_inline)) {
- if (Opts.OptimizationLevel > 0) {
- const Option &InlineOpt = InlineArg->getOption();
- if (InlineOpt.matches(options::OPT_finline_functions))
- Opts.setInlining(CodeGenOptions::NormalInlining);
- else if (InlineOpt.matches(options::OPT_finline_hint_functions))
- Opts.setInlining(CodeGenOptions::OnlyHintInlining);
- else
- Opts.setInlining(CodeGenOptions::OnlyAlwaysInlining);
- }
+ if (Opts.OptimizationLevel == 0) {
+ Opts.setInlining(CodeGenOptions::OnlyAlwaysInlining);
+ } else if (const Arg *A = Args.getLastArg(options::OPT_finline_functions,
+ options::OPT_finline_hint_functions,
+ options::OPT_fno_inline_functions,
+ options::OPT_fno_inline)) {
+ // Explicit inlining flags can disable some or all inlining even at
+ // optimization levels above zero.
+ if (A->getOption().matches(options::OPT_finline_functions))
+ Opts.setInlining(CodeGenOptions::NormalInlining);
+ else if (A->getOption().matches(options::OPT_finline_hint_functions))
+ Opts.setInlining(CodeGenOptions::OnlyHintInlining);
+ else
+ Opts.setInlining(CodeGenOptions::OnlyAlwaysInlining);
+ } else {
+ Opts.setInlining(CodeGenOptions::NormalInlining);
}
// PIC defaults to -fno-direct-access-external-data while non-PIC defaults to
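The restructured inlining selection above reduces to a small decision function; a standalone sketch (hypothetical helper, not a clang API):

    #include <string_view>

    enum Inlining { OnlyAlwaysInlining, OnlyHintInlining, NormalInlining };

    // LastInlineFlag is the spelling of the last -finline*/-fno-inline* flag
    // on the command line, or nullptr if none was given.
    Inlining pickInlining(unsigned OptLevel, const char *LastInlineFlag) {
      if (OptLevel == 0)
        return OnlyAlwaysInlining;          // -O0: only 'alwaysinline' survives
      if (!LastInlineFlag)
        return NormalInlining;              // default above -O0
      std::string_view F = LastInlineFlag;
      if (F == "-finline-functions")
        return NormalInlining;
      if (F == "-finline-hint-functions")
        return OnlyHintInlining;
      return OnlyAlwaysInlining;            // -fno-inline, -fno-inline-functions
    }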
@@ -1710,10 +1725,6 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
// -ftime-report= is only for new pass manager.
if (A->getOption().getID() == OPT_ftime_report_EQ) {
- if (Opts.LegacyPassManager)
- Diags.Report(diag::err_drv_argument_only_allowed_with)
- << A->getAsString(Args) << "-fno-legacy-pass-manager";
-
StringRef Val = A->getValue();
if (Val == "per-pass")
Opts.TimePassesPerRun = false;
@@ -1847,6 +1858,7 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
if (Arg *A = Args.getLastArg(OPT_fdenormal_fp_math_EQ)) {
StringRef Val = A->getValue();
Opts.FPDenormalMode = llvm::parseDenormalFPAttribute(Val);
+ Opts.FP32DenormalMode = Opts.FPDenormalMode;
if (!Opts.FPDenormalMode.isValid())
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args) << Val;
}
@@ -1939,7 +1951,7 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
<< "-fdiagnostics-hotness-threshold=";
} else {
Opts.DiagnosticsHotnessThreshold = *ResultOrErr;
- if ((!Opts.DiagnosticsHotnessThreshold.hasValue() ||
+ if ((!Opts.DiagnosticsHotnessThreshold ||
Opts.DiagnosticsHotnessThreshold.getValue() > 0) &&
!UsingProfile)
Diags.Report(diag::warn_drv_diagnostics_hotness_requires_pgo)
@@ -1947,6 +1959,23 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
}
}
+ if (auto *arg =
+ Args.getLastArg(options::OPT_fdiagnostics_misexpect_tolerance_EQ)) {
+ auto ResultOrErr = parseToleranceOption(arg->getValue());
+
+ if (!ResultOrErr) {
+ Diags.Report(diag::err_drv_invalid_diagnotics_misexpect_tolerance)
+ << "-fdiagnostics-misexpect-tolerance=";
+ } else {
+ Opts.DiagnosticsMisExpectTolerance = *ResultOrErr;
+ if ((!Opts.DiagnosticsMisExpectTolerance ||
+ Opts.DiagnosticsMisExpectTolerance.getValue() > 0) &&
+ !UsingProfile)
+ Diags.Report(diag::warn_drv_diagnostics_misexpect_requires_pgo)
+ << "-fdiagnostics-misexpect-tolerance=";
+ }
+ }
+
// If the user requested to use a sample profile for PGO, then the
// backend will need to track source location information so the profile
// can be incorporated into the IR.
@@ -1977,8 +2006,8 @@ bool CompilerInvocation::ParseCodeGenArgs(CodeGenOptions &Opts, ArgList &Args,
else if (Args.hasArg(options::OPT_fno_finite_loops))
Opts.FiniteLoops = CodeGenOptions::FiniteLoopsKind::Never;
- Opts.EmitIEEENaNCompliantInsts =
- Args.hasFlag(options::OPT_mamdgpu_ieee, options::OPT_mno_amdgpu_ieee);
+ Opts.EmitIEEENaNCompliantInsts = Args.hasFlag(
+ options::OPT_mamdgpu_ieee, options::OPT_mno_amdgpu_ieee, true);
if (!Opts.EmitIEEENaNCompliantInsts && !LangOptsRef.NoHonorNaNs)
Diags.Report(diag::err_drv_amdgpu_ieee_without_no_honor_nans);
@@ -2102,11 +2131,9 @@ static bool parseShowColorsArgs(const ArgList &Args, bool DefaultColor) {
} ShowColors = DefaultColor ? Colors_Auto : Colors_Off;
for (auto *A : Args) {
const Option &O = A->getOption();
- if (O.matches(options::OPT_fcolor_diagnostics) ||
- O.matches(options::OPT_fdiagnostics_color)) {
+ if (O.matches(options::OPT_fcolor_diagnostics)) {
ShowColors = Colors_On;
- } else if (O.matches(options::OPT_fno_color_diagnostics) ||
- O.matches(options::OPT_fno_diagnostics_color)) {
+ } else if (O.matches(options::OPT_fno_color_diagnostics)) {
ShowColors = Colors_Off;
} else if (O.matches(options::OPT_fdiagnostics_color_EQ)) {
StringRef Value(A->getValue());
@@ -2406,7 +2433,6 @@ static const auto &getFrontendActionTable() {
{frontend::EmitLLVM, OPT_emit_llvm},
{frontend::EmitLLVMOnly, OPT_emit_llvm_only},
{frontend::EmitCodeGenOnly, OPT_emit_codegen_only},
- {frontend::EmitCodeGenOnly, OPT_emit_codegen_only},
{frontend::EmitObj, OPT_emit_obj},
{frontend::ExtractAPI, OPT_extract_api},
@@ -2416,6 +2442,7 @@ static const auto &getFrontendActionTable() {
{frontend::GenerateModule, OPT_emit_module},
{frontend::GenerateModuleInterface, OPT_emit_module_interface},
{frontend::GenerateHeaderModule, OPT_emit_header_module},
+ {frontend::GenerateHeaderUnit, OPT_emit_header_unit},
{frontend::GeneratePCH, OPT_emit_pch},
{frontend::GenerateInterfaceStubs, OPT_emit_interface_stubs},
{frontend::InitOnly, OPT_init_only},
@@ -2432,7 +2459,7 @@ static const auto &getFrontendActionTable() {
{frontend::MigrateSource, OPT_migrate},
{frontend::RunPreprocessorOnly, OPT_Eonly},
{frontend::PrintDependencyDirectivesSourceMinimizerOutput,
- OPT_print_dependency_directives_minimized_source},
+ OPT_print_dependency_directives_minimized_source},
};
return Table;
@@ -2551,10 +2578,10 @@ static void GenerateFrontendArgs(const FrontendOptions &Opts,
for (const auto &ModuleFile : Opts.ModuleFiles)
GenerateArg(Args, OPT_fmodule_file, ModuleFile, SA);
- if (Opts.AuxTargetCPU.hasValue())
+ if (Opts.AuxTargetCPU)
GenerateArg(Args, OPT_aux_target_cpu, *Opts.AuxTargetCPU, SA);
- if (Opts.AuxTargetFeatures.hasValue())
+ if (Opts.AuxTargetFeatures)
for (const auto &Feature : *Opts.AuxTargetFeatures)
GenerateArg(Args, OPT_aux_target_feature, Feature, SA);
@@ -2562,6 +2589,20 @@ static void GenerateFrontendArgs(const FrontendOptions &Opts,
StringRef Preprocessed = Opts.DashX.isPreprocessed() ? "-cpp-output" : "";
StringRef ModuleMap =
Opts.DashX.getFormat() == InputKind::ModuleMap ? "-module-map" : "";
+ StringRef HeaderUnit = "";
+ switch (Opts.DashX.getHeaderUnitKind()) {
+ case InputKind::HeaderUnit_None:
+ break;
+ case InputKind::HeaderUnit_User:
+ HeaderUnit = "-user";
+ break;
+ case InputKind::HeaderUnit_System:
+ HeaderUnit = "-system";
+ break;
+ case InputKind::HeaderUnit_Abs:
+ HeaderUnit = "-header-unit";
+ break;
+ }
StringRef Header = IsHeader ? "-header" : "";
StringRef Lang;
@@ -2604,9 +2645,13 @@ static void GenerateFrontendArgs(const FrontendOptions &Opts,
case Language::LLVM_IR:
Lang = "ir";
break;
+ case Language::HLSL:
+ Lang = "hlsl";
+ break;
}
- GenerateArg(Args, OPT_x, Lang + Header + ModuleMap + Preprocessed, SA);
+ GenerateArg(Args, OPT_x,
+ Lang + HeaderUnit + Header + ModuleMap + Preprocessed, SA);
}
// OPT_INPUT has a unique class, generate it directly.
@@ -2751,13 +2796,32 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
if (const Arg *A = Args.getLastArg(OPT_x)) {
StringRef XValue = A->getValue();
- // Parse suffixes: '<lang>(-header|[-module-map][-cpp-output])'.
+ // Parse suffixes:
+ // '<lang>(-[{header-unit,user,system}-]header|[-module-map][-cpp-output])'.
// FIXME: Supporting '<lang>-header-cpp-output' would be useful.
bool Preprocessed = XValue.consume_back("-cpp-output");
bool ModuleMap = XValue.consume_back("-module-map");
- IsHeaderFile = !Preprocessed && !ModuleMap &&
- XValue != "precompiled-header" &&
- XValue.consume_back("-header");
+ // Detect and consume the header indicator.
+ bool IsHeader =
+ XValue != "precompiled-header" && XValue.consume_back("-header");
+
+ // If we have c++-{user,system}-header, that indicates a header unit input;
+ // likewise if the user put -fmodule-header together with a header with an
+ // absolute path (header-unit-header).
+ InputKind::HeaderUnitKind HUK = InputKind::HeaderUnit_None;
+ if (IsHeader || Preprocessed) {
+ if (XValue.consume_back("-header-unit"))
+ HUK = InputKind::HeaderUnit_Abs;
+ else if (XValue.consume_back("-system"))
+ HUK = InputKind::HeaderUnit_System;
+ else if (XValue.consume_back("-user"))
+ HUK = InputKind::HeaderUnit_User;
+ }
+
+ // The value set by this processing is an un-preprocessed source which is
+ // not intended to be a module map or header unit.
+ IsHeaderFile = IsHeader && !Preprocessed && !ModuleMap &&
+ HUK == InputKind::HeaderUnit_None;
// Principal languages.
DashX = llvm::StringSwitch<InputKind>(XValue)
@@ -2770,18 +2834,21 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
.Case("objective-c", Language::ObjC)
.Case("objective-c++", Language::ObjCXX)
.Case("renderscript", Language::RenderScript)
+ .Case("hlsl", Language::HLSL)
.Default(Language::Unknown);
// "objc[++]-cpp-output" is an acceptable synonym for
// "objective-c[++]-cpp-output".
- if (DashX.isUnknown() && Preprocessed && !IsHeaderFile && !ModuleMap)
+ if (DashX.isUnknown() && Preprocessed && !IsHeaderFile && !ModuleMap &&
+ HUK == InputKind::HeaderUnit_None)
DashX = llvm::StringSwitch<InputKind>(XValue)
.Case("objc", Language::ObjC)
.Case("objc++", Language::ObjCXX)
.Default(Language::Unknown);
// Some special cases cannot be combined with suffixes.
- if (DashX.isUnknown() && !Preprocessed && !ModuleMap && !IsHeaderFile)
+ if (DashX.isUnknown() && !Preprocessed && !IsHeaderFile && !ModuleMap &&
+ HUK == InputKind::HeaderUnit_None)
DashX = llvm::StringSwitch<InputKind>(XValue)
.Case("cpp-output", InputKind(Language::C).getPreprocessed())
.Case("assembler-with-cpp", Language::Asm)
@@ -2796,6 +2863,12 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
if (Preprocessed)
DashX = DashX.getPreprocessed();
+ // A regular header is considered mutually exclusive with a header unit.
+ if (HUK != InputKind::HeaderUnit_None) {
+ DashX = DashX.withHeaderUnit(HUK);
+ IsHeaderFile = true;
+ } else if (IsHeaderFile)
+ DashX = DashX.getHeader();
if (ModuleMap)
DashX = DashX.withFormat(InputKind::ModuleMap);
}
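The suffix handling above leans on llvm::StringRef::consume_back, which strips a matching suffix in place and reports whether it matched. A self-contained std::string_view analogue, run on the hypothetical -x value "c++-user-header":

    #include <cassert>
    #include <string_view>

    static bool consumeBack(std::string_view &S, std::string_view Suffix) {
      if (S.size() < Suffix.size() ||
          S.substr(S.size() - Suffix.size()) != Suffix)
        return false;
      S.remove_suffix(Suffix.size());
      return true;
    }

    int main() {
      std::string_view X = "c++-user-header";
      bool IsHeader = consumeBack(X, "-header"); // X is now "c++-user"
      bool IsUser = consumeBack(X, "-user");     // X is now "c++": HeaderUnit_User
      assert(IsHeader && IsUser && X == "c++");
    }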
@@ -2805,6 +2878,11 @@ static bool ParseFrontendArgs(FrontendOptions &Opts, ArgList &Args,
Opts.Inputs.clear();
if (Inputs.empty())
Inputs.push_back("-");
+
+ if (DashX.getHeaderUnitKind() != InputKind::HeaderUnit_None &&
+ Inputs.size() > 1)
+ Diags.Report(diag::err_drv_header_unit_extra_inputs) << Inputs[1];
+
for (unsigned i = 0, e = Inputs.size(); i != e; ++i) {
InputKind IK = DashX;
if (IK.isUnknown()) {
@@ -3103,155 +3181,6 @@ static bool ParseHeaderSearchArgs(HeaderSearchOptions &Opts, ArgList &Args,
return Diags.getNumErrors() == NumErrorsBefore;
}
-void CompilerInvocation::setLangDefaults(LangOptions &Opts, InputKind IK,
- const llvm::Triple &T,
- std::vector<std::string> &Includes,
- LangStandard::Kind LangStd) {
- // Set some properties which depend solely on the input kind; it would be nice
- // to move these to the language standard, and have the driver resolve the
- // input kind + language standard.
- //
- // FIXME: Perhaps a better model would be for a single source file to have
- // multiple language standards (C / C++ std, ObjC std, OpenCL std, OpenMP std)
- // simultaneously active?
- if (IK.getLanguage() == Language::Asm) {
- Opts.AsmPreprocessor = 1;
- } else if (IK.isObjectiveC()) {
- Opts.ObjC = 1;
- }
-
- if (LangStd == LangStandard::lang_unspecified) {
- // Based on the base language, pick one.
- switch (IK.getLanguage()) {
- case Language::Unknown:
- case Language::LLVM_IR:
- llvm_unreachable("Invalid input kind!");
- case Language::OpenCL:
- LangStd = LangStandard::lang_opencl12;
- break;
- case Language::OpenCLCXX:
- LangStd = LangStandard::lang_openclcpp10;
- break;
- case Language::CUDA:
- LangStd = LangStandard::lang_cuda;
- break;
- case Language::Asm:
- case Language::C:
-#if defined(CLANG_DEFAULT_STD_C)
- LangStd = CLANG_DEFAULT_STD_C;
-#else
- // The PS4 uses C99 as the default C standard.
- if (T.isPS4())
- LangStd = LangStandard::lang_gnu99;
- else
- LangStd = LangStandard::lang_gnu17;
-#endif
- break;
- case Language::ObjC:
-#if defined(CLANG_DEFAULT_STD_C)
- LangStd = CLANG_DEFAULT_STD_C;
-#else
- LangStd = LangStandard::lang_gnu11;
-#endif
- break;
- case Language::CXX:
- case Language::ObjCXX:
-#if defined(CLANG_DEFAULT_STD_CXX)
- LangStd = CLANG_DEFAULT_STD_CXX;
-#else
- LangStd = LangStandard::lang_gnucxx14;
-#endif
- break;
- case Language::RenderScript:
- LangStd = LangStandard::lang_c99;
- break;
- case Language::HIP:
- LangStd = LangStandard::lang_hip;
- break;
- }
- }
-
- const LangStandard &Std = LangStandard::getLangStandardForKind(LangStd);
- Opts.LangStd = LangStd;
- Opts.LineComment = Std.hasLineComments();
- Opts.C99 = Std.isC99();
- Opts.C11 = Std.isC11();
- Opts.C17 = Std.isC17();
- Opts.C2x = Std.isC2x();
- Opts.CPlusPlus = Std.isCPlusPlus();
- Opts.CPlusPlus11 = Std.isCPlusPlus11();
- Opts.CPlusPlus14 = Std.isCPlusPlus14();
- Opts.CPlusPlus17 = Std.isCPlusPlus17();
- Opts.CPlusPlus20 = Std.isCPlusPlus20();
- Opts.CPlusPlus2b = Std.isCPlusPlus2b();
- Opts.GNUMode = Std.isGNUMode();
- Opts.GNUCVersion = 0;
- Opts.HexFloats = Std.hasHexFloats();
- Opts.ImplicitInt = Std.hasImplicitInt();
-
- // Set OpenCL Version.
- Opts.OpenCL = Std.isOpenCL();
- if (LangStd == LangStandard::lang_opencl10)
- Opts.OpenCLVersion = 100;
- else if (LangStd == LangStandard::lang_opencl11)
- Opts.OpenCLVersion = 110;
- else if (LangStd == LangStandard::lang_opencl12)
- Opts.OpenCLVersion = 120;
- else if (LangStd == LangStandard::lang_opencl20)
- Opts.OpenCLVersion = 200;
- else if (LangStd == LangStandard::lang_opencl30)
- Opts.OpenCLVersion = 300;
- else if (LangStd == LangStandard::lang_openclcpp10)
- Opts.OpenCLCPlusPlusVersion = 100;
- else if (LangStd == LangStandard::lang_openclcpp2021)
- Opts.OpenCLCPlusPlusVersion = 202100;
-
- // OpenCL has some additional defaults.
- if (Opts.OpenCL) {
- Opts.AltiVec = 0;
- Opts.ZVector = 0;
- Opts.setDefaultFPContractMode(LangOptions::FPM_On);
- Opts.OpenCLCPlusPlus = Opts.CPlusPlus;
- Opts.OpenCLPipes = Opts.getOpenCLCompatibleVersion() == 200;
- Opts.OpenCLGenericAddressSpace = Opts.getOpenCLCompatibleVersion() == 200;
-
- // Include default header file for OpenCL.
- if (Opts.IncludeDefaultHeader) {
- if (Opts.DeclareOpenCLBuiltins) {
- // Only include base header file for builtin types and constants.
- Includes.push_back("opencl-c-base.h");
- } else {
- Includes.push_back("opencl-c.h");
- }
- }
- }
-
- Opts.HIP = IK.getLanguage() == Language::HIP;
- Opts.CUDA = IK.getLanguage() == Language::CUDA || Opts.HIP;
- if (Opts.HIP) {
- // HIP toolchain does not support 'Fast' FPOpFusion in backends since it
- // fuses multiplication/addition instructions without contract flag from
- // device library functions in LLVM bitcode, which causes accuracy loss in
- // certain math functions, e.g. tan(-1e20) becomes -0.933 instead of 0.8446.
- // For device library functions in bitcode to work, 'Strict' or 'Standard'
- // FPOpFusion options in backends is needed. Therefore 'fast-honor-pragmas'
- // FP contract option is used to allow fuse across statements in frontend
- // whereas respecting contract flag in backend.
- Opts.setDefaultFPContractMode(LangOptions::FPM_FastHonorPragmas);
- } else if (Opts.CUDA) {
- // Allow fuse across statements disregarding pragmas.
- Opts.setDefaultFPContractMode(LangOptions::FPM_Fast);
- }
-
- Opts.RenderScript = IK.getLanguage() == Language::RenderScript;
-
- // OpenCL and C++ both have bool, true, false keywords.
- Opts.Bool = Opts.OpenCL || Opts.CPlusPlus;
-
- // OpenCL has half keyword
- Opts.Half = Opts.OpenCL;
-}
-
/// Check if input file kind and language standard are compatible.
static bool IsInputCompatibleWithStandard(InputKind IK,
const LangStandard &S) {
@@ -3289,6 +3218,9 @@ static bool IsInputCompatibleWithStandard(InputKind IK,
// FIXME: The -std= value is not ignored; it affects the tokenization
// and preprocessing rules if we're preprocessing this asm input.
return true;
+
+ case Language::HLSL:
+ return S.getLanguage() == Language::HLSL;
}
llvm_unreachable("unexpected input language");
@@ -3321,6 +3253,9 @@ static StringRef GetInputKindName(InputKind IK) {
case Language::LLVM_IR:
return "LLVM IR";
+ case Language::HLSL:
+ return "HLSL";
+
case Language::Unknown:
break;
}
@@ -3456,6 +3391,8 @@ void CompilerInvocation::GenerateLangArgs(const LangOptions &Opts,
GenerateArg(Args, OPT_mlong_double_128, SA);
else if (Opts.LongDoubleSize == 64)
GenerateArg(Args, OPT_mlong_double_64, SA);
+ else if (Opts.LongDoubleSize == 80)
+ GenerateArg(Args, OPT_mlong_double_80, SA);
// Not generating '-mrtd', it's just an alias for '-fdefault-calling-conv='.
@@ -3484,9 +3421,6 @@ void CompilerInvocation::GenerateLangArgs(const LangOptions &Opts,
GenerateArg(Args, OPT_fopenmp_version_EQ, Twine(Opts.OpenMP), SA);
}
- if (Opts.OpenMPTargetNewRuntime)
- GenerateArg(Args, OPT_fopenmp_target_new_runtime, SA);
-
if (Opts.OpenMPThreadSubscription)
GenerateArg(Args, OPT_fopenmp_assume_threads_oversubscription, SA);
@@ -3560,6 +3494,8 @@ void CompilerInvocation::GenerateLangArgs(const LangOptions &Opts,
GenerateArg(Args, OPT_fclang_abi_compat_EQ, "11.0", SA);
else if (Opts.getClangABICompat() == LangOptions::ClangABI::Ver12)
GenerateArg(Args, OPT_fclang_abi_compat_EQ, "12.0", SA);
+ else if (Opts.getClangABICompat() == LangOptions::ClangABI::Ver14)
+ GenerateArg(Args, OPT_fclang_abi_compat_EQ, "14.0", SA);
if (Opts.getSignReturnAddressScope() ==
LangOptions::SignReturnAddressScopeKind::All)
@@ -3581,8 +3517,16 @@ void CompilerInvocation::GenerateLangArgs(const LangOptions &Opts,
else
GenerateArg(Args, OPT_fno_experimental_relative_cxx_abi_vtables, SA);
+ if (Opts.UseTargetPathSeparator)
+ GenerateArg(Args, OPT_ffile_reproducible, SA);
+ else
+ GenerateArg(Args, OPT_fno_file_reproducible, SA);
+
for (const auto &MP : Opts.MacroPrefixMap)
GenerateArg(Args, OPT_fmacro_prefix_map_EQ, MP.first + "=" + MP.second, SA);
+
+ if (!Opts.RandstructSeed.empty())
+ GenerateArg(Args, OPT_frandomize_layout_seed_EQ, Opts.RandstructSeed, SA);
}
bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
@@ -3680,7 +3624,7 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Opts.IncludeDefaultHeader = Args.hasArg(OPT_finclude_default_header);
Opts.DeclareOpenCLBuiltins = Args.hasArg(OPT_fdeclare_opencl_builtins);
- CompilerInvocation::setLangDefaults(Opts, IK, T, Includes, LangStd);
+ LangOptions::setLangDefaults(Opts, IK.getLanguage(), T, Includes, LangStd);
// The key paths of codegen options defined in Options.td start with
// "LangOpts->". Let's provide the expected variable name and type.
@@ -3767,8 +3711,8 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
VersionTuple GNUCVer;
bool Invalid = GNUCVer.tryParse(A->getValue());
unsigned Major = GNUCVer.getMajor();
- unsigned Minor = GNUCVer.getMinor().getValueOr(0);
- unsigned Patch = GNUCVer.getSubminor().getValueOr(0);
+ unsigned Minor = GNUCVer.getMinor().value_or(0);
+ unsigned Patch = GNUCVer.getSubminor().value_or(0);
if (Invalid || GNUCVer.getBuild() || Minor >= 100 || Patch >= 100) {
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << A->getValue();
@@ -3776,28 +3720,7 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Opts.GNUCVersion = Major * 100 * 100 + Minor * 100 + Patch;
}
- // In AIX OS, the -mignore-xcoff-visibility is enable by default if there is
- // no -fvisibility=* option.
- // This is the reason why '-fvisibility' needs to be always generated:
- // its absence implies '-mignore-xcoff-visibility'.
- //
- // Suppose the original cc1 command line does contain '-fvisibility default':
- // '-mignore-xcoff-visibility' should not be implied.
- // * If '-fvisibility' is not generated (as most options with default values
- // don't), its absence would imply '-mignore-xcoff-visibility'. This changes
- // the command line semantics.
- // * If '-fvisibility' is generated regardless of its presence and value,
- // '-mignore-xcoff-visibility' won't be implied and the command line
- // semantics are kept intact.
- //
- // When the original cc1 command line does **not** contain '-fvisibility',
- // '-mignore-xcoff-visibility' is implied. The generated command line will
- // contain both '-fvisibility default' and '-mignore-xcoff-visibility' and
- // subsequent calls to `CreateFromArgs`/`generateCC1CommandLine` will always
- // produce the same arguments.
-
- if (T.isOSAIX() && (Args.hasArg(OPT_mignore_xcoff_visibility) ||
- !Args.hasArg(OPT_fvisibility)))
+ if (T.isOSAIX() && (Args.hasArg(OPT_mignore_xcoff_visibility)))
Opts.IgnoreXCOFFVisibility = 1;
if (Args.hasArg(OPT_ftrapv)) {
@@ -3816,8 +3739,8 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Diags.Report(diag::err_drv_invalid_value) << A->getAsString(Args)
<< A->getValue();
Opts.MSCompatibilityVersion = VT.getMajor() * 10000000 +
- VT.getMinor().getValueOr(0) * 100000 +
- VT.getSubminor().getValueOr(0);
+ VT.getMinor().value_or(0) * 100000 +
+ VT.getSubminor().value_or(0);
}
// Mimicking gcc's behavior, trigraphs are only enabled if -trigraphs
@@ -3839,11 +3762,19 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Opts.NoBuiltin = Args.hasArg(OPT_fno_builtin) || Opts.Freestanding;
if (!Opts.NoBuiltin)
getAllNoBuiltinFuncValues(Args, Opts.NoBuiltinFuncs);
- Opts.LongDoubleSize = Args.hasArg(OPT_mlong_double_128)
- ? 128
- : Args.hasArg(OPT_mlong_double_64) ? 64 : 0;
+ if (Arg *A = Args.getLastArg(options::OPT_LongDouble_Group)) {
+ if (A->getOption().matches(options::OPT_mlong_double_64))
+ Opts.LongDoubleSize = 64;
+ else if (A->getOption().matches(options::OPT_mlong_double_80))
+ Opts.LongDoubleSize = 80;
+ else if (A->getOption().matches(options::OPT_mlong_double_128))
+ Opts.LongDoubleSize = 128;
+ else
+ Opts.LongDoubleSize = 0;
+ }
if (Opts.FastRelaxedMath)
Opts.setDefaultFPContractMode(LangOptions::FPM_Fast);
+
llvm::sort(Opts.ModuleFeatures);
// -mrtd option
@@ -3875,9 +3806,6 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Opts.OpenMP && Args.hasArg(options::OPT_fopenmp_enable_irbuilder);
bool IsTargetSpecified =
Opts.OpenMPIsDevice || Args.hasArg(options::OPT_fopenmp_targets_EQ);
- Opts.OpenMPTargetNewRuntime =
- Opts.OpenMPIsDevice &&
- Args.hasArg(options::OPT_fopenmp_target_new_runtime);
Opts.ConvergentFunctions = Opts.ConvergentFunctions || Opts.OpenMPIsDevice;
@@ -3925,17 +3853,13 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
// Set either by a specific value or to a default if not specified.
if (Opts.OpenMPIsDevice && (Args.hasArg(OPT_fopenmp_target_debug) ||
Args.hasArg(OPT_fopenmp_target_debug_EQ))) {
- if (Opts.OpenMPTargetNewRuntime) {
- Opts.OpenMPTargetDebug = getLastArgIntValue(
- Args, OPT_fopenmp_target_debug_EQ, Opts.OpenMPTargetDebug, Diags);
- if (!Opts.OpenMPTargetDebug && Args.hasArg(OPT_fopenmp_target_debug))
- Opts.OpenMPTargetDebug = 1;
- } else {
- Diags.Report(diag::err_drv_debug_no_new_runtime);
- }
+ Opts.OpenMPTargetDebug = getLastArgIntValue(
+ Args, OPT_fopenmp_target_debug_EQ, Opts.OpenMPTargetDebug, Diags);
+ if (!Opts.OpenMPTargetDebug && Args.hasArg(OPT_fopenmp_target_debug))
+ Opts.OpenMPTargetDebug = 1;
}
- if (Opts.OpenMPIsDevice && Opts.OpenMPTargetNewRuntime) {
+ if (Opts.OpenMPIsDevice) {
if (Args.hasArg(OPT_fopenmp_assume_teams_oversubscription))
Opts.OpenMPTeamSubscription = true;
if (Args.hasArg(OPT_fopenmp_assume_threads_oversubscription))
@@ -4062,6 +3986,8 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Opts.setClangABICompat(LangOptions::ClangABI::Ver11);
else if (Major <= 12)
Opts.setClangABICompat(LangOptions::ClangABI::Ver12);
+ else if (Major <= 14)
+ Opts.setClangABICompat(LangOptions::ClangABI::Ver14);
} else if (Ver != "latest") {
Diags.Report(diag::err_drv_invalid_value)
<< A->getAsString(Args) << A->getValue();
@@ -4125,6 +4051,12 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
{std::string(Split.first), std::string(Split.second)});
}
+ Opts.UseTargetPathSeparator =
+ !Args.getLastArg(OPT_fno_file_reproducible) &&
+ (Args.getLastArg(OPT_ffile_compilation_dir_EQ) ||
+ Args.getLastArg(OPT_fmacro_prefix_map_EQ) ||
+ Args.getLastArg(OPT_ffile_reproducible));
+
// Error if -mvscale-min is unbounded.
if (Arg *A = Args.getLastArg(options::OPT_mvscale_min_EQ)) {
unsigned VScaleMin;
@@ -4132,6 +4064,19 @@ bool CompilerInvocation::ParseLangArgs(LangOptions &Opts, ArgList &Args,
Diags.Report(diag::err_cc1_unbounded_vscale_min);
}
+ if (const Arg *A = Args.getLastArg(OPT_frandomize_layout_seed_file_EQ)) {
+ std::ifstream SeedFile(A->getValue(0));
+
+ if (!SeedFile.is_open())
+ Diags.Report(diag::err_drv_cannot_open_randomize_layout_seed_file)
+ << A->getValue(0);
+
+ std::getline(SeedFile, Opts.RandstructSeed);
+ }
+
+ if (const Arg *A = Args.getLastArg(OPT_frandomize_layout_seed_EQ))
+ Opts.RandstructSeed = A->getValue(0);
+
return Diags.getNumErrors() == NumErrorsBefore;
}
@@ -4153,6 +4098,7 @@ static bool isStrictlyPreprocessorAction(frontend::ActionKind Action) {
case frontend::GenerateModule:
case frontend::GenerateModuleInterface:
case frontend::GenerateHeaderModule:
+ case frontend::GenerateHeaderUnit:
case frontend::GeneratePCH:
case frontend::GenerateInterfaceStubs:
case frontend::ParseSyntaxOnly:
@@ -4234,6 +4180,10 @@ static void GeneratePreprocessorArgs(PreprocessorOptions &Opts,
((LangOpts.DeclareOpenCLBuiltins && I == "opencl-c-base.h") ||
I == "opencl-c.h"))
continue;
+ // Don't generate HLSL includes. They are implied by other flags that are
+ // generated elsewhere.
+ if (LangOpts.HLSL && I == "hlsl.h")
+ continue;
GenerateArg(Args, OPT_include, I, SA);
}
@@ -4356,6 +4306,8 @@ static void GeneratePreprocessorOutputArgs(
GenerateArg(Args, OPT_dM, SA);
if (!Generate_dM && Opts.ShowMacros)
GenerateArg(Args, OPT_dD, SA);
+ if (Opts.DirectivesOnly)
+ GenerateArg(Args, OPT_fdirectives_only, SA);
}
static bool ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts,
@@ -4378,6 +4330,7 @@ static bool ParsePreprocessorOutputArgs(PreprocessorOutputOptions &Opts,
Opts.ShowCPP = isStrictlyPreprocessorAction(Action) && !Args.hasArg(OPT_dM);
Opts.ShowMacros = Args.hasArg(OPT_dM) || Args.hasArg(OPT_dD);
+ Opts.DirectivesOnly = Args.hasArg(OPT_fdirectives_only);
return Diags.getNumErrors() == NumErrorsBefore;
}
@@ -4400,6 +4353,9 @@ static void GenerateTargetArgs(const TargetOptions &Opts,
if (!Opts.SDKVersion.empty())
GenerateArg(Args, OPT_target_sdk_version_EQ, Opts.SDKVersion.getAsString(),
SA);
+ if (!Opts.DarwinTargetVariantSDKVersion.empty())
+ GenerateArg(Args, OPT_darwin_target_variant_sdk_version_EQ,
+ Opts.DarwinTargetVariantSDKVersion.getAsString(), SA);
}
static bool ParseTargetArgs(TargetOptions &Opts, ArgList &Args,
@@ -4427,6 +4383,15 @@ static bool ParseTargetArgs(TargetOptions &Opts, ArgList &Args,
else
Opts.SDKVersion = Version;
}
+ if (Arg *A =
+ Args.getLastArg(options::OPT_darwin_target_variant_sdk_version_EQ)) {
+ llvm::VersionTuple Version;
+ if (Version.tryParse(A->getValue()))
+ Diags.Report(diag::err_drv_invalid_value)
+ << A->getAsString(Args) << A->getValue();
+ else
+ Opts.DarwinTargetVariantSDKVersion = Version;
+ }
return Diags.getNumErrors() == NumErrorsBefore;
}
@@ -4478,6 +4443,13 @@ bool CompilerInvocation::CreateFromArgsImpl(
if (Res.getFrontendOpts().ProgramAction == frontend::RewriteObjC)
LangOpts.ObjCExceptions = 1;
+ for (auto Warning : Res.getDiagnosticOpts().Warnings) {
+ if (Warning == "misexpect" &&
+ !Diags.isIgnored(diag::warn_profile_data_misexpect, SourceLocation())) {
+ Res.getCodeGenOpts().MisExpect = true;
+ }
+ }
+
if (LangOpts.CUDA) {
// During CUDA device-side compilation, the aux triple is the
// triple used for host compilation.
diff --git a/contrib/llvm-project/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp b/contrib/llvm-project/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
index c5627d13a7a7..35b5e2144e6d 100644
--- a/contrib/llvm-project/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/CreateInvocationFromCommandLine.cpp
@@ -26,16 +26,13 @@
using namespace clang;
using namespace llvm::opt;
-std::unique_ptr<CompilerInvocation> clang::createInvocationFromCommandLine(
- ArrayRef<const char *> ArgList, IntrusiveRefCntPtr<DiagnosticsEngine> Diags,
- IntrusiveRefCntPtr<llvm::vfs::FileSystem> VFS, bool ShouldRecoverOnErorrs,
- std::vector<std::string> *CC1Args) {
+std::unique_ptr<CompilerInvocation>
+clang::createInvocation(ArrayRef<const char *> ArgList,
+ CreateInvocationOptions Opts) {
assert(!ArgList.empty());
- if (!Diags.get()) {
- // No diagnostics engine was provided, so create our own diagnostics object
- // with the default options.
- Diags = CompilerInstance::createDiagnostics(new DiagnosticOptions);
- }
+ auto Diags = Opts.Diags
+ ? std::move(Opts.Diags)
+ : CompilerInstance::createDiagnostics(new DiagnosticOptions);
SmallVector<const char *, 16> Args(ArgList.begin(), ArgList.end());
@@ -47,15 +44,19 @@ std::unique_ptr<CompilerInvocation> clang::createInvocationFromCommandLine(
// FIXME: We shouldn't have to pass in the path info.
driver::Driver TheDriver(Args[0], llvm::sys::getDefaultTargetTriple(), *Diags,
- "clang LLVM compiler", VFS);
+ "clang LLVM compiler", Opts.VFS);
// Don't check that inputs exist, they may have been remapped.
TheDriver.setCheckInputsExist(false);
+ TheDriver.setProbePrecompiled(Opts.ProbePrecompiled);
std::unique_ptr<driver::Compilation> C(TheDriver.BuildCompilation(Args));
if (!C)
return nullptr;
+ if (C->getArgs().hasArg(driver::options::OPT_fdriver_only))
+ return nullptr;
+
// Just print the cc1 options if -### was present.
if (C->getArgs().hasArg(driver::options::OPT__HASH_HASH_HASH)) {
C->getJobs().Print(llvm::errs(), "\n", true);
@@ -81,7 +82,7 @@ std::unique_ptr<CompilerInvocation> clang::createInvocationFromCommandLine(
}
}
- bool PickFirstOfMany = OffloadCompilation || ShouldRecoverOnErorrs;
+ bool PickFirstOfMany = OffloadCompilation || Opts.RecoverOnError;
if (Jobs.size() == 0 || (Jobs.size() > 1 && !PickFirstOfMany)) {
SmallString<256> Msg;
llvm::raw_svector_ostream OS(Msg);
@@ -98,11 +99,11 @@ std::unique_ptr<CompilerInvocation> clang::createInvocationFromCommandLine(
}
const ArgStringList &CCArgs = Cmd->getArguments();
- if (CC1Args)
- *CC1Args = {CCArgs.begin(), CCArgs.end()};
+ if (Opts.CC1Args)
+ *Opts.CC1Args = {CCArgs.begin(), CCArgs.end()};
auto CI = std::make_unique<CompilerInvocation>();
if (!CompilerInvocation::CreateFromArgs(*CI, CCArgs, *Diags, Args[0]) &&
- !ShouldRecoverOnErorrs)
+ !Opts.RecoverOnError)
return nullptr;
return CI;
}
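A hedged sketch of calling the renamed entry point; the option fields are taken from the Opts.* uses in this hunk (Diags, VFS, RecoverOnError, CC1Args, ProbePrecompiled), and the declaration lives in clang/Frontend/Utils.h upstream:

    #include "clang/Frontend/Utils.h"

    std::unique_ptr<clang::CompilerInvocation> buildCI() {
      clang::CreateInvocationOptions CIOpts;
      CIOpts.RecoverOnError = true; // replaces the ShouldRecoverOnErorrs parameter
      std::vector<std::string> CC1Args;
      CIOpts.CC1Args = &CC1Args;    // optional out-parameter, as before
      return clang::createInvocation(
          {"clang", "-fsyntax-only", "test.cpp"}, std::move(CIOpts));
    }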
diff --git a/contrib/llvm-project/clang/lib/Frontend/DependencyFile.cpp b/contrib/llvm-project/clang/lib/Frontend/DependencyFile.cpp
index 288827374106..06ddb0f0db20 100644
--- a/contrib/llvm-project/clang/lib/Frontend/DependencyFile.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/DependencyFile.cpp
@@ -31,23 +31,21 @@ using namespace clang;
namespace {
struct DepCollectorPPCallbacks : public PPCallbacks {
DependencyCollector &DepCollector;
- SourceManager &SM;
- DiagnosticsEngine &Diags;
- DepCollectorPPCallbacks(DependencyCollector &L, SourceManager &SM,
- DiagnosticsEngine &Diags)
- : DepCollector(L), SM(SM), Diags(Diags) {}
-
- void FileChanged(SourceLocation Loc, FileChangeReason Reason,
- SrcMgr::CharacteristicKind FileType,
- FileID PrevFID) override {
- if (Reason != PPCallbacks::EnterFile)
+ Preprocessor &PP;
+ DepCollectorPPCallbacks(DependencyCollector &L, Preprocessor &PP)
+ : DepCollector(L), PP(PP) {}
+
+ void LexedFileChanged(FileID FID, LexedFileChangeReason Reason,
+ SrcMgr::CharacteristicKind FileType, FileID PrevFID,
+ SourceLocation Loc) override {
+ if (Reason != PPCallbacks::LexedFileChangeReason::EnterFile)
return;
// Dependency generation really does want to go all the way to the
// file entry for a source location to find out what is depended on.
// We do not want #line markers to affect dependency generation!
- if (Optional<StringRef> Filename = SM.getNonBuiltinFilenameForID(
- SM.getFileID(SM.getExpansionLoc(Loc))))
+ if (Optional<StringRef> Filename =
+ PP.getSourceManager().getNonBuiltinFilenameForID(FID))
DepCollector.maybeAddDependency(
llvm::sys::path::remove_leading_dotslash(*Filename),
/*FromModule*/ false, isSystem(FileType), /*IsModuleFile*/ false,
@@ -66,9 +64,9 @@ struct DepCollectorPPCallbacks : public PPCallbacks {
void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
StringRef FileName, bool IsAngled,
- CharSourceRange FilenameRange, const FileEntry *File,
- StringRef SearchPath, StringRef RelativePath,
- const Module *Imported,
+ CharSourceRange FilenameRange,
+ Optional<FileEntryRef> File, StringRef SearchPath,
+ StringRef RelativePath, const Module *Imported,
SrcMgr::CharacteristicKind FileType) override {
if (!File)
DepCollector.maybeAddDependency(FileName, /*FromModule*/false,
@@ -90,7 +88,9 @@ struct DepCollectorPPCallbacks : public PPCallbacks {
/*IsMissing=*/false);
}
- void EndOfMainFile() override { DepCollector.finishedMainFile(Diags); }
+ void EndOfMainFile() override {
+ DepCollector.finishedMainFile(PP.getDiagnostics());
+ }
};
struct DepCollectorMMCallbacks : public ModuleMapCallbacks {
@@ -175,8 +175,7 @@ bool DependencyCollector::sawDependency(StringRef Filename, bool FromModule,
DependencyCollector::~DependencyCollector() { }
void DependencyCollector::attachToPreprocessor(Preprocessor &PP) {
- PP.addPPCallbacks(std::make_unique<DepCollectorPPCallbacks>(
- *this, PP.getSourceManager(), PP.getDiagnostics()));
+ PP.addPPCallbacks(std::make_unique<DepCollectorPPCallbacks>(*this, PP));
PP.getHeaderSearchInfo().getModuleMap().addModuleMapCallbacks(
std::make_unique<DepCollectorMMCallbacks>(*this));
}
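A minimal sketch of the callback shape this file migrates to; the signature is copied from the hunk above, while the surrounding struct is hypothetical:

    #include "clang/Basic/SourceManager.h"
    #include "clang/Lex/PPCallbacks.h"

    struct EnterFileLogger : clang::PPCallbacks {
      void LexedFileChanged(clang::FileID FID, LexedFileChangeReason Reason,
                            clang::SrcMgr::CharacteristicKind FileType,
                            clang::FileID PrevFID,
                            clang::SourceLocation Loc) override {
        if (Reason != LexedFileChangeReason::EnterFile)
          return;
        // FID already identifies the lexed file, so the old
        // SM.getFileID(SM.getExpansionLoc(Loc)) round-trip is unnecessary and
        // #line markers cannot skew the dependency list.
      }
    };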
diff --git a/contrib/llvm-project/clang/lib/Frontend/DependencyGraph.cpp b/contrib/llvm-project/clang/lib/Frontend/DependencyGraph.cpp
index 4bed4e2d4403..4cbdb3d5eeb8 100644
--- a/contrib/llvm-project/clang/lib/Frontend/DependencyGraph.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/DependencyGraph.cpp
@@ -29,9 +29,9 @@ class DependencyGraphCallback : public PPCallbacks {
const Preprocessor *PP;
std::string OutputFile;
std::string SysRoot;
- llvm::SetVector<const FileEntry *> AllFiles;
- typedef llvm::DenseMap<const FileEntry *,
- SmallVector<const FileEntry *, 2> > DependencyMap;
+ llvm::SetVector<FileEntryRef> AllFiles;
+ using DependencyMap =
+ llvm::DenseMap<FileEntryRef, SmallVector<FileEntryRef, 2>>;
DependencyMap Dependencies;
@@ -47,9 +47,9 @@ public:
void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
StringRef FileName, bool IsAngled,
- CharSourceRange FilenameRange, const FileEntry *File,
- StringRef SearchPath, StringRef RelativePath,
- const Module *Imported,
+ CharSourceRange FilenameRange,
+ Optional<FileEntryRef> File, StringRef SearchPath,
+ StringRef RelativePath, const Module *Imported,
SrcMgr::CharacteristicKind FileType) override;
void EndOfMainFile() override {
@@ -71,7 +71,7 @@ void DependencyGraphCallback::InclusionDirective(
StringRef FileName,
bool IsAngled,
CharSourceRange FilenameRange,
- const FileEntry *File,
+ Optional<FileEntryRef> File,
StringRef SearchPath,
StringRef RelativePath,
const Module *Imported,
@@ -80,15 +80,15 @@ void DependencyGraphCallback::InclusionDirective(
return;
SourceManager &SM = PP->getSourceManager();
- const FileEntry *FromFile
- = SM.getFileEntryForID(SM.getFileID(SM.getExpansionLoc(HashLoc)));
+ Optional<FileEntryRef> FromFile =
+ SM.getFileEntryRefForID(SM.getFileID(SM.getExpansionLoc(HashLoc)));
if (!FromFile)
return;
- Dependencies[FromFile].push_back(File);
+ Dependencies[*FromFile].push_back(*File);
- AllFiles.insert(File);
- AllFiles.insert(FromFile);
+ AllFiles.insert(*File);
+ AllFiles.insert(*FromFile);
}
raw_ostream &
@@ -115,7 +115,7 @@ void DependencyGraphCallback::OutputGraphFile() {
OS.indent(2);
writeNodeReference(OS, AllFiles[I]);
OS << " [ shape=\"box\", label=\"";
- StringRef FileName = AllFiles[I]->getName();
+ StringRef FileName = AllFiles[I].getName();
if (FileName.startswith(SysRoot))
FileName = FileName.substr(SysRoot.size());
diff --git a/contrib/llvm-project/clang/lib/Frontend/ExtractAPIConsumer.cpp b/contrib/llvm-project/clang/lib/Frontend/ExtractAPIConsumer.cpp
deleted file mode 100644
index cdf67f3c327a..000000000000
--- a/contrib/llvm-project/clang/lib/Frontend/ExtractAPIConsumer.cpp
+++ /dev/null
@@ -1,32 +0,0 @@
-#include "clang/AST/ASTConsumer.h"
-#include "clang/AST/RecursiveASTVisitor.h"
-#include "clang/Frontend/ASTConsumers.h"
-#include "clang/Frontend/CompilerInstance.h"
-#include "clang/Frontend/FrontendActions.h"
-
-using namespace clang;
-
-namespace {
-class ExtractAPIVisitor : public RecursiveASTVisitor<ExtractAPIVisitor> {
-public:
- bool VisitNamedDecl(NamedDecl *Decl) {
- llvm::outs() << Decl->getName() << "\n";
- return true;
- }
-};
-
-class ExtractAPIConsumer : public ASTConsumer {
-public:
- void HandleTranslationUnit(ASTContext &Context) override {
- Visitor.TraverseDecl(Context.getTranslationUnitDecl());
- }
-
-private:
- ExtractAPIVisitor Visitor;
-};
-} // namespace
-
-std::unique_ptr<ASTConsumer>
-ExtractAPIAction::CreateASTConsumer(CompilerInstance &CI, StringRef InFile) {
- return std::make_unique<ExtractAPIConsumer>();
-}
diff --git a/contrib/llvm-project/clang/lib/Frontend/FrontendAction.cpp b/contrib/llvm-project/clang/lib/Frontend/FrontendAction.cpp
index 089f40b36089..65160dd7e0b1 100644
--- a/contrib/llvm-project/clang/lib/Frontend/FrontendAction.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/FrontendAction.cpp
@@ -331,7 +331,7 @@ static std::error_code collectModuleHeaderIncludes(
return std::error_code();
// Resolve all lazy header directives to header files.
- ModMap.resolveHeaderDirectives(Module);
+ ModMap.resolveHeaderDirectives(Module, /*File=*/llvm::None);
// If any headers are missing, we can't build this module. In most cases,
// diagnostics for this should have already been produced; we only get here
@@ -413,11 +413,7 @@ static std::error_code collectModuleHeaderIncludes(
// Sort header paths and make the header inclusion order deterministic
// across different OSs and filesystems.
- llvm::sort(Headers.begin(), Headers.end(), [](
- const std::pair<std::string, const FileEntry *> &LHS,
- const std::pair<std::string, const FileEntry *> &RHS) {
- return LHS.first < RHS.first;
- });
+ llvm::sort(Headers.begin(), Headers.end(), llvm::less_first());
for (auto &H : Headers) {
// Include this header as part of the umbrella directory.
Module->addTopHeader(H.second);
@@ -465,6 +461,15 @@ static bool loadModuleMapForModuleBuild(CompilerInstance &CI, bool IsSystem,
if (SrcMgr.getBufferOrFake(ModuleMapID).getBufferSize() == Offset)
Offset = 0;
+ // Infer framework module if possible.
+ if (HS.getModuleMap().canInferFrameworkModule(ModuleMap->getDir())) {
+ SmallString<128> InferredFrameworkPath = ModuleMap->getDir()->getName();
+ llvm::sys::path::append(InferredFrameworkPath,
+ CI.getLangOpts().ModuleName + ".framework");
+ if (auto Dir = CI.getFileManager().getDirectory(InferredFrameworkPath))
+ (void)HS.getModuleMap().inferFrameworkModule(*Dir, IsSystem, nullptr);
+ }
+
return false;
}
@@ -752,10 +757,10 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
PreprocessorOptions &PPOpts = CI.getPreprocessorOpts();
StringRef PCHInclude = PPOpts.ImplicitPCHInclude;
std::string SpecificModuleCachePath = CI.getSpecificModuleCachePath();
- if (auto PCHDir = FileMgr.getDirectory(PCHInclude)) {
+ if (auto PCHDir = FileMgr.getOptionalDirectoryRef(PCHInclude)) {
std::error_code EC;
SmallString<128> DirNative;
- llvm::sys::path::native((*PCHDir)->getName(), DirNative);
+ llvm::sys::path::native(PCHDir->getName(), DirNative);
bool Found = false;
llvm::vfs::FileSystem &FS = FileMgr.getVirtualFileSystem();
for (llvm::vfs::directory_iterator Dir = FS.dir_begin(DirNative, EC),
@@ -789,10 +794,66 @@ bool FrontendAction::BeginSourceFile(CompilerInstance &CI,
&CI.getPreprocessor());
HasBegunSourceFile = true;
- // Initialize the main file entry.
+ // Handle C++20 header units.
+ // Here, the user has the option to specify that the header name should be
+ // looked up in the pre-processor search paths (and the main filename as
+ // passed by the driver might therefore be incomplete until that look-up).
+ if (CI.getLangOpts().CPlusPlusModules && Input.getKind().isHeaderUnit() &&
+ !Input.getKind().isPreprocessed()) {
+ StringRef FileName = Input.getFile();
+ InputKind Kind = Input.getKind();
+ if (Kind.getHeaderUnitKind() != InputKind::HeaderUnit_Abs) {
+ assert(CI.hasPreprocessor() &&
+ "trying to build a header unit without a Pre-processor?");
+ HeaderSearch &HS = CI.getPreprocessor().getHeaderSearchInfo();
+ // Relative searches begin from CWD.
+ const DirectoryEntry *Dir = nullptr;
+ if (auto DirOrErr = CI.getFileManager().getDirectory("."))
+ Dir = *DirOrErr;
+ SmallVector<std::pair<const FileEntry *, const DirectoryEntry *>, 1> CWD;
+ CWD.push_back({nullptr, Dir});
+ Optional<FileEntryRef> FE =
+ HS.LookupFile(FileName, SourceLocation(),
+ /*Angled*/ Input.getKind().getHeaderUnitKind() ==
+ InputKind::HeaderUnit_System,
+ nullptr, nullptr, CWD, nullptr, nullptr, nullptr,
+ nullptr, nullptr, nullptr);
+ if (!FE) {
+ CI.getDiagnostics().Report(diag::err_module_header_file_not_found)
+ << FileName;
+ return false;
+ }
+ // We now have the filename...
+ FileName = FE->getFileEntry().getName();
+ // ... still a header unit, but now use the path as written.
+ Kind = Input.getKind().withHeaderUnit(InputKind::HeaderUnit_Abs);
+ Input = FrontendInputFile(FileName, Kind, Input.isSystem());
+ }
+ // Unless the user has overridden the name, the header unit module name is
+ // the pathname for the file.
+ if (CI.getLangOpts().ModuleName.empty())
+ CI.getLangOpts().ModuleName = std::string(FileName);
+ CI.getLangOpts().CurrentModule = CI.getLangOpts().ModuleName;
+ }
+
if (!CI.InitializeSourceManager(Input))
return false;
+ if (CI.getLangOpts().CPlusPlusModules && Input.getKind().isHeaderUnit() &&
+ Input.getKind().isPreprocessed() && !usesPreprocessorOnly()) {
+ // We have an input filename like foo.iih, but we want to find the right
+ // module name (and original file, to build the map entry).
+ // Check if the first line specifies the original source file name with a
+ // linemarker.
+ std::string PresumedInputFile = std::string(getCurrentFileOrBufferName());
+ ReadOriginalFileName(CI, PresumedInputFile);
+ // Unless the user overrides this, the module name is the name by which the
+ // original file was known.
+ if (CI.getLangOpts().ModuleName.empty())
+ CI.getLangOpts().ModuleName = std::string(PresumedInputFile);
+ CI.getLangOpts().CurrentModule = CI.getLangOpts().ModuleName;
+ }
+
// For module map files, we first parse the module map and synthesize a
// "<module-includes>" buffer before more conventional processing.
if (Input.getKind().getFormat() == InputKind::ModuleMap) {
@@ -1014,7 +1075,7 @@ void FrontendAction::EndSourceFile() {
}
if (CI.getFrontendOpts().ShowStats) {
- llvm::errs() << "\nSTATISTICS FOR '" << getCurrentFile() << "':\n";
+ llvm::errs() << "\nSTATISTICS FOR '" << getCurrentFileOrBufferName() << "':\n";
CI.getPreprocessor().PrintStats();
CI.getPreprocessor().getIdentifierTable().PrintStats();
CI.getPreprocessor().getHeaderSearchInfo().PrintStats();
diff --git a/contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp b/contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp
index ad2e6039477f..f61c83a2a465 100644
--- a/contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/FrontendActions.cpp
@@ -11,19 +11,21 @@
#include "clang/AST/Decl.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/LangStandard.h"
+#include "clang/Basic/Module.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/ASTConsumers.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "clang/Frontend/MultiplexConsumer.h"
#include "clang/Frontend/Utils.h"
-#include "clang/Lex/DependencyDirectivesSourceMinimizer.h"
+#include "clang/Lex/DependencyDirectivesScanner.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Sema/TemplateInstCallback.h"
#include "clang/Serialization/ASTReader.h"
#include "clang/Serialization/ASTWriter.h"
+#include "clang/Serialization/ModuleFile.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -334,6 +336,21 @@ GenerateHeaderModuleAction::CreateOutputFile(CompilerInstance &CI,
return CI.createDefaultOutputFile(/*Binary=*/true, InFile, "pcm");
}
+bool GenerateHeaderUnitAction::BeginSourceFileAction(CompilerInstance &CI) {
+ if (!CI.getLangOpts().CPlusPlusModules) {
+ CI.getDiagnostics().Report(diag::err_module_interface_requires_cpp_modules);
+ return false;
+ }
+ CI.getLangOpts().setCompilingModule(LangOptions::CMK_HeaderUnit);
+ return GenerateModuleAction::BeginSourceFileAction(CI);
+}
+
+std::unique_ptr<raw_pwrite_stream>
+GenerateHeaderUnitAction::CreateOutputFile(CompilerInstance &CI,
+ StringRef InFile) {
+ return CI.createDefaultOutputFile(/*Binary=*/true, InFile, "pcm");
+}
+
SyntaxOnlyAction::~SyntaxOnlyAction() {
}
@@ -463,6 +480,8 @@ private:
return "InitializingStructuredBinding";
case CodeSynthesisContext::MarkingClassDllexported:
return "MarkingClassDllexported";
+ case CodeSynthesisContext::BuildingBuiltinDumpStructCall:
+ return "BuildingBuiltinDumpStructCall";
}
return "";
}
@@ -806,7 +825,28 @@ bool DumpModuleInfoAction::BeginInvocation(CompilerInstance &CI) {
return true;
}
+static StringRef ModuleKindName(Module::ModuleKind MK) {
+ switch (MK) {
+ case Module::ModuleMapModule:
+ return "Module Map Module";
+ case Module::ModuleInterfaceUnit:
+ return "Interface Unit";
+ case Module::ModulePartitionInterface:
+ return "Partition Interface";
+ case Module::ModulePartitionImplementation:
+ return "Partition Implementation";
+ case Module::ModuleHeaderUnit:
+ return "Header Unit";
+ case Module::GlobalModuleFragment:
+ return "Global Module Fragment";
+ case Module::PrivateModuleFragment:
+ return "Private Module Fragment";
+ }
+ llvm_unreachable("unknown module kind!");
+}
+
void DumpModuleInfoAction::ExecuteAction() {
+ assert(isCurrentFileAST() && "dumping non-AST?");
// Set up the output file.
std::unique_ptr<llvm::raw_fd_ostream> OutFile;
StringRef OutputFileName = getCompilerInstance().getFrontendOpts().OutputFile;
@@ -827,8 +867,87 @@ void DumpModuleInfoAction::ExecuteAction() {
Preprocessor &PP = getCompilerInstance().getPreprocessor();
DumpModuleInfoListener Listener(Out);
- HeaderSearchOptions &HSOpts =
- PP.getHeaderSearchInfo().getHeaderSearchOpts();
+ HeaderSearchOptions &HSOpts = PP.getHeaderSearchInfo().getHeaderSearchOpts();
+
+ // The FrontendAction::BeginSourceFile() method loads the AST, so much of
+ // the information is already available and modules should have been
+ // loaded.
+
+ const LangOptions &LO = getCurrentASTUnit().getLangOpts();
+ if (LO.CPlusPlusModules && !LO.CurrentModule.empty()) {
+
+ ASTReader *R = getCurrentASTUnit().getASTReader().get();
+ unsigned SubModuleCount = R->getTotalNumSubmodules();
+ serialization::ModuleFile &MF = R->getModuleManager().getPrimaryModule();
+ Out << " ====== C++20 Module structure ======\n";
+
+ if (MF.ModuleName != LO.CurrentModule)
+ Out << " Mismatched module names : " << MF.ModuleName << " and "
+ << LO.CurrentModule << "\n";
+
+ struct SubModInfo {
+ unsigned Idx;
+ Module *Mod;
+ Module::ModuleKind Kind;
+ std::string &Name;
+ bool Seen;
+ };
+ std::map<std::string, SubModInfo> SubModMap;
+ auto PrintSubMapEntry = [&](std::string Name, Module::ModuleKind Kind) {
+ Out << " " << ModuleKindName(Kind) << " '" << Name << "'";
+ auto I = SubModMap.find(Name);
+ if (I == SubModMap.end())
+ Out << " was not found in the sub modules!\n";
+ else {
+ I->second.Seen = true;
+ Out << " is at index #" << I->second.Idx << "\n";
+ }
+ };
+ Module *Primary = nullptr;
+ for (unsigned Idx = 0; Idx <= SubModuleCount; ++Idx) {
+ Module *M = R->getModule(Idx);
+ if (!M)
+ continue;
+ if (M->Name == LO.CurrentModule) {
+ Primary = M;
+ Out << " " << ModuleKindName(M->Kind) << " '" << LO.CurrentModule
+ << "' is the Primary Module at index #" << Idx << "\n";
+ SubModMap.insert({M->Name, {Idx, M, M->Kind, M->Name, true}});
+ } else
+ SubModMap.insert({M->Name, {Idx, M, M->Kind, M->Name, false}});
+ }
+ if (Primary) {
+ if (!Primary->submodules().empty())
+ Out << " Sub Modules:\n";
+ for (auto MI : Primary->submodules()) {
+ PrintSubMapEntry(MI->Name, MI->Kind);
+ }
+ if (!Primary->Imports.empty())
+ Out << " Imports:\n";
+ for (auto IMP : Primary->Imports) {
+ PrintSubMapEntry(IMP->Name, IMP->Kind);
+ }
+ if (!Primary->Exports.empty())
+ Out << " Exports:\n";
+ for (unsigned MN = 0, N = Primary->Exports.size(); MN != N; ++MN) {
+ if (Module *M = Primary->Exports[MN].getPointer()) {
+ PrintSubMapEntry(M->Name, M->Kind);
+ }
+ }
+ }
+ // Now let's print out any modules we did not see as part of the Primary.
+ for (auto SM : SubModMap) {
+ if (!SM.second.Seen && SM.second.Mod) {
+ Out << " " << ModuleKindName(SM.second.Kind) << " '" << SM.first
+ << "' at index #" << SM.second.Idx
+ << " has no direct reference in the Primary\n";
+ }
+ }
+ Out << " ====== ======\n";
+ }
+
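For orientation, a hedged sketch (not part of the patch) of the kind of C++20 module the dump above walks; which section each declaration lands in is an assumption read off the loops over submodules(), Imports, and Exports, and all names are illustrative:

    // M.cppm -- primary module interface unit (hypothetical input)
    export module M;      // reported as the Primary Module
    export import :Part;  // partition: expected under Sub Modules and Exports
    import N;             // expected under Imports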
+  // The remainder of the output is produced by the listener as the AST
+  // file control block is (re-)parsed.
ASTReader::readASTFileControlBlock(
getCurrentFile(), FileMgr, getCompilerInstance().getPCHContainerReader(),
/*FindModuleFileExtensions=*/true, Listener,
@@ -961,6 +1080,7 @@ void PrintPreambleAction::ExecuteAction() {
case Language::OpenCLCXX:
case Language::CUDA:
case Language::HIP:
+ case Language::HLSL:
break;
case Language::Unknown:
@@ -1037,10 +1157,10 @@ void PrintDependencyDirectivesSourceMinimizerAction::ExecuteAction() {
SourceManager &SM = CI.getPreprocessor().getSourceManager();
llvm::MemoryBufferRef FromFile = SM.getBufferOrFake(SM.getMainFileID());
- llvm::SmallString<1024> Output;
- llvm::SmallVector<minimize_source_to_dependency_directives::Token, 32> Toks;
- if (minimizeSourceToDependencyDirectives(
- FromFile.getBuffer(), Output, Toks, &CI.getDiagnostics(),
+ llvm::SmallVector<dependency_directives_scan::Token, 16> Tokens;
+ llvm::SmallVector<dependency_directives_scan::Directive, 32> Directives;
+ if (scanSourceForDependencyDirectives(
+ FromFile.getBuffer(), Tokens, Directives, &CI.getDiagnostics(),
SM.getLocForStartOfFile(SM.getMainFileID()))) {
assert(CI.getDiagnostics().hasErrorOccurred() &&
"no errors reported for failure");
@@ -1059,7 +1179,8 @@ void PrintDependencyDirectivesSourceMinimizerAction::ExecuteAction() {
}
return;
}
- llvm::outs() << Output;
+ printDependencyDirectivesAsSource(FromFile.getBuffer(), Directives,
+ llvm::outs());
}
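A hedged standalone sketch of the new scanning API used above, mirroring this hunk's call pattern (the header path and the optional diagnostics/location parameters are assumptions based on this patch):

    #include "clang/Lex/DependencyDirectivesScanner.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace clang;

    // Scan a buffer for dependency directives; print them back as source.
    static bool printDirectives(StringRef Source, llvm::raw_ostream &OS) {
      llvm::SmallVector<dependency_directives_scan::Token, 16> Tokens;
      llvm::SmallVector<dependency_directives_scan::Directive, 32> Directives;
      if (scanSourceForDependencyDirectives(Source, Tokens, Directives))
        return false; // the scanner returns true on failure
      printDependencyDirectivesAsSource(Source, Directives, OS);
      return true;
    }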
void GetDependenciesByModuleNameAction::ExecuteAction() {
diff --git a/contrib/llvm-project/clang/lib/Frontend/FrontendOptions.cpp b/contrib/llvm-project/clang/lib/Frontend/FrontendOptions.cpp
index 37ac428a8003..a7bdae343a94 100644
--- a/contrib/llvm-project/clang/lib/Frontend/FrontendOptions.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/FrontendOptions.cpp
@@ -27,7 +27,7 @@ InputKind FrontendOptions::getInputKindForExtension(StringRef Extension) {
.Cases("C", "cc", "cp", Language::CXX)
.Cases("cpp", "CPP", "c++", "cxx", "hpp", "hxx", Language::CXX)
.Case("cppm", Language::CXX)
- .Case("iim", InputKind(Language::CXX).getPreprocessed())
+ .Cases("iim", "iih", InputKind(Language::CXX).getPreprocessed())
.Case("cl", Language::OpenCL)
.Case("clcpp", Language::OpenCLCXX)
.Cases("cu", "cuh", Language::CUDA)
diff --git a/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp b/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp
index e259ab47c558..fe3736c07c3c 100644
--- a/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/InitPreprocessor.cpp
@@ -371,6 +371,50 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
const LangOptions &LangOpts,
const FrontendOptions &FEOpts,
MacroBuilder &Builder) {
+ if (LangOpts.HLSL) {
+ Builder.defineMacro("__hlsl_clang");
+ // HLSL Version
+ Builder.defineMacro("__HLSL_VERSION",
+ Twine((unsigned)LangOpts.getHLSLVersion()));
+
+ if (LangOpts.NativeHalfType)
+ Builder.defineMacro("__HLSL_ENABLE_16_BIT",
+ Twine((unsigned)LangOpts.getHLSLVersion()));
+
+ // Shader target information
+ // "enums" for shader stages
+ Builder.defineMacro("__SHADER_STAGE_VERTEX",
+ Twine((uint32_t)ShaderStage::Vertex));
+ Builder.defineMacro("__SHADER_STAGE_PIXEL",
+ Twine((uint32_t)ShaderStage::Pixel));
+ Builder.defineMacro("__SHADER_STAGE_GEOMETRY",
+ Twine((uint32_t)ShaderStage::Geometry));
+ Builder.defineMacro("__SHADER_STAGE_HULL",
+ Twine((uint32_t)ShaderStage::Hull));
+ Builder.defineMacro("__SHADER_STAGE_DOMAIN",
+ Twine((uint32_t)ShaderStage::Domain));
+ Builder.defineMacro("__SHADER_STAGE_COMPUTE",
+ Twine((uint32_t)ShaderStage::Compute));
+ Builder.defineMacro("__SHADER_STAGE_AMPLIFICATION",
+ Twine((uint32_t)ShaderStage::Amplification));
+ Builder.defineMacro("__SHADER_STAGE_MESH",
+ Twine((uint32_t)ShaderStage::Mesh));
+ Builder.defineMacro("__SHADER_STAGE_LIBRARY",
+ Twine((uint32_t)ShaderStage::Library));
+ // The current shader stage itself
+ uint32_t StageInteger = (uint32_t)TI.getTriple().getEnvironment() -
+ (uint32_t)llvm::Triple::Pixel;
+
+ Builder.defineMacro("__SHADER_TARGET_STAGE", Twine(StageInteger));
+ // Add target versions
+ if (TI.getTriple().getOS() == llvm::Triple::ShaderModel) {
+ VersionTuple Version = TI.getTriple().getOSVersion();
+ Builder.defineMacro("__SHADER_TARGET_MAJOR", Twine(Version.getMajor()));
+ unsigned Minor = Version.getMinor().value_or(0);
+ Builder.defineMacro("__SHADER_TARGET_MINOR", Twine(Minor));
+ }
+ return;
+ }
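Taken together, the macros above let HLSL sources branch on the target stage and feature level. A hedged illustration (the shader snippets are hypothetical, not from this patch):

    #if __SHADER_TARGET_STAGE == __SHADER_STAGE_COMPUTE
    // compute-stage-only declarations
    #endif
    #ifdef __HLSL_ENABLE_16_BIT
    // native 16-bit types are available
    #endif
    #if __SHADER_TARGET_MAJOR > 6 || \
        (__SHADER_TARGET_MAJOR == 6 && __SHADER_TARGET_MINOR >= 2)
    // requires Shader Model 6.2 or later
    #endif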
// C++ [cpp.predefined]p1:
// The following macro names shall be defined by the implementation:
@@ -538,6 +582,9 @@ static void InitializeStandardPredefinedMacros(const TargetInfo &TI,
Builder.defineMacro("__HIP_MEMORY_SCOPE_SYSTEM", "5");
if (LangOpts.CUDAIsDevice)
Builder.defineMacro("__HIP_DEVICE_COMPILE__");
+ if (LangOpts.GPUDefaultStream ==
+ LangOptions::GPUDefaultStreamKind::PerThread)
+ Builder.defineMacro("HIP_API_PER_THREAD_DEFAULT_STREAM");
}
}
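A hedged note on the macro defined just above: it mirrors CUDA's CUDA_API_PER_THREAD_DEFAULT_STREAM, and HIP's runtime headers key off it to give the default stream per-thread semantics. Illustrative consumer code:

    #ifdef HIP_API_PER_THREAD_DEFAULT_STREAM
    // kernels launched on the default stream use hipStreamPerThread
    #endif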
@@ -558,10 +605,11 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_unicode_literals", "200710L");
Builder.defineMacro("__cpp_user_defined_literals", "200809L");
Builder.defineMacro("__cpp_lambdas", "200907L");
- Builder.defineMacro("__cpp_constexpr",
- LangOpts.CPlusPlus20 ? "201907L" :
- LangOpts.CPlusPlus17 ? "201603L" :
- LangOpts.CPlusPlus14 ? "201304L" : "200704");
+ Builder.defineMacro("__cpp_constexpr", LangOpts.CPlusPlus2b ? "202110L"
+ : LangOpts.CPlusPlus20 ? "201907L"
+ : LangOpts.CPlusPlus17 ? "201603L"
+ : LangOpts.CPlusPlus14 ? "201304L"
+ : "200704");
Builder.defineMacro("__cpp_constexpr_in_decltype", "201711L");
Builder.defineMacro("__cpp_range_based_for",
LangOpts.CPlusPlus17 ? "201603L" : "200907");
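With the restacked conditional above, C++2b now reports __cpp_constexpr as 202110L (P2242, non-literal variables in constexpr functions). A hedged sketch of how user code consumes the macro; the feature gated here (trivial default initialization in constexpr, P1331) is one of those covered by the 201907L bump:

    #if defined(__cpp_constexpr) && __cpp_constexpr >= 201907L
    // C++20-level constexpr: uninitialized locals are permitted.
    constexpr int first_of(int x) {
      int tmp;   // trivial default initialization inside constexpr (P1331)
      tmp = x;
      return tmp;
    }
    static_assert(first_of(7) == 7);
    #endif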
@@ -642,6 +690,7 @@ static void InitializeCPlusPlusFeatureTestMacros(const LangOptions &LangOpts,
Builder.defineMacro("__cpp_implicit_move", "202011L");
Builder.defineMacro("__cpp_size_t_suffix", "202011L");
Builder.defineMacro("__cpp_if_consteval", "202106L");
+ Builder.defineMacro("__cpp_multidimensional_subscript", "202110L");
}
if (LangOpts.Char8)
Builder.defineMacro("__cpp_char8_t", "201811L");
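The __cpp_multidimensional_subscript macro (202110L, P2128) added to the C++2b block above advertises multi-argument operator[]. A minimal sketch of the feature:

    struct Grid {
      int data[4][4]{};
      // C++23: operator[] may take more than one parameter.
      int &operator[](int r, int c) { return data[r][c]; }
    };
    // usage: Grid g; g[1, 2] = 5;  (in C++20 the same tokens were a
    // comma expression inside a single-argument subscript)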
@@ -773,19 +822,20 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
if (version >= VersionTuple(2, 0))
Builder.defineMacro("__OBJC_GNUSTEP_RUNTIME_ABI__", "20");
else
- Builder.defineMacro("__OBJC_GNUSTEP_RUNTIME_ABI__",
- "1" + Twine(std::min(8U, version.getMinor().getValueOr(0))));
+ Builder.defineMacro(
+ "__OBJC_GNUSTEP_RUNTIME_ABI__",
+ "1" + Twine(std::min(8U, version.getMinor().value_or(0))));
}
if (LangOpts.ObjCRuntime.getKind() == ObjCRuntime::ObjFW) {
VersionTuple tuple = LangOpts.ObjCRuntime.getVersion();
unsigned minor = 0;
- if (tuple.getMinor().hasValue())
+ if (tuple.getMinor())
minor = tuple.getMinor().getValue();
unsigned subminor = 0;
- if (tuple.getSubminor().hasValue())
+ if (tuple.getSubminor())
subminor = tuple.getSubminor().getValue();
Builder.defineMacro("__OBJFW_RUNTIME_ABI__",
@@ -1135,7 +1185,6 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
}
// Macros to control C99 numerics and <float.h>
- Builder.defineMacro("__FLT_EVAL_METHOD__", Twine(TI.getFloatEvalMethod()));
Builder.defineMacro("__FLT_RADIX__", "2");
Builder.defineMacro("__DECIMAL_DIG__", "__LDBL_DECIMAL_DIG__");
@@ -1205,6 +1254,7 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
case 52:
Builder.defineMacro("_OPENMP", "202111");
break;
+ case 50:
default:
// Default version is OpenMP 5.0
Builder.defineMacro("_OPENMP", "201811");
@@ -1343,5 +1393,5 @@ void clang::InitializePreprocessor(
InitOpts.PrecompiledPreambleBytes.second);
// Copy PredefinedBuffer into the Preprocessor.
- PP.setPredefines(Predefines.str());
+ PP.setPredefines(std::move(PredefineBuffer));
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/LogDiagnosticPrinter.cpp b/contrib/llvm-project/clang/lib/Frontend/LogDiagnosticPrinter.cpp
index df8b23691a7d..d810b37ca57f 100644
--- a/contrib/llvm-project/clang/lib/Frontend/LogDiagnosticPrinter.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/LogDiagnosticPrinter.cpp
@@ -118,8 +118,7 @@ void LogDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
const SourceManager &SM = Info.getSourceManager();
FileID FID = SM.getMainFileID();
if (FID.isValid()) {
- const FileEntry *FE = SM.getFileEntryForID(FID);
- if (FE && FE->isValid())
+ if (const FileEntry *FE = SM.getFileEntryForID(FID))
MainFilename = std::string(FE->getName());
}
}
@@ -148,8 +147,7 @@ void LogDiagnosticPrinter::HandleDiagnostic(DiagnosticsEngine::Level Level,
// At least print the file name if available:
FileID FID = SM.getFileID(Info.getLocation());
if (FID.isValid()) {
- const FileEntry *FE = SM.getFileEntryForID(FID);
- if (FE && FE->isValid())
+ if (const FileEntry *FE = SM.getFileEntryForID(FID))
DE.Filename = std::string(FE->getName());
}
} else {
diff --git a/contrib/llvm-project/clang/lib/Frontend/ModuleDependencyCollector.cpp b/contrib/llvm-project/clang/lib/Frontend/ModuleDependencyCollector.cpp
index 4301e49f1d80..7e19ed3d56e5 100644
--- a/contrib/llvm-project/clang/lib/Frontend/ModuleDependencyCollector.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/ModuleDependencyCollector.cpp
@@ -47,9 +47,9 @@ struct ModuleDependencyPPCallbacks : public PPCallbacks {
void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
StringRef FileName, bool IsAngled,
- CharSourceRange FilenameRange, const FileEntry *File,
- StringRef SearchPath, StringRef RelativePath,
- const Module *Imported,
+ CharSourceRange FilenameRange,
+ Optional<FileEntryRef> File, StringRef SearchPath,
+ StringRef RelativePath, const Module *Imported,
SrcMgr::CharacteristicKind FileType) override {
if (!File)
return;
diff --git a/contrib/llvm-project/clang/lib/Frontend/MultiplexConsumer.cpp b/contrib/llvm-project/clang/lib/Frontend/MultiplexConsumer.cpp
index 34bbc365e647..737877329c9c 100644
--- a/contrib/llvm-project/clang/lib/Frontend/MultiplexConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/MultiplexConsumer.cpp
@@ -73,6 +73,12 @@ void MultiplexASTDeserializationListener::ModuleRead(
Listener->ModuleRead(ID, Mod);
}
+void MultiplexASTDeserializationListener::ModuleImportRead(
+ serialization::SubmoduleID ID, SourceLocation ImportLoc) {
+ for (auto &Listener : Listeners)
+ Listener->ModuleImportRead(ID, ImportLoc);
+}
+
// This ASTMutationListener forwards its notifications to a set of
// child listeners.
class MultiplexASTMutationListener : public ASTMutationListener {
diff --git a/contrib/llvm-project/clang/lib/Frontend/PrecompiledPreamble.cpp b/contrib/llvm-project/clang/lib/Frontend/PrecompiledPreamble.cpp
index 8aa80a4c96fb..e3c346655049 100644
--- a/contrib/llvm-project/clang/lib/Frontend/PrecompiledPreamble.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/PrecompiledPreamble.cpp
@@ -11,10 +11,8 @@
//===----------------------------------------------------------------------===//
#include "clang/Frontend/PrecompiledPreamble.h"
-#include "clang/AST/DeclObjC.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/LangStandard.h"
-#include "clang/Basic/TargetInfo.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/FrontendActions.h"
@@ -25,7 +23,6 @@
#include "clang/Lex/PreprocessorOptions.h"
#include "clang/Serialization/ASTWriter.h"
#include "llvm/ADT/SmallString.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Config/llvm-config.h"
@@ -99,13 +96,13 @@ public:
void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
StringRef FileName, bool IsAngled,
- CharSourceRange FilenameRange, const FileEntry *File,
- StringRef SearchPath, StringRef RelativePath,
- const Module *Imported,
+ CharSourceRange FilenameRange,
+ Optional<FileEntryRef> File, StringRef SearchPath,
+ StringRef RelativePath, const Module *Imported,
SrcMgr::CharacteristicKind FileType) override {
- // File is null if it wasn't found.
+ // File is None if it wasn't found.
// (We have some false negatives if PP recovered e.g. <foo> -> "foo")
- if (File != nullptr)
+ if (File)
return;
// If it's a rare absolute include, we know the full path already.
@@ -192,11 +189,55 @@ void TemporaryFiles::removeFile(StringRef File) {
llvm::sys::fs::remove(File);
}
+// A temp file that is deleted when its destructor runs. If the destructor is
+// not called for any reason, the file is deleted when static objects are
+// destroyed.
+// An assertion fires if two TempPCHFiles are created with the same name, so
+// this is not intended to be used outside preamble handling.
+class TempPCHFile {
+public:
+  // The factory function used to construct a TempPCHFile.
+ static std::unique_ptr<TempPCHFile> create() {
+ // FIXME: This is a hack so that we can override the preamble file during
+ // crash-recovery testing, which is the only case where the preamble files
+ // are not necessarily cleaned up.
+ if (const char *TmpFile = ::getenv("CINDEXTEST_PREAMBLE_FILE"))
+ return std::unique_ptr<TempPCHFile>(new TempPCHFile(TmpFile));
+
+ llvm::SmallString<64> File;
+    // Using the createTemporaryFile overload that returns a file descriptor
+    // guarantees that we never race in a multi-threaded setting (i.e., that
+    // multiple threads never get the same temporary path).
+ int FD;
+ if (auto EC =
+ llvm::sys::fs::createTemporaryFile("preamble", "pch", FD, File))
+ return nullptr;
+    // We only needed to make sure the file exists; close it right away.
+ llvm::sys::Process::SafelyCloseFileDescriptor(FD);
+ return std::unique_ptr<TempPCHFile>(new TempPCHFile(File.str().str()));
+ }
+
+ TempPCHFile &operator=(const TempPCHFile &) = delete;
+ TempPCHFile(const TempPCHFile &) = delete;
+ ~TempPCHFile() { TemporaryFiles::getInstance().removeFile(FilePath); };
+
+ /// A path where temporary file is stored.
+ llvm::StringRef getFilePath() const { return FilePath; };
+
+private:
+ TempPCHFile(std::string FilePath) : FilePath(std::move(FilePath)) {
+ TemporaryFiles::getInstance().addFile(this->FilePath);
+ }
+
+ std::string FilePath;
+};
+
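A hedged usage sketch for the RAII class above (illustrative, not part of the patch): the file's lifetime is tied to the object, so dropping the unique_ptr removes the file.

    if (std::unique_ptr<TempPCHFile> PCH = TempPCHFile::create())
      llvm::errs() << "preamble PCH at " << PCH->getFilePath() << "\n";
    // TempPCHFile's destructor has run here and removed the file.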
class PrecompilePreambleAction : public ASTFrontendAction {
public:
- PrecompilePreambleAction(std::string *InMemStorage,
+ PrecompilePreambleAction(std::shared_ptr<PCHBuffer> Buffer, bool WritePCHFile,
PreambleCallbacks &Callbacks)
- : InMemStorage(InMemStorage), Callbacks(Callbacks) {}
+ : Buffer(std::move(Buffer)), WritePCHFile(WritePCHFile),
+ Callbacks(Callbacks) {}
std::unique_ptr<ASTConsumer> CreateASTConsumer(CompilerInstance &CI,
StringRef InFile) override;
@@ -204,6 +245,12 @@ public:
bool hasEmittedPreamblePCH() const { return HasEmittedPreamblePCH; }
void setEmittedPreamblePCH(ASTWriter &Writer) {
+ if (FileOS) {
+ *FileOS << Buffer->Data;
+ // Make sure it hits disk now.
+ FileOS.reset();
+ }
+
this->HasEmittedPreamblePCH = true;
Callbacks.AfterPCHEmitted(Writer);
}
@@ -222,7 +269,9 @@ private:
friend class PrecompilePreambleConsumer;
bool HasEmittedPreamblePCH = false;
- std::string *InMemStorage;
+ std::shared_ptr<PCHBuffer> Buffer;
+  bool WritePCHFile; // If false, the PCH is written into the PCHBuffer only.
+ std::unique_ptr<llvm::raw_pwrite_stream> FileOS; // null if in-memory
PreambleCallbacks &Callbacks;
};
@@ -232,12 +281,11 @@ public:
const Preprocessor &PP,
InMemoryModuleCache &ModuleCache,
StringRef isysroot,
- std::unique_ptr<raw_ostream> Out)
- : PCHGenerator(PP, ModuleCache, "", isysroot,
- std::make_shared<PCHBuffer>(),
+ std::shared_ptr<PCHBuffer> Buffer)
+ : PCHGenerator(PP, ModuleCache, "", isysroot, std::move(Buffer),
ArrayRef<std::shared_ptr<ModuleFileExtension>>(),
/*AllowASTWithErrors=*/true),
- Action(Action), Out(std::move(Out)) {}
+ Action(Action) {}
bool HandleTopLevelDecl(DeclGroupRef DG) override {
Action.Callbacks.HandleTopLevelDecl(DG);
@@ -248,15 +296,6 @@ public:
PCHGenerator::HandleTranslationUnit(Ctx);
if (!hasEmittedPCH())
return;
-
- // Write the generated bitstream to "Out".
- *Out << getPCH();
- // Make sure it hits disk now.
- Out->flush();
- // Free the buffer.
- llvm::SmallVector<char, 0> Empty;
- getPCH() = std::move(Empty);
-
Action.setEmittedPreamblePCH(getWriter());
}
@@ -266,7 +305,6 @@ public:
private:
PrecompilePreambleAction &Action;
- std::unique_ptr<raw_ostream> Out;
};
std::unique_ptr<ASTConsumer>
@@ -276,21 +314,18 @@ PrecompilePreambleAction::CreateASTConsumer(CompilerInstance &CI,
if (!GeneratePCHAction::ComputeASTConsumerArguments(CI, Sysroot))
return nullptr;
- std::unique_ptr<llvm::raw_ostream> OS;
- if (InMemStorage) {
- OS = std::make_unique<llvm::raw_string_ostream>(*InMemStorage);
- } else {
- std::string OutputFile;
- OS = GeneratePCHAction::CreateOutputFile(CI, InFile, OutputFile);
+ if (WritePCHFile) {
+ std::string OutputFile; // unused
+ FileOS = GeneratePCHAction::CreateOutputFile(CI, InFile, OutputFile);
+ if (!FileOS)
+ return nullptr;
}
- if (!OS)
- return nullptr;
if (!CI.getFrontendOpts().RelocatablePCH)
Sysroot.clear();
return std::make_unique<PrecompilePreambleConsumer>(
- *this, CI.getPreprocessor(), CI.getModuleCache(), Sysroot, std::move(OS));
+ *this, CI.getPreprocessor(), CI.getModuleCache(), Sysroot, Buffer);
}
template <class T> bool moveOnNoError(llvm::ErrorOr<T> Val, T &Output) {
@@ -308,6 +343,60 @@ PreambleBounds clang::ComputePreambleBounds(const LangOptions &LangOpts,
return Lexer::ComputePreamble(Buffer.getBuffer(), LangOpts, MaxLines);
}
+class PrecompiledPreamble::PCHStorage {
+public:
+ static std::unique_ptr<PCHStorage> file(std::unique_ptr<TempPCHFile> File) {
+ assert(File);
+ std::unique_ptr<PCHStorage> S(new PCHStorage());
+ S->File = std::move(File);
+ return S;
+ }
+ static std::unique_ptr<PCHStorage> inMemory(std::shared_ptr<PCHBuffer> Buf) {
+ std::unique_ptr<PCHStorage> S(new PCHStorage());
+ S->Memory = std::move(Buf);
+ return S;
+ }
+
+ enum class Kind { InMemory, TempFile };
+ Kind getKind() const {
+ if (Memory)
+ return Kind::InMemory;
+ if (File)
+ return Kind::TempFile;
+ llvm_unreachable("Neither Memory nor File?");
+ }
+ llvm::StringRef filePath() const {
+ assert(getKind() == Kind::TempFile);
+ return File->getFilePath();
+ }
+ llvm::StringRef memoryContents() const {
+ assert(getKind() == Kind::InMemory);
+ return StringRef(Memory->Data.data(), Memory->Data.size());
+ }
+
+ // Shrink in-memory buffers to fit.
+ // This incurs a copy, but preambles tend to be long-lived.
+ // Only safe to call once nothing can alias the buffer.
+ void shrink() {
+ if (!Memory)
+ return;
+ Memory->Data = decltype(Memory->Data)(Memory->Data);
+ }
+
+private:
+ PCHStorage() = default;
+ PCHStorage(const PCHStorage &) = delete;
+ PCHStorage &operator=(const PCHStorage &) = delete;
+
+ std::shared_ptr<PCHBuffer> Memory;
+ std::unique_ptr<TempPCHFile> File;
+};
+
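The shrink() member above leans on a container idiom: copy-constructing from the existing data and move-assigning the result back reallocates to exactly the live size. A hedged generic illustration (assuming, as llvm::SmallVector does in practice, that the copy allocates only what it needs):

    llvm::SmallVector<char, 0> V;
    V.reserve(1 << 20);                 // capacity far exceeds eventual size
    V.append({'p', 'c', 'h'});
    V = llvm::SmallVector<char, 0>(V);  // rebuilt copy: capacity shrinks to fit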
+PrecompiledPreamble::~PrecompiledPreamble() = default;
+PrecompiledPreamble::PrecompiledPreamble(PrecompiledPreamble &&) = default;
+PrecompiledPreamble &
+PrecompiledPreamble::operator=(PrecompiledPreamble &&) = default;
+
llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
const CompilerInvocation &Invocation,
const llvm::MemoryBuffer *MainFileBuffer, PreambleBounds Bounds,
@@ -322,20 +411,19 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
PreprocessorOptions &PreprocessorOpts =
PreambleInvocation->getPreprocessorOpts();
- llvm::Optional<TempPCHFile> TempFile;
- if (!StoreInMemory) {
+ std::shared_ptr<PCHBuffer> Buffer = std::make_shared<PCHBuffer>();
+ std::unique_ptr<PCHStorage> Storage;
+ if (StoreInMemory) {
+ Storage = PCHStorage::inMemory(Buffer);
+ } else {
// Create a temporary file for the precompiled preamble. In rare
// circumstances, this can fail.
- llvm::ErrorOr<PrecompiledPreamble::TempPCHFile> PreamblePCHFile =
- PrecompiledPreamble::TempPCHFile::CreateNewPreamblePCHFile();
+ std::unique_ptr<TempPCHFile> PreamblePCHFile = TempPCHFile::create();
if (!PreamblePCHFile)
return BuildPreambleError::CouldntCreateTempFile;
- TempFile = std::move(*PreamblePCHFile);
+ Storage = PCHStorage::file(std::move(PreamblePCHFile));
}
- PCHStorage Storage = StoreInMemory ? PCHStorage(InMemoryPreamble())
- : PCHStorage(std::move(*TempFile));
-
// Save the preamble text for later; we'll need to compare against it for
// subsequent reparses.
std::vector<char> PreambleBytes(MainFileBuffer->getBufferStart(),
@@ -345,9 +433,8 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
// Tell the compiler invocation to generate a temporary precompiled header.
FrontendOpts.ProgramAction = frontend::GeneratePCH;
- FrontendOpts.OutputFile =
- std::string(StoreInMemory ? getInMemoryPreamblePath()
- : Storage.asFile().getFilePath());
+ FrontendOpts.OutputFile = std::string(
+ StoreInMemory ? getInMemoryPreamblePath() : Storage->filePath());
PreprocessorOpts.PrecompiledPreambleBytes.first = 0;
PreprocessorOpts.PrecompiledPreambleBytes.second = false;
// Inform preprocessor to record conditional stack when building the preamble.
@@ -409,9 +496,10 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
PreambleInputBuffer.release());
}
- std::unique_ptr<PrecompilePreambleAction> Act;
- Act.reset(new PrecompilePreambleAction(
- StoreInMemory ? &Storage.asMemory().Data : nullptr, Callbacks));
+ auto Act = std::make_unique<PrecompilePreambleAction>(
+ std::move(Buffer),
+ /*WritePCHFile=*/Storage->getKind() == PCHStorage::Kind::TempFile,
+ Callbacks);
if (!Act->BeginSourceFile(*Clang.get(), Clang->getFrontendOpts().Inputs[0]))
return BuildPreambleError::BeginSourceFileFailed;
@@ -441,6 +529,7 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
if (!Act->hasEmittedPreamblePCH())
return BuildPreambleError::CouldntEmitPCH;
+ Act.reset(); // Frees the PCH buffer, unless Storage keeps it in memory.
// Keep track of all of the files that the source manager knows about,
// so we can verify whether they have changed or not.
@@ -465,6 +554,11 @@ llvm::ErrorOr<PrecompiledPreamble> PrecompiledPreamble::Build(
}
}
+ // Shrinking the storage requires extra temporary memory.
+ // Destroying clang first reduces peak memory usage.
+ CICleanup.unregister();
+ Clang.reset();
+ Storage->shrink();
return PrecompiledPreamble(
std::move(Storage), std::move(PreambleBytes), PreambleEndsAtStartOfLine,
std::move(FilesInPreamble), std::move(MissingFiles));
@@ -475,16 +569,12 @@ PreambleBounds PrecompiledPreamble::getBounds() const {
}
std::size_t PrecompiledPreamble::getSize() const {
- switch (Storage.getKind()) {
- case PCHStorage::Kind::Empty:
- assert(false && "Calling getSize() on invalid PrecompiledPreamble. "
- "Was it std::moved?");
- return 0;
+ switch (Storage->getKind()) {
case PCHStorage::Kind::InMemory:
- return Storage.asMemory().Data.size();
+ return Storage->memoryContents().size();
case PCHStorage::Kind::TempFile: {
uint64_t Result;
- if (llvm::sys::fs::file_size(Storage.asFile().getFilePath(), Result))
+ if (llvm::sys::fs::file_size(Storage->filePath(), Result))
return 0;
assert(Result <= std::numeric_limits<std::size_t>::max() &&
@@ -621,7 +711,7 @@ void PrecompiledPreamble::OverridePreamble(
}
PrecompiledPreamble::PrecompiledPreamble(
- PCHStorage Storage, std::vector<char> PreambleBytes,
+ std::unique_ptr<PCHStorage> Storage, std::vector<char> PreambleBytes,
bool PreambleEndsAtStartOfLine,
llvm::StringMap<PreambleFileHash> FilesInPreamble,
llvm::StringSet<> MissingFiles)
@@ -629,142 +719,7 @@ PrecompiledPreamble::PrecompiledPreamble(
MissingFiles(std::move(MissingFiles)),
PreambleBytes(std::move(PreambleBytes)),
PreambleEndsAtStartOfLine(PreambleEndsAtStartOfLine) {
- assert(this->Storage.getKind() != PCHStorage::Kind::Empty);
-}
-
-llvm::ErrorOr<PrecompiledPreamble::TempPCHFile>
-PrecompiledPreamble::TempPCHFile::CreateNewPreamblePCHFile() {
- // FIXME: This is a hack so that we can override the preamble file during
- // crash-recovery testing, which is the only case where the preamble files
- // are not necessarily cleaned up.
- if (const char *TmpFile = ::getenv("CINDEXTEST_PREAMBLE_FILE"))
- return TempPCHFile(TmpFile);
-
- llvm::SmallString<64> File;
- // Using a version of createTemporaryFile with a file descriptor guarantees
- // that we would never get a race condition in a multi-threaded setting
- // (i.e., multiple threads getting the same temporary path).
- int FD;
- auto EC = llvm::sys::fs::createTemporaryFile("preamble", "pch", FD, File);
- if (EC)
- return EC;
- // We only needed to make sure the file exists, close the file right away.
- llvm::sys::Process::SafelyCloseFileDescriptor(FD);
- return TempPCHFile(std::string(std::move(File).str()));
-}
-
-PrecompiledPreamble::TempPCHFile::TempPCHFile(std::string FilePath)
- : FilePath(std::move(FilePath)) {
- TemporaryFiles::getInstance().addFile(*this->FilePath);
-}
-
-PrecompiledPreamble::TempPCHFile::TempPCHFile(TempPCHFile &&Other) {
- FilePath = std::move(Other.FilePath);
- Other.FilePath = None;
-}
-
-PrecompiledPreamble::TempPCHFile &PrecompiledPreamble::TempPCHFile::
-operator=(TempPCHFile &&Other) {
- RemoveFileIfPresent();
-
- FilePath = std::move(Other.FilePath);
- Other.FilePath = None;
- return *this;
-}
-
-PrecompiledPreamble::TempPCHFile::~TempPCHFile() { RemoveFileIfPresent(); }
-
-void PrecompiledPreamble::TempPCHFile::RemoveFileIfPresent() {
- if (FilePath) {
- TemporaryFiles::getInstance().removeFile(*FilePath);
- FilePath = None;
- }
-}
-
-llvm::StringRef PrecompiledPreamble::TempPCHFile::getFilePath() const {
- assert(FilePath && "TempPCHFile doesn't have a FilePath. Had it been moved?");
- return *FilePath;
-}
-
-PrecompiledPreamble::PCHStorage::PCHStorage(TempPCHFile File)
- : StorageKind(Kind::TempFile) {
- new (&asFile()) TempPCHFile(std::move(File));
-}
-
-PrecompiledPreamble::PCHStorage::PCHStorage(InMemoryPreamble Memory)
- : StorageKind(Kind::InMemory) {
- new (&asMemory()) InMemoryPreamble(std::move(Memory));
-}
-
-PrecompiledPreamble::PCHStorage::PCHStorage(PCHStorage &&Other) : PCHStorage() {
- *this = std::move(Other);
-}
-
-PrecompiledPreamble::PCHStorage &PrecompiledPreamble::PCHStorage::
-operator=(PCHStorage &&Other) {
- destroy();
-
- StorageKind = Other.StorageKind;
- switch (StorageKind) {
- case Kind::Empty:
- // do nothing;
- break;
- case Kind::TempFile:
- new (&asFile()) TempPCHFile(std::move(Other.asFile()));
- break;
- case Kind::InMemory:
- new (&asMemory()) InMemoryPreamble(std::move(Other.asMemory()));
- break;
- }
-
- Other.setEmpty();
- return *this;
-}
-
-PrecompiledPreamble::PCHStorage::~PCHStorage() { destroy(); }
-
-PrecompiledPreamble::PCHStorage::Kind
-PrecompiledPreamble::PCHStorage::getKind() const {
- return StorageKind;
-}
-
-PrecompiledPreamble::TempPCHFile &PrecompiledPreamble::PCHStorage::asFile() {
- assert(getKind() == Kind::TempFile);
- return *reinterpret_cast<TempPCHFile *>(&Storage);
-}
-
-const PrecompiledPreamble::TempPCHFile &
-PrecompiledPreamble::PCHStorage::asFile() const {
- return const_cast<PCHStorage *>(this)->asFile();
-}
-
-PrecompiledPreamble::InMemoryPreamble &
-PrecompiledPreamble::PCHStorage::asMemory() {
- assert(getKind() == Kind::InMemory);
- return *reinterpret_cast<InMemoryPreamble *>(&Storage);
-}
-
-const PrecompiledPreamble::InMemoryPreamble &
-PrecompiledPreamble::PCHStorage::asMemory() const {
- return const_cast<PCHStorage *>(this)->asMemory();
-}
-
-void PrecompiledPreamble::PCHStorage::destroy() {
- switch (StorageKind) {
- case Kind::Empty:
- return;
- case Kind::TempFile:
- asFile().~TempPCHFile();
- return;
- case Kind::InMemory:
- asMemory().~InMemoryPreamble();
- return;
- }
-}
-
-void PrecompiledPreamble::PCHStorage::setEmpty() {
- destroy();
- StorageKind = Kind::Empty;
+ assert(this->Storage != nullptr);
}
PrecompiledPreamble::PreambleFileHash
@@ -810,20 +765,23 @@ void PrecompiledPreamble::configurePreamble(
PreprocessorOpts.DisablePCHOrModuleValidation =
DisableValidationForModuleKind::PCH;
- setupPreambleStorage(Storage, PreprocessorOpts, VFS);
+ // Don't bother generating the long version of the predefines buffer.
+ // The preamble is going to overwrite it anyway.
+ PreprocessorOpts.UsePredefines = false;
+
+ setupPreambleStorage(*Storage, PreprocessorOpts, VFS);
}
void PrecompiledPreamble::setupPreambleStorage(
const PCHStorage &Storage, PreprocessorOptions &PreprocessorOpts,
IntrusiveRefCntPtr<llvm::vfs::FileSystem> &VFS) {
if (Storage.getKind() == PCHStorage::Kind::TempFile) {
- const TempPCHFile &PCHFile = Storage.asFile();
- PreprocessorOpts.ImplicitPCHInclude = std::string(PCHFile.getFilePath());
+ llvm::StringRef PCHPath = Storage.filePath();
+ PreprocessorOpts.ImplicitPCHInclude = PCHPath.str();
// Make sure we can access the PCH file even if we're using a VFS
IntrusiveRefCntPtr<llvm::vfs::FileSystem> RealFS =
llvm::vfs::getRealFileSystem();
- auto PCHPath = PCHFile.getFilePath();
if (VFS == RealFS || VFS->exists(PCHPath))
return;
auto Buf = RealFS->getBufferForFile(PCHPath);
@@ -844,7 +802,8 @@ void PrecompiledPreamble::setupPreambleStorage(
StringRef PCHPath = getInMemoryPreamblePath();
PreprocessorOpts.ImplicitPCHInclude = std::string(PCHPath);
- auto Buf = llvm::MemoryBuffer::getMemBuffer(Storage.asMemory().Data);
+ auto Buf = llvm::MemoryBuffer::getMemBuffer(
+ Storage.memoryContents(), PCHPath, /*RequiresNullTerminator=*/false);
VFS = createVFSOverlayForPreamblePCH(PCHPath, std::move(Buf), VFS);
}
}
diff --git a/contrib/llvm-project/clang/lib/Frontend/PrintPreprocessedOutput.cpp b/contrib/llvm-project/clang/lib/Frontend/PrintPreprocessedOutput.cpp
index 1d0022bda474..e3bd7178aefe 100644
--- a/contrib/llvm-project/clang/lib/Frontend/PrintPreprocessedOutput.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/PrintPreprocessedOutput.cpp
@@ -96,6 +96,7 @@ private:
bool UseLineDirectives;
bool IsFirstFileEntered;
bool MinimizeWhitespace;
+ bool DirectivesOnly;
Token PrevTok;
Token PrevPrevTok;
@@ -103,12 +104,13 @@ private:
public:
PrintPPOutputPPCallbacks(Preprocessor &pp, raw_ostream &os, bool lineMarkers,
bool defines, bool DumpIncludeDirectives,
- bool UseLineDirectives, bool MinimizeWhitespace)
+ bool UseLineDirectives, bool MinimizeWhitespace,
+ bool DirectivesOnly)
: PP(pp), SM(PP.getSourceManager()), ConcatInfo(PP), OS(os),
DisableLineMarkers(lineMarkers), DumpDefines(defines),
DumpIncludeDirectives(DumpIncludeDirectives),
UseLineDirectives(UseLineDirectives),
- MinimizeWhitespace(MinimizeWhitespace) {
+ MinimizeWhitespace(MinimizeWhitespace), DirectivesOnly(DirectivesOnly) {
CurLine = 0;
CurFilename += "<uninit>";
EmittedTokensOnThisLine = false;
@@ -143,9 +145,9 @@ public:
FileID PrevFID) override;
void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
StringRef FileName, bool IsAngled,
- CharSourceRange FilenameRange, const FileEntry *File,
- StringRef SearchPath, StringRef RelativePath,
- const Module *Imported,
+ CharSourceRange FilenameRange,
+ Optional<FileEntryRef> File, StringRef SearchPath,
+ StringRef RelativePath, const Module *Imported,
SrcMgr::CharacteristicKind FileType) override;
void Ident(SourceLocation Loc, StringRef str) override;
void PragmaMessage(SourceLocation Loc, StringRef Namespace,
@@ -189,7 +191,8 @@ public:
bool MoveToLine(const Token &Tok, bool RequireStartOfLine) {
PresumedLoc PLoc = SM.getPresumedLoc(Tok.getLocation());
unsigned TargetLine = PLoc.isValid() ? PLoc.getLine() : CurLine;
- bool IsFirstInFile = Tok.isAtStartOfLine() && PLoc.getLine() == 1;
+ bool IsFirstInFile =
+ Tok.isAtStartOfLine() && PLoc.isValid() && PLoc.getLine() == 1;
return MoveToLine(TargetLine, RequireStartOfLine) || IsFirstInFile;
}
@@ -391,7 +394,7 @@ void PrintPPOutputPPCallbacks::InclusionDirective(
StringRef FileName,
bool IsAngled,
CharSourceRange FilenameRange,
- const FileEntry *File,
+ Optional<FileEntryRef> File,
StringRef SearchPath,
StringRef RelativePath,
const Module *Imported,
@@ -466,12 +469,21 @@ void PrintPPOutputPPCallbacks::Ident(SourceLocation Loc, StringRef S) {
void PrintPPOutputPPCallbacks::MacroDefined(const Token &MacroNameTok,
const MacroDirective *MD) {
const MacroInfo *MI = MD->getMacroInfo();
- // Only print out macro definitions in -dD mode.
- if (!DumpDefines ||
+ // Print out macro definitions in -dD mode and when we have -fdirectives-only
+ // for C++20 header units.
+ if ((!DumpDefines && !DirectivesOnly) ||
// Ignore __FILE__ etc.
- MI->isBuiltinMacro()) return;
+ MI->isBuiltinMacro())
+ return;
- MoveToLine(MI->getDefinitionLoc(), /*RequireStartOfLine=*/true);
+ SourceLocation DefLoc = MI->getDefinitionLoc();
+ if (DirectivesOnly && !MI->isUsed()) {
+ SourceManager &SM = PP.getSourceManager();
+ if (SM.isWrittenInBuiltinFile(DefLoc) ||
+ SM.isWrittenInCommandLineFile(DefLoc))
+ return;
+ }
+ MoveToLine(DefLoc, /*RequireStartOfLine=*/true);
PrintMacroDefinition(*MacroNameTok.getIdentifierInfo(), *MI, PP, OS);
setEmittedDirectiveOnThisLine();
}
@@ -479,8 +491,10 @@ void PrintPPOutputPPCallbacks::MacroDefined(const Token &MacroNameTok,
void PrintPPOutputPPCallbacks::MacroUndefined(const Token &MacroNameTok,
const MacroDefinition &MD,
const MacroDirective *Undef) {
- // Only print out macro definitions in -dD mode.
- if (!DumpDefines) return;
+ // Print out macro definitions in -dD mode and when we have -fdirectives-only
+ // for C++20 header units.
+ if (!DumpDefines && !DirectivesOnly)
+ return;
MoveToLine(MacroNameTok.getLocation(), /*RequireStartOfLine=*/true);
OS << "#undef " << MacroNameTok.getIdentifierInfo()->getName();
@@ -958,7 +972,7 @@ void clang::DoPrintPreprocessedInput(Preprocessor &PP, raw_ostream *OS,
PrintPPOutputPPCallbacks *Callbacks = new PrintPPOutputPPCallbacks(
PP, *OS, !Opts.ShowLineMarkers, Opts.ShowMacros,
Opts.ShowIncludeDirectives, Opts.UseLineDirectives,
- Opts.MinimizeWhitespace);
+ Opts.MinimizeWhitespace, Opts.DirectivesOnly);
// Expand macros in pragmas with -fms-extensions. The assumption is that
// the majority of pragmas in such a file will be Microsoft pragmas.
@@ -994,6 +1008,8 @@ void clang::DoPrintPreprocessedInput(Preprocessor &PP, raw_ostream *OS,
// After we have configured the preprocessor, enter the main file.
PP.EnterMainSourceFile();
+ if (Opts.DirectivesOnly)
+ PP.SetMacroExpansionOnlyInDirectives();
// Consume all of the tokens that come from the predefines buffer. Those
// should not be emitted into the output and are guaranteed to be at the
diff --git a/contrib/llvm-project/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp b/contrib/llvm-project/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp
index 3e8d582f90c2..9141adec58c4 100644
--- a/contrib/llvm-project/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/Rewrite/InclusionRewriter.cpp
@@ -72,9 +72,9 @@ private:
SrcMgr::CharacteristicKind FileType) override;
void InclusionDirective(SourceLocation HashLoc, const Token &IncludeTok,
StringRef FileName, bool IsAngled,
- CharSourceRange FilenameRange, const FileEntry *File,
- StringRef SearchPath, StringRef RelativePath,
- const Module *Imported,
+ CharSourceRange FilenameRange,
+ Optional<FileEntryRef> File, StringRef SearchPath,
+ StringRef RelativePath, const Module *Imported,
SrcMgr::CharacteristicKind FileType) override;
void If(SourceLocation Loc, SourceRange ConditionRange,
ConditionValueKind ConditionValue) override;
@@ -186,7 +186,7 @@ void InclusionRewriter::InclusionDirective(SourceLocation HashLoc,
StringRef /*FileName*/,
bool /*IsAngled*/,
CharSourceRange /*FilenameRange*/,
- const FileEntry * /*File*/,
+ Optional<FileEntryRef> /*File*/,
StringRef /*SearchPath*/,
StringRef /*RelativePath*/,
const Module *Imported,
diff --git a/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp b/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
index b4487f004715..2967bb3faa32 100644
--- a/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteModernObjC.cpp
@@ -24,6 +24,7 @@
#include "clang/Lex/Lexer.h"
#include "clang/Rewrite/Core/Rewriter.h"
#include "llvm/ADT/DenseSet.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/MemoryBuffer.h"
@@ -600,7 +601,7 @@ namespace {
QualType StrType = Context->getConstantArrayType(
Context->CharTy, llvm::APInt(32, Str.size() + 1), nullptr,
ArrayType::Normal, 0);
- return StringLiteral::Create(*Context, Str, StringLiteral::Ascii,
+ return StringLiteral::Create(*Context, Str, StringLiteral::Ordinary,
/*Pascal=*/false, StrType, SourceLocation());
}
};
@@ -5356,16 +5357,15 @@ Stmt *RewriteModernObjC::SynthBlockInitExpr(BlockExpr *Exp,
Exp = new (Context) DeclRefExpr(*Context, FD, false, FD->getType(),
VK_LValue, SourceLocation());
bool isNestedCapturedVar = false;
- if (block)
- for (const auto &CI : block->captures()) {
- const VarDecl *variable = CI.getVariable();
- if (variable == ND && CI.isNested()) {
- assert (CI.isByRef() &&
- "SynthBlockInitExpr - captured block variable is not byref");
- isNestedCapturedVar = true;
- break;
- }
+ for (const auto &CI : block->captures()) {
+ const VarDecl *variable = CI.getVariable();
+ if (variable == ND && CI.isNested()) {
+ assert(CI.isByRef() &&
+ "SynthBlockInitExpr - captured block variable is not byref");
+ isNestedCapturedVar = true;
+ break;
}
+ }
// captured nested byref variable has its address passed. Do not take
// its address again.
if (!isNestedCapturedVar)
diff --git a/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteObjC.cpp b/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
index b2ecb42c43dd..520fbe940aa3 100644
--- a/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/Rewrite/RewriteObjC.cpp
@@ -500,7 +500,7 @@ namespace {
QualType StrType = Context->getConstantArrayType(
Context->CharTy, llvm::APInt(32, Str.size() + 1), nullptr,
ArrayType::Normal, 0);
- return StringLiteral::Create(*Context, Str, StringLiteral::Ascii,
+ return StringLiteral::Create(*Context, Str, StringLiteral::Ordinary,
/*Pascal=*/false, StrType, SourceLocation());
}
};
diff --git a/contrib/llvm-project/clang/lib/Frontend/TextDiagnostic.cpp b/contrib/llvm-project/clang/lib/Frontend/TextDiagnostic.cpp
index 1c4a76e68953..6c0ea0cde358 100644
--- a/contrib/llvm-project/clang/lib/Frontend/TextDiagnostic.cpp
+++ b/contrib/llvm-project/clang/lib/Frontend/TextDiagnostic.cpp
@@ -798,8 +798,7 @@ void TextDiagnostic::emitDiagnosticLoc(FullSourceLoc Loc, PresumedLoc PLoc,
// At least print the file name if available:
FileID FID = Loc.getFileID();
if (FID.isValid()) {
- const FileEntry *FE = Loc.getFileEntry();
- if (FE && FE->isValid()) {
+ if (const FileEntry *FE = Loc.getFileEntry()) {
emitFilename(FE->getName(), Loc.getManager());
OS << ": ";
}
diff --git a/contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp b/contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
index 8a8a13743762..6927d2ed47aa 100644
--- a/contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
+++ b/contrib/llvm-project/clang/lib/FrontendTool/ExecuteCompilerInvocation.cpp
@@ -15,6 +15,7 @@
#include "clang/CodeGen/CodeGenAction.h"
#include "clang/Config/config.h"
#include "clang/Driver/Options.h"
+#include "clang/ExtractAPI/FrontendActions.h"
#include "clang/Frontend/CompilerInstance.h"
#include "clang/Frontend/CompilerInvocation.h"
#include "clang/Frontend/FrontendActions.h"
@@ -66,6 +67,8 @@ CreateFrontendBaseAction(CompilerInstance &CI) {
return std::make_unique<GenerateModuleInterfaceAction>();
case GenerateHeaderModule:
return std::make_unique<GenerateHeaderModuleAction>();
+ case GenerateHeaderUnit:
+ return std::make_unique<GenerateHeaderUnitAction>();
case GeneratePCH: return std::make_unique<GeneratePCHAction>();
case GenerateInterfaceStubs:
return std::make_unique<GenerateInterfaceStubsAction>();
diff --git a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_intrinsics.h b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_intrinsics.h
index 3d2f5f40a435..cfd5eb869e34 100644
--- a/contrib/llvm-project/clang/lib/Headers/__clang_cuda_intrinsics.h
+++ b/contrib/llvm-project/clang/lib/Headers/__clang_cuda_intrinsics.h
@@ -234,7 +234,7 @@ inline __device__ unsigned int __match32_any_sync(unsigned int mask,
return __nvvm_match_any_sync_i32(mask, value);
}
-inline __device__ unsigned long long
+inline __device__ unsigned int
__match64_any_sync(unsigned int mask, unsigned long long value) {
return __nvvm_match_any_sync_i64(mask, value);
}
@@ -244,7 +244,7 @@ __match32_all_sync(unsigned int mask, unsigned int value, int *pred) {
return __nvvm_match_all_sync_i32p(mask, value, pred);
}
-inline __device__ unsigned long long
+inline __device__ unsigned int
__match64_all_sync(unsigned int mask, unsigned long long value, int *pred) {
return __nvvm_match_all_sync_i64p(mask, value, pred);
}
diff --git a/contrib/llvm-project/clang/lib/Headers/__wmmintrin_pclmul.h b/contrib/llvm-project/clang/lib/Headers/__wmmintrin_pclmul.h
index fef4b93dbb43..c9a6d50bdc89 100644
--- a/contrib/llvm-project/clang/lib/Headers/__wmmintrin_pclmul.h
+++ b/contrib/llvm-project/clang/lib/Headers/__wmmintrin_pclmul.h
@@ -22,23 +22,23 @@
/// \headerfile <x86intrin.h>
///
/// \code
-/// __m128i _mm_clmulepi64_si128(__m128i __X, __m128i __Y, const int __I);
+/// __m128i _mm_clmulepi64_si128(__m128i X, __m128i Y, const int I);
/// \endcode
///
/// This intrinsic corresponds to the <c> VPCLMULQDQ </c> instruction.
///
-/// \param __X
+/// \param X
/// A 128-bit vector of [2 x i64] containing one of the source operands.
-/// \param __Y
+/// \param Y
/// A 128-bit vector of [2 x i64] containing one of the source operands.
-/// \param __I
+/// \param I
/// An immediate value specifying which 64-bit values to select from the
-/// operands. Bit 0 is used to select a value from operand \a __X, and bit
-/// 4 is used to select a value from operand \a __Y: \n
-/// Bit[0]=0 indicates that bits[63:0] of operand \a __X are used. \n
-/// Bit[0]=1 indicates that bits[127:64] of operand \a __X are used. \n
-/// Bit[4]=0 indicates that bits[63:0] of operand \a __Y are used. \n
-/// Bit[4]=1 indicates that bits[127:64] of operand \a __Y are used.
+/// operands. Bit 0 is used to select a value from operand \a X, and bit
+/// 4 is used to select a value from operand \a Y: \n
+/// Bit[0]=0 indicates that bits[63:0] of operand \a X are used. \n
+/// Bit[0]=1 indicates that bits[127:64] of operand \a X are used. \n
+/// Bit[4]=0 indicates that bits[63:0] of operand \a Y are used. \n
+/// Bit[4]=1 indicates that bits[127:64] of operand \a Y are used.
/// \returns The 128-bit integer vector containing the result of the carry-less
/// multiplication of the selected 64-bit values.
#define _mm_clmulepi64_si128(X, Y, I) \
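A hedged usage illustration for the intrinsic documented above (the immediate selects which 64-bit halves of each operand are multiplied carry-lessly):

    #include <wmmintrin.h>
    // Requires a target with PCLMUL support (e.g. -mpclmul).
    __m128i clmul_low_halves(__m128i a, __m128i b) {
      return _mm_clmulepi64_si128(a, b, 0x00); // bits[63:0] of both operands
    }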
diff --git a/contrib/llvm-project/clang/lib/Headers/altivec.h b/contrib/llvm-project/clang/lib/Headers/altivec.h
index 55195b0781fb..0b1e76e81cc7 100644
--- a/contrib/llvm-project/clang/lib/Headers/altivec.h
+++ b/contrib/llvm-project/clang/lib/Headers/altivec.h
@@ -311,7 +311,7 @@ vec_add(vector unsigned __int128 __a, vector unsigned __int128 __b) {
static __inline__ vector unsigned char __attribute__((__always_inline__))
vec_add_u128(vector unsigned char __a, vector unsigned char __b) {
- return __builtin_altivec_vadduqm(__a, __b);
+ return (vector unsigned char)__builtin_altivec_vadduqm(__a, __b);
}
#elif defined(__VSX__)
static __inline__ vector signed long long __ATTRS_o_ai
@@ -325,9 +325,9 @@ vec_add(vector signed long long __a, vector signed long long __b) {
(vector unsigned int)__a + (vector unsigned int)__b;
vector unsigned int __carry = __builtin_altivec_vaddcuw(
(vector unsigned int)__a, (vector unsigned int)__b);
- __carry = __builtin_shufflevector((vector unsigned char)__carry,
- (vector unsigned char)__carry, 0, 0, 0, 7,
- 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0);
+ __carry = (vector unsigned int)__builtin_shufflevector(
+ (vector unsigned char)__carry, (vector unsigned char)__carry, 0, 0, 0, 7,
+ 0, 0, 0, 0, 0, 0, 0, 15, 0, 0, 0, 0);
return (vector signed long long)(__res + __carry);
#endif
}
@@ -358,7 +358,9 @@ static __inline__ vector double __ATTRS_o_ai vec_add(vector double __a,
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_adde(vector signed __int128 __a, vector signed __int128 __b,
vector signed __int128 __c) {
- return __builtin_altivec_vaddeuqm(__a, __b, __c);
+ return (vector signed __int128)__builtin_altivec_vaddeuqm(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+ (vector unsigned __int128)__c);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -371,7 +373,9 @@ vec_adde(vector unsigned __int128 __a, vector unsigned __int128 __b,
static __inline__ vector unsigned char __attribute__((__always_inline__))
vec_adde_u128(vector unsigned char __a, vector unsigned char __b,
vector unsigned char __c) {
- return (vector unsigned char)__builtin_altivec_vaddeuqm(__a, __b, __c);
+ return (vector unsigned char)__builtin_altivec_vaddeuqm_c(
+ (vector unsigned char)__a, (vector unsigned char)__b,
+ (vector unsigned char)__c);
}
#endif
@@ -398,7 +402,9 @@ vec_adde(vector unsigned int __a, vector unsigned int __b,
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_addec(vector signed __int128 __a, vector signed __int128 __b,
vector signed __int128 __c) {
- return __builtin_altivec_vaddecuq(__a, __b, __c);
+ return (vector signed __int128)__builtin_altivec_vaddecuq(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+ (vector unsigned __int128)__c);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -411,7 +417,9 @@ vec_addec(vector unsigned __int128 __a, vector unsigned __int128 __b,
static __inline__ vector unsigned char __attribute__((__always_inline__))
vec_addec_u128(vector unsigned char __a, vector unsigned char __b,
vector unsigned char __c) {
- return (vector unsigned char)__builtin_altivec_vaddecuq(__a, __b, __c);
+ return (vector unsigned char)__builtin_altivec_vaddecuq_c(
+ (vector unsigned char)__a, (vector unsigned char)__b,
+ (vector unsigned char)__c);
}
#ifdef __powerpc64__
@@ -600,7 +608,8 @@ vec_addc(vector unsigned __int128 __a, vector unsigned __int128 __b) {
static __inline__ vector unsigned char __attribute__((__always_inline__))
vec_addc_u128(vector unsigned char __a, vector unsigned char __b) {
- return (vector unsigned char)__builtin_altivec_vaddcuq(__a, __b);
+ return (vector unsigned char)__builtin_altivec_vaddcuq_c(
+ (vector unsigned char)__a, (vector unsigned char)__b);
}
#endif // defined(__POWER8_VECTOR__) && defined(__powerpc64__)
@@ -824,7 +833,9 @@ vec_vadduqm(vector unsigned __int128 __a, vector unsigned __int128 __b) {
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_vaddeuqm(vector signed __int128 __a, vector signed __int128 __b,
vector signed __int128 __c) {
- return __builtin_altivec_vaddeuqm(__a, __b, __c);
+ return (vector signed __int128)__builtin_altivec_vaddeuqm(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+ (vector unsigned __int128)__c);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -837,7 +848,8 @@ vec_vaddeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b,
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_vaddcuq(vector signed __int128 __a, vector signed __int128 __b) {
- return __builtin_altivec_vaddcuq(__a, __b);
+ return (vector signed __int128)__builtin_altivec_vaddcuq(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -850,7 +862,9 @@ vec_vaddcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_vaddecuq(vector signed __int128 __a, vector signed __int128 __b,
vector signed __int128 __c) {
- return __builtin_altivec_vaddecuq(__a, __b, __c);
+ return (vector signed __int128)__builtin_altivec_vaddecuq(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+ (vector unsigned __int128)__c);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -1806,18 +1820,19 @@ vec_cmpeq(vector double __a, vector double __b) {
static __inline__ vector bool __int128 __ATTRS_o_ai
vec_cmpeq(vector signed __int128 __a, vector signed __int128 __b) {
return (vector bool __int128)__builtin_altivec_vcmpequq(
- (vector bool __int128)__a, (vector bool __int128)__b);
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b);
}
static __inline__ vector bool __int128 __ATTRS_o_ai
vec_cmpeq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
return (vector bool __int128)__builtin_altivec_vcmpequq(
- (vector bool __int128)__a, (vector bool __int128)__b);
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b);
}
static __inline__ vector bool __int128 __ATTRS_o_ai
vec_cmpeq(vector bool __int128 __a, vector bool __int128 __b) {
- return (vector bool __int128)__builtin_altivec_vcmpequq(__a, __b);
+ return (vector bool __int128)__builtin_altivec_vcmpequq(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b);
}
#endif
@@ -1887,19 +1902,20 @@ vec_cmpne(vector float __a, vector float __b) {
#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ vector bool __int128 __ATTRS_o_ai
vec_cmpne(vector unsigned __int128 __a, vector unsigned __int128 __b) {
- return (vector bool __int128) ~(__builtin_altivec_vcmpequq(
- (vector bool __int128)__a, (vector bool __int128)__b));
+ return (vector bool __int128)~(__builtin_altivec_vcmpequq(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b));
}
static __inline__ vector bool __int128 __ATTRS_o_ai
vec_cmpne(vector signed __int128 __a, vector signed __int128 __b) {
- return (vector bool __int128) ~(__builtin_altivec_vcmpequq(
- (vector bool __int128)__a, (vector bool __int128)__b));
+ return (vector bool __int128)~(__builtin_altivec_vcmpequq(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b));
}
static __inline__ vector bool __int128 __ATTRS_o_ai
vec_cmpne(vector bool __int128 __a, vector bool __int128 __b) {
- return (vector bool __int128) ~(__builtin_altivec_vcmpequq(__a, __b));
+ return (vector bool __int128)~(__builtin_altivec_vcmpequq(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b));
}
#endif
@@ -1944,16 +1960,16 @@ vec_cmpnez(vector unsigned int __a, vector unsigned int __b) {
static __inline__ signed int __ATTRS_o_ai
vec_cntlz_lsbb(vector signed char __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vctzlsbb(__a);
+ return __builtin_altivec_vctzlsbb((vector unsigned char)__a);
#else
- return __builtin_altivec_vclzlsbb(__a);
+ return __builtin_altivec_vclzlsbb((vector unsigned char)__a);
#endif
}
static __inline__ signed int __ATTRS_o_ai
vec_cntlz_lsbb(vector unsigned char __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vctzlsbb(__a);
+ return __builtin_altivec_vctzlsbb((vector unsigned char)__a);
#else
return __builtin_altivec_vclzlsbb(__a);
#endif
@@ -1962,9 +1978,9 @@ vec_cntlz_lsbb(vector unsigned char __a) {
static __inline__ signed int __ATTRS_o_ai
vec_cnttz_lsbb(vector signed char __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vclzlsbb(__a);
+ return __builtin_altivec_vclzlsbb((vector unsigned char)__a);
#else
- return __builtin_altivec_vctzlsbb(__a);
+ return __builtin_altivec_vctzlsbb((vector unsigned char)__a);
#endif
}
@@ -1984,7 +2000,7 @@ vec_parity_lsbb(vector unsigned int __a) {
static __inline__ vector unsigned int __ATTRS_o_ai
vec_parity_lsbb(vector signed int __a) {
- return __builtin_altivec_vprtybw(__a);
+ return __builtin_altivec_vprtybw((vector unsigned int)__a);
}
#ifdef __SIZEOF_INT128__
@@ -1995,7 +2011,7 @@ vec_parity_lsbb(vector unsigned __int128 __a) {
static __inline__ vector unsigned __int128 __ATTRS_o_ai
vec_parity_lsbb(vector signed __int128 __a) {
- return __builtin_altivec_vprtybq(__a);
+ return __builtin_altivec_vprtybq((vector unsigned __int128)__a);
}
#endif
@@ -2006,7 +2022,7 @@ vec_parity_lsbb(vector unsigned long long __a) {
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_parity_lsbb(vector signed long long __a) {
- return __builtin_altivec_vprtybd(__a);
+ return __builtin_altivec_vprtybd((vector unsigned long long)__a);
}
#else
@@ -2212,14 +2228,12 @@ vec_cmpgt(vector double __a, vector double __b) {
#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ vector bool __int128 __ATTRS_o_ai
vec_cmpgt(vector signed __int128 __a, vector signed __int128 __b) {
- return (vector bool __int128)__builtin_altivec_vcmpgtsq(
- (vector bool __int128)__a, (vector bool __int128)__b);
+ return (vector bool __int128)__builtin_altivec_vcmpgtsq(__a, __b);
}
static __inline__ vector bool __int128 __ATTRS_o_ai
vec_cmpgt(vector unsigned __int128 __a, vector unsigned __int128 __b) {
- return (vector bool __int128)__builtin_altivec_vcmpgtuq(
- (vector bool __int128)__a, (vector bool __int128)__b);
+ return (vector bool __int128)__builtin_altivec_vcmpgtuq(__a, __b);
}
#endif
@@ -2488,7 +2502,8 @@ vec_cmplt(vector unsigned long long __a, vector unsigned long long __b) {
static __inline__ vector unsigned char __ATTRS_o_ai
vec_popcnt(vector signed char __a) {
- return __builtin_altivec_vpopcntb(__a);
+ return (vector unsigned char)__builtin_altivec_vpopcntb(
+ (vector unsigned char)__a);
}
static __inline__ vector unsigned char __ATTRS_o_ai
vec_popcnt(vector unsigned char __a) {
@@ -2496,7 +2511,8 @@ vec_popcnt(vector unsigned char __a) {
}
static __inline__ vector unsigned short __ATTRS_o_ai
vec_popcnt(vector signed short __a) {
- return __builtin_altivec_vpopcnth(__a);
+ return (vector unsigned short)__builtin_altivec_vpopcnth(
+ (vector unsigned short)__a);
}
static __inline__ vector unsigned short __ATTRS_o_ai
vec_popcnt(vector unsigned short __a) {
@@ -2504,7 +2520,7 @@ vec_popcnt(vector unsigned short __a) {
}
static __inline__ vector unsigned int __ATTRS_o_ai
vec_popcnt(vector signed int __a) {
- return __builtin_altivec_vpopcntw(__a);
+ return __builtin_altivec_vpopcntw((vector unsigned int)__a);
}
static __inline__ vector unsigned int __ATTRS_o_ai
vec_popcnt(vector unsigned int __a) {
@@ -2512,7 +2528,7 @@ vec_popcnt(vector unsigned int __a) {
}
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_popcnt(vector signed long long __a) {
- return __builtin_altivec_vpopcntd(__a);
+ return __builtin_altivec_vpopcntd((vector unsigned long long)__a);
}
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_popcnt(vector unsigned long long __a) {
@@ -2524,7 +2540,7 @@ vec_popcnt(vector unsigned long long __a) {
static __inline__ vector signed char __ATTRS_o_ai
vec_cntlz(vector signed char __a) {
- return __builtin_altivec_vclzb(__a);
+ return (vector signed char)__builtin_altivec_vclzb((vector unsigned char)__a);
}
static __inline__ vector unsigned char __ATTRS_o_ai
vec_cntlz(vector unsigned char __a) {
@@ -2532,7 +2548,8 @@ vec_cntlz(vector unsigned char __a) {
}
static __inline__ vector signed short __ATTRS_o_ai
vec_cntlz(vector signed short __a) {
- return __builtin_altivec_vclzh(__a);
+ return (vector signed short)__builtin_altivec_vclzh(
+ (vector unsigned short)__a);
}
static __inline__ vector unsigned short __ATTRS_o_ai
vec_cntlz(vector unsigned short __a) {
@@ -2540,7 +2557,7 @@ vec_cntlz(vector unsigned short __a) {
}
static __inline__ vector signed int __ATTRS_o_ai
vec_cntlz(vector signed int __a) {
- return __builtin_altivec_vclzw(__a);
+ return (vector signed int)__builtin_altivec_vclzw((vector unsigned int)__a);
}
static __inline__ vector unsigned int __ATTRS_o_ai
vec_cntlz(vector unsigned int __a) {
@@ -2548,7 +2565,8 @@ vec_cntlz(vector unsigned int __a) {
}
static __inline__ vector signed long long __ATTRS_o_ai
vec_cntlz(vector signed long long __a) {
- return __builtin_altivec_vclzd(__a);
+ return (vector signed long long)__builtin_altivec_vclzd(
+ (vector unsigned long long)__a);
}
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_cntlz(vector unsigned long long __a) {
@@ -2562,7 +2580,7 @@ vec_cntlz(vector unsigned long long __a) {
static __inline__ vector signed char __ATTRS_o_ai
vec_cnttz(vector signed char __a) {
- return __builtin_altivec_vctzb(__a);
+ return (vector signed char)__builtin_altivec_vctzb((vector unsigned char)__a);
}
static __inline__ vector unsigned char __ATTRS_o_ai
vec_cnttz(vector unsigned char __a) {
@@ -2570,7 +2588,8 @@ vec_cnttz(vector unsigned char __a) {
}
static __inline__ vector signed short __ATTRS_o_ai
vec_cnttz(vector signed short __a) {
- return __builtin_altivec_vctzh(__a);
+ return (vector signed short)__builtin_altivec_vctzh(
+ (vector unsigned short)__a);
}
static __inline__ vector unsigned short __ATTRS_o_ai
vec_cnttz(vector unsigned short __a) {
@@ -2578,7 +2597,7 @@ vec_cnttz(vector unsigned short __a) {
}
static __inline__ vector signed int __ATTRS_o_ai
vec_cnttz(vector signed int __a) {
- return __builtin_altivec_vctzw(__a);
+ return (vector signed int)__builtin_altivec_vctzw((vector unsigned int)__a);
}
static __inline__ vector unsigned int __ATTRS_o_ai
vec_cnttz(vector unsigned int __a) {
@@ -2586,7 +2605,8 @@ vec_cnttz(vector unsigned int __a) {
}
static __inline__ vector signed long long __ATTRS_o_ai
vec_cnttz(vector signed long long __a) {
- return __builtin_altivec_vctzd(__a);
+ return (vector signed long long)__builtin_altivec_vctzd(
+ (vector unsigned long long)__a);
}
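
A sketch of the zero-count overloads adjusted above (assumes POWER8 for vec_cntlz and POWER9 for vec_cnttz). Unlike vec_popcnt, these keep the input's signedness in the return type, hence the result casts:

#include <altivec.h>

/* Count leading zeros per 32-bit lane. */
static vector signed int clz_demo(vector signed int v) {
  return vec_cntlz(v); /* vclzw */
}

/* Count trailing zeros per 32-bit lane (POWER9). */
static vector signed int ctz_demo(vector signed int v) {
  return vec_cnttz(v); /* vctzw */
}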
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_cnttz(vector unsigned long long __a) {
@@ -3144,7 +3164,8 @@ static __inline__ void __ATTRS_o_ai vec_xst_len_r(vector unsigned char __a,
vector unsigned char __mask =
(vector unsigned char)__builtin_altivec_lvsl(16 - __c, (int *)NULL);
vector unsigned char __res =
- __builtin_altivec_vperm_4si((vector int)__a, (vector int)__a, __mask);
+ (vector unsigned char)__builtin_altivec_vperm_4si(
+ (vector int)__a, (vector int)__a, __mask);
return __builtin_vsx_stxvll((vector int)__res, __b, (__c << 56));
}
#endif
@@ -3181,34 +3202,40 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
// the XL-compatible signatures are used for those functions.
#ifdef __XL_COMPAT_ALTIVEC__
#define vec_ctf(__a, __b) \
- _Generic((__a), vector int \
- : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)), \
- vector unsigned int \
- : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
- (__b)), \
- vector unsigned long long \
- : (__builtin_vsx_xvcvuxdsp((vector unsigned long long)(__a)) * \
- (vector float)(vector unsigned)((0x7f - (__b)) << 23)), \
- vector signed long long \
- : (__builtin_vsx_xvcvsxdsp((vector signed long long)(__a)) * \
- (vector float)(vector unsigned)((0x7f - (__b)) << 23)))
+ _Generic( \
+ (__a), vector int \
+ : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)), \
+ vector unsigned int \
+ : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
+ (__b)), \
+ vector unsigned long long \
+ : (vector float)(__builtin_vsx_xvcvuxdsp( \
+ (vector unsigned long long)(__a)) * \
+ (vector float)(vector unsigned)((0x7f - (__b)) << 23)), \
+ vector signed long long \
+ : (vector float)(__builtin_vsx_xvcvsxdsp( \
+ (vector signed long long)(__a)) * \
+ (vector float)(vector unsigned)((0x7f - (__b)) << 23)))
#else // __XL_COMPAT_ALTIVEC__
#define vec_ctf(__a, __b) \
- _Generic((__a), vector int \
- : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)), \
- vector unsigned int \
- : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
- (__b)), \
- vector unsigned long long \
- : (__builtin_convertvector((vector unsigned long long)(__a), \
- vector double) * \
- (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \
- << 52)), \
- vector signed long long \
- : (__builtin_convertvector((vector signed long long)(__a), \
- vector double) * \
- (vector double)(vector unsigned long long)((0x3ffULL - (__b)) \
- << 52)))
+ _Generic( \
+ (__a), vector int \
+ : (vector float)__builtin_altivec_vcfsx((vector int)(__a), (__b)), \
+ vector unsigned int \
+ : (vector float)__builtin_altivec_vcfux((vector unsigned int)(__a), \
+ (__b)), \
+ vector unsigned long long \
+ : (vector float)(__builtin_convertvector( \
+ (vector unsigned long long)(__a), vector double) * \
+ (vector double)(vector unsigned long long)((0x3ffULL - \
+ (__b)) \
+ << 52)), \
+ vector signed long long \
+ : (vector float)(__builtin_convertvector((vector signed long long)(__a), \
+ vector double) * \
+ (vector double)(vector unsigned long long)((0x3ffULL - \
+ (__b)) \
+ << 52)))
#endif // __XL_COMPAT_ALTIVEC__
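
Both branches of the macro build the scale factor 2^-__b by writing its biased exponent straight into the exponent field: (0x7f - __b) << 23 for float and (0x3ffULL - __b) << 52 for double. A usage sketch (assuming VSX and a constant scale):

#include <altivec.h>

/* Fixed-point lanes with 4 fractional bits to float:
   a lane holding 32 becomes 2.0f (32 * 2^-4). */
static vector float ctf_demo(vector signed int v) {
  return vec_ctf(v, 4);
}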
#else
#define vec_ctf(__a, __b) \
@@ -3255,26 +3282,29 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
#ifdef __XL_COMPAT_ALTIVEC__
#define vec_cts(__a, __b) \
_Generic((__a), vector float \
- : __builtin_altivec_vctsxs((vector float)(__a), (__b)), \
+ : (vector signed int)__builtin_altivec_vctsxs((vector float)(__a), \
+ (__b)), \
vector double \
: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
(vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
<< 52); \
- __builtin_vsx_xvcvdpsxws(__ret); \
+ (vector signed long long)__builtin_vsx_xvcvdpsxws(__ret); \
}))
#else // __XL_COMPAT_ALTIVEC__
#define vec_cts(__a, __b) \
_Generic((__a), vector float \
- : __builtin_altivec_vctsxs((vector float)(__a), (__b)), \
+ : (vector signed int)__builtin_altivec_vctsxs((vector float)(__a), \
+ (__b)), \
vector double \
: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
(vector double)(vector unsigned long long)((0x3ffULL + (__b)) \
<< 52); \
- __builtin_convertvector(__ret, vector signed long long); \
+ (vector signed long long)__builtin_convertvector( \
+ __ret, vector signed long long); \
}))
#endif // __XL_COMPAT_ALTIVEC__
#else
@@ -3291,26 +3321,29 @@ static __inline__ vector double __ATTRS_o_ai vec_cpsgn(vector double __a,
#ifdef __XL_COMPAT_ALTIVEC__
#define vec_ctu(__a, __b) \
_Generic((__a), vector float \
- : __builtin_altivec_vctuxs((vector float)(__a), (__b)), \
+ : (vector unsigned int)__builtin_altivec_vctuxs( \
+ (vector float)(__a), (__b)), \
vector double \
: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
(vector double)(vector unsigned long long)((0x3ffULL + __b) \
<< 52); \
- __builtin_vsx_xvcvdpuxws(__ret); \
+ (vector unsigned long long)__builtin_vsx_xvcvdpuxws(__ret); \
}))
#else // __XL_COMPAT_ALTIVEC__
#define vec_ctu(__a, __b) \
_Generic((__a), vector float \
- : __builtin_altivec_vctuxs((vector float)(__a), (__b)), \
+ : (vector unsigned int)__builtin_altivec_vctuxs( \
+ (vector float)(__a), (__b)), \
vector double \
: __extension__({ \
vector double __ret = \
(vector double)(__a) * \
(vector double)(vector unsigned long long)((0x3ffULL + __b) \
<< 52); \
- __builtin_convertvector(__ret, vector unsigned long long); \
+ (vector unsigned long long)__builtin_convertvector( \
+ __ret, vector unsigned long long); \
}))
#endif // __XL_COMPAT_ALTIVEC__
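
The double branches of vec_cts/vec_ctu pre-multiply by 2^__b, built from the biased exponent (0x3ffULL + __b) << 52, before converting to integers, giving a saturated fixed-point result. A sketch under the same VSX assumption:

#include <altivec.h>

/* Float back to fixed point with 4 fractional bits: 2.5f becomes 40. */
static vector signed int cts_demo(vector float v) {
  return vec_cts(v, 4);
}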
#else
@@ -6491,12 +6524,12 @@ vec_nand(vector signed char __a, vector signed char __b) {
static __inline__ vector signed char __ATTRS_o_ai
vec_nand(vector signed char __a, vector bool char __b) {
- return ~(__a & __b);
+ return ~(__a & (vector signed char)__b);
}
static __inline__ vector signed char __ATTRS_o_ai
vec_nand(vector bool char __a, vector signed char __b) {
- return ~(__a & __b);
+ return (vector signed char)~(__a & (vector bool char)__b);
}
static __inline__ vector unsigned char __ATTRS_o_ai
@@ -6506,12 +6539,12 @@ vec_nand(vector unsigned char __a, vector unsigned char __b) {
static __inline__ vector unsigned char __ATTRS_o_ai
vec_nand(vector unsigned char __a, vector bool char __b) {
- return ~(__a & __b);
+ return ~(__a & (vector unsigned char)__b);
}
static __inline__ vector unsigned char __ATTRS_o_ai
vec_nand(vector bool char __a, vector unsigned char __b) {
- return ~(__a & __b);
+ return (vector unsigned char)~(__a & (vector bool char)__b);
}
static __inline__ vector bool char __ATTRS_o_ai vec_nand(vector bool char __a,
@@ -6526,12 +6559,12 @@ vec_nand(vector signed short __a, vector signed short __b) {
static __inline__ vector signed short __ATTRS_o_ai
vec_nand(vector signed short __a, vector bool short __b) {
- return ~(__a & __b);
+ return ~(__a & (vector signed short)__b);
}
static __inline__ vector signed short __ATTRS_o_ai
vec_nand(vector bool short __a, vector signed short __b) {
- return ~(__a & __b);
+ return (vector signed short)~(__a & (vector bool short)__b);
}
static __inline__ vector unsigned short __ATTRS_o_ai
@@ -6541,7 +6574,7 @@ vec_nand(vector unsigned short __a, vector unsigned short __b) {
static __inline__ vector unsigned short __ATTRS_o_ai
vec_nand(vector unsigned short __a, vector bool short __b) {
- return ~(__a & __b);
+ return ~(__a & (vector unsigned short)__b);
}
static __inline__ vector bool short __ATTRS_o_ai
@@ -6556,12 +6589,12 @@ vec_nand(vector signed int __a, vector signed int __b) {
static __inline__ vector signed int __ATTRS_o_ai vec_nand(vector signed int __a,
vector bool int __b) {
- return ~(__a & __b);
+ return ~(__a & (vector signed int)__b);
}
static __inline__ vector signed int __ATTRS_o_ai
vec_nand(vector bool int __a, vector signed int __b) {
- return ~(__a & __b);
+ return (vector signed int)~(__a & (vector bool int)__b);
}
static __inline__ vector unsigned int __ATTRS_o_ai
@@ -6571,12 +6604,12 @@ vec_nand(vector unsigned int __a, vector unsigned int __b) {
static __inline__ vector unsigned int __ATTRS_o_ai
vec_nand(vector unsigned int __a, vector bool int __b) {
- return ~(__a & __b);
+ return ~(__a & (vector unsigned int)__b);
}
static __inline__ vector unsigned int __ATTRS_o_ai
vec_nand(vector bool int __a, vector unsigned int __b) {
- return ~(__a & __b);
+ return (vector unsigned int)~(__a & (vector bool int)__b);
}
static __inline__ vector bool int __ATTRS_o_ai vec_nand(vector bool int __a,
@@ -6597,12 +6630,12 @@ vec_nand(vector signed long long __a, vector signed long long __b) {
static __inline__ vector signed long long __ATTRS_o_ai
vec_nand(vector signed long long __a, vector bool long long __b) {
- return ~(__a & __b);
+ return ~(__a & (vector signed long long)__b);
}
static __inline__ vector signed long long __ATTRS_o_ai
vec_nand(vector bool long long __a, vector signed long long __b) {
- return ~(__a & __b);
+ return (vector signed long long)~(__a & (vector bool long long)__b);
}
static __inline__ vector unsigned long long __ATTRS_o_ai
@@ -6612,12 +6645,12 @@ vec_nand(vector unsigned long long __a, vector unsigned long long __b) {
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_nand(vector unsigned long long __a, vector bool long long __b) {
- return ~(__a & __b);
+ return ~(__a & (vector unsigned long long)__b);
}
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_nand(vector bool long long __a, vector unsigned long long __b) {
- return ~(__a & __b);
+ return (vector unsigned long long)~(__a & (vector bool long long)__b);
}
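
A sketch for the vec_nand family (POWER8). It computes ~(a & b); the mixed signed/bool overloads above need the casts because & is only defined between matching vector types:

#include <altivec.h>

static vector unsigned int nand_demo(vector unsigned int a,
                                     vector unsigned int b) {
  return vec_nand(a, b);
}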
static __inline__ vector bool long long __ATTRS_o_ai
@@ -7005,12 +7038,12 @@ vec_orc(vector signed char __a, vector signed char __b) {
static __inline__ vector signed char __ATTRS_o_ai
vec_orc(vector signed char __a, vector bool char __b) {
- return __a | ~__b;
+ return __a | (vector signed char)~__b;
}
static __inline__ vector signed char __ATTRS_o_ai
vec_orc(vector bool char __a, vector signed char __b) {
- return __a | ~__b;
+ return (vector signed char)(__a | (vector bool char)~__b);
}
static __inline__ vector unsigned char __ATTRS_o_ai
@@ -7020,12 +7053,12 @@ vec_orc(vector unsigned char __a, vector unsigned char __b) {
static __inline__ vector unsigned char __ATTRS_o_ai
vec_orc(vector unsigned char __a, vector bool char __b) {
- return __a | ~__b;
+ return __a | (vector unsigned char)~__b;
}
static __inline__ vector unsigned char __ATTRS_o_ai
vec_orc(vector bool char __a, vector unsigned char __b) {
- return __a | ~__b;
+ return (vector unsigned char)(__a | (vector bool char)~__b);
}
static __inline__ vector bool char __ATTRS_o_ai vec_orc(vector bool char __a,
@@ -7040,12 +7073,12 @@ vec_orc(vector signed short __a, vector signed short __b) {
static __inline__ vector signed short __ATTRS_o_ai
vec_orc(vector signed short __a, vector bool short __b) {
- return __a | ~__b;
+ return __a | (vector signed short)~__b;
}
static __inline__ vector signed short __ATTRS_o_ai
vec_orc(vector bool short __a, vector signed short __b) {
- return __a | ~__b;
+ return (vector signed short)(__a | (vector bool short)~__b);
}
static __inline__ vector unsigned short __ATTRS_o_ai
@@ -7055,12 +7088,12 @@ vec_orc(vector unsigned short __a, vector unsigned short __b) {
static __inline__ vector unsigned short __ATTRS_o_ai
vec_orc(vector unsigned short __a, vector bool short __b) {
- return __a | ~__b;
+ return __a | (vector unsigned short)~__b;
}
static __inline__ vector unsigned short __ATTRS_o_ai
vec_orc(vector bool short __a, vector unsigned short __b) {
- return __a | ~__b;
+ return (vector unsigned short)(__a | (vector bool short)~__b);
}
static __inline__ vector bool short __ATTRS_o_ai
@@ -7075,12 +7108,12 @@ vec_orc(vector signed int __a, vector signed int __b) {
static __inline__ vector signed int __ATTRS_o_ai vec_orc(vector signed int __a,
vector bool int __b) {
- return __a | ~__b;
+ return __a | (vector signed int)~__b;
}
static __inline__ vector signed int __ATTRS_o_ai
vec_orc(vector bool int __a, vector signed int __b) {
- return __a | ~__b;
+ return (vector signed int)(__a | (vector bool int)~__b);
}
static __inline__ vector unsigned int __ATTRS_o_ai
@@ -7090,12 +7123,12 @@ vec_orc(vector unsigned int __a, vector unsigned int __b) {
static __inline__ vector unsigned int __ATTRS_o_ai
vec_orc(vector unsigned int __a, vector bool int __b) {
- return __a | ~__b;
+ return __a | (vector unsigned int)~__b;
}
static __inline__ vector unsigned int __ATTRS_o_ai
vec_orc(vector bool int __a, vector unsigned int __b) {
- return __a | ~__b;
+ return (vector unsigned int)(__a | (vector bool int)~__b);
}
static __inline__ vector bool int __ATTRS_o_ai vec_orc(vector bool int __a,
@@ -7105,12 +7138,12 @@ static __inline__ vector bool int __ATTRS_o_ai vec_orc(vector bool int __a,
static __inline__ vector float __ATTRS_o_ai
vec_orc(vector bool int __a, vector float __b) {
- return (vector float)(__a | ~(vector unsigned int)__b);
+ return (vector float)(__a | ~(vector bool int)__b);
}
static __inline__ vector float __ATTRS_o_ai
vec_orc(vector float __a, vector bool int __b) {
- return (vector float)((vector unsigned int)__a | ~__b);
+ return (vector float)((vector bool int)__a | ~__b);
}
static __inline__ vector float __ATTRS_o_ai vec_orc(vector float __a,
@@ -7125,12 +7158,12 @@ vec_orc(vector signed long long __a, vector signed long long __b) {
static __inline__ vector signed long long __ATTRS_o_ai
vec_orc(vector signed long long __a, vector bool long long __b) {
- return __a | ~__b;
+ return __a | (vector signed long long)~__b;
}
static __inline__ vector signed long long __ATTRS_o_ai
vec_orc(vector bool long long __a, vector signed long long __b) {
- return __a | ~__b;
+ return (vector signed long long)(__a | (vector bool long long)~__b);
}
static __inline__ vector unsigned long long __ATTRS_o_ai
@@ -7140,12 +7173,12 @@ vec_orc(vector unsigned long long __a, vector unsigned long long __b) {
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_orc(vector unsigned long long __a, vector bool long long __b) {
- return __a | ~__b;
+ return __a | (vector unsigned long long)~__b;
}
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_orc(vector bool long long __a, vector unsigned long long __b) {
- return __a | ~__b;
+ return (vector unsigned long long)(__a | (vector bool long long)~__b);
}
static __inline__ vector bool long long __ATTRS_o_ai
@@ -7155,17 +7188,17 @@ vec_orc(vector bool long long __a, vector bool long long __b) {
static __inline__ vector double __ATTRS_o_ai
vec_orc(vector double __a, vector bool long long __b) {
- return (vector double)((vector unsigned long long)__a | ~__b);
+ return (vector double)((vector bool long long)__a | ~__b);
}
static __inline__ vector double __ATTRS_o_ai
vec_orc(vector bool long long __a, vector double __b) {
- return (vector double)(__a | ~(vector unsigned long long)__b);
+ return (vector double)(__a | ~(vector bool long long)__b);
}
static __inline__ vector double __ATTRS_o_ai vec_orc(vector double __a,
vector double __b) {
- return (vector double)((vector bool long long)__a |
+ return (vector double)((vector unsigned long long)__a |
~(vector unsigned long long)__b);
}
#endif
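
The vec_orc overloads follow the same pattern as vec_nand: OR with the complement of one operand, with casts reconciling the bool/float/double operand types. A sketch (POWER8):

#include <altivec.h>

/* a | ~b per lane. */
static vector unsigned int orc_demo(vector unsigned int a,
                                    vector unsigned int b) {
  return vec_orc(a, b);
}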
@@ -8276,14 +8309,20 @@ vec_rl(vector signed long long __a, vector unsigned long long __b) {
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_rl(vector unsigned long long __a, vector unsigned long long __b) {
- return __builtin_altivec_vrld(__a, __b);
+ return (vector unsigned long long)__builtin_altivec_vrld(
+ (vector long long)__a, __b);
}
#endif
#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_rl(vector signed __int128 __a, vector unsigned __int128 __b) {
- return (__b << __a)|(__b >> ((__CHAR_BIT__ * sizeof(vector signed __int128)) - __a));
+ return (vector signed __int128)(((vector unsigned __int128)__b
+ << (vector unsigned __int128)__a) |
+ ((vector unsigned __int128)__b >>
+ ((__CHAR_BIT__ *
+ sizeof(vector unsigned __int128)) -
+ (vector unsigned __int128)__a)));
}
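
The quadword path has no single rotate builtin here, so the hunk spells out the classic shift identity on unsigned operands. The scalar analogue below shows the same formula (a sketch; counts of 0 or the full width are excluded to avoid an undefined shift):

#include <stdint.h>

/* rotl(x, n) == (x << n) | (x >> (64 - n)), valid for 0 < n < 64. */
static inline uint64_t rotl64(uint64_t x, unsigned n) {
  return (x << n) | (x >> (64u - n));
}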
static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -8317,7 +8356,9 @@ vec_rlmi(vector unsigned __int128 __a, vector unsigned __int128 __b,
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_rlmi(vector signed __int128 __a, vector signed __int128 __b,
vector signed __int128 __c) {
- return __builtin_altivec_vrlqmi(__a, __c, __b);
+ return (vector signed __int128)__builtin_altivec_vrlqmi(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__c,
+ (vector unsigned __int128)__b);
}
#endif
@@ -8370,7 +8411,8 @@ vec_rlnm(vector signed __int128 __a, vector signed __int128 __b,
__builtin_shufflevector(TmpB, TmpC, -1, -1, -1, -1, -1, 31, 30, 15, -1,
-1, -1, -1, -1, -1, -1, -1);
#endif
- return __builtin_altivec_vrlqnm(__a, (vector unsigned __int128) MaskAndShift);
+ return (vector signed __int128)__builtin_altivec_vrlqnm(
+ (vector unsigned __int128)__a, (vector unsigned __int128)MaskAndShift);
}
#endif
@@ -12070,13 +12112,15 @@ vec_subc(vector unsigned __int128 __a, vector unsigned __int128 __b) {
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_subc(vector signed __int128 __a, vector signed __int128 __b) {
- return __builtin_altivec_vsubcuq(__a, __b);
+ return (vector signed __int128)__builtin_altivec_vsubcuq(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b);
}
#endif
static __inline__ vector unsigned char __attribute__((__always_inline__))
vec_subc_u128(vector unsigned char __a, vector unsigned char __b) {
- return (vector unsigned char)__builtin_altivec_vsubcuq(__a, __b);
+ return (vector unsigned char)__builtin_altivec_vsubcuq_c(
+ (vector unsigned char)__a, (vector unsigned char)__b);
}
#endif // __POWER8_VECTOR__
@@ -12298,7 +12342,7 @@ vec_vsubuqm(vector unsigned __int128 __a, vector unsigned __int128 __b) {
static __inline__ vector unsigned char __attribute__((__always_inline__))
vec_sub_u128(vector unsigned char __a, vector unsigned char __b) {
- return __builtin_altivec_vsubuqm(__a, __b);
+ return (vector unsigned char)__builtin_altivec_vsubuqm(__a, __b);
}
/* vec_vsubeuqm */
@@ -12307,7 +12351,9 @@ vec_sub_u128(vector unsigned char __a, vector unsigned char __b) {
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_vsubeuqm(vector signed __int128 __a, vector signed __int128 __b,
vector signed __int128 __c) {
- return __builtin_altivec_vsubeuqm(__a, __b, __c);
+ return (vector signed __int128)__builtin_altivec_vsubeuqm(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+ (vector unsigned __int128)__c);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -12319,7 +12365,9 @@ vec_vsubeuqm(vector unsigned __int128 __a, vector unsigned __int128 __b,
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_sube(vector signed __int128 __a, vector signed __int128 __b,
vector signed __int128 __c) {
- return __builtin_altivec_vsubeuqm(__a, __b, __c);
+ return (vector signed __int128)__builtin_altivec_vsubeuqm(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+ (vector unsigned __int128)__c);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -12332,7 +12380,9 @@ vec_sube(vector unsigned __int128 __a, vector unsigned __int128 __b,
static __inline__ vector unsigned char __attribute__((__always_inline__))
vec_sube_u128(vector unsigned char __a, vector unsigned char __b,
vector unsigned char __c) {
- return (vector unsigned char)__builtin_altivec_vsubeuqm(__a, __b, __c);
+ return (vector unsigned char)__builtin_altivec_vsubeuqm_c(
+ (vector unsigned char)__a, (vector unsigned char)__b,
+ (vector unsigned char)__c);
}
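
A sketch of how the carry-propagating quadword ops above chain into multi-word arithmetic (assumes POWER8 with __int128 support): vsubcuq produces the borrow indicator and vsubeuqm folds it into the next limb:

#include <altivec.h>

/* 256-bit subtraction from two 128-bit halves. */
static void sub256(vector unsigned __int128 ah, vector unsigned __int128 al,
                   vector unsigned __int128 bh, vector unsigned __int128 bl,
                   vector unsigned __int128 *rh, vector unsigned __int128 *rl) {
  vector unsigned __int128 c = vec_subc(al, bl); /* 1 when no borrow */
  *rl = vec_sub(al, bl);
  *rh = vec_sube(ah, bh, c); /* ah + ~bh + c */
}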
/* vec_vsubcuq */
@@ -12340,7 +12390,8 @@ vec_sube_u128(vector unsigned char __a, vector unsigned char __b,
#ifdef __SIZEOF_INT128__
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_vsubcuq(vector signed __int128 __a, vector signed __int128 __b) {
- return __builtin_altivec_vsubcuq(__a, __b);
+ return (vector signed __int128)__builtin_altivec_vsubcuq(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -12353,7 +12404,9 @@ vec_vsubcuq(vector unsigned __int128 __a, vector unsigned __int128 __b) {
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_vsubecuq(vector signed __int128 __a, vector signed __int128 __b,
vector signed __int128 __c) {
- return __builtin_altivec_vsubecuq(__a, __b, __c);
+ return (vector signed __int128)__builtin_altivec_vsubecuq(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+ (vector unsigned __int128)__c);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -12381,7 +12434,9 @@ vec_subec(vector unsigned int __a, vector unsigned int __b,
static __inline__ vector signed __int128 __ATTRS_o_ai
vec_subec(vector signed __int128 __a, vector signed __int128 __b,
vector signed __int128 __c) {
- return __builtin_altivec_vsubecuq(__a, __b, __c);
+ return (vector signed __int128)__builtin_altivec_vsubecuq(
+ (vector unsigned __int128)__a, (vector unsigned __int128)__b,
+ (vector unsigned __int128)__c);
}
static __inline__ vector unsigned __int128 __ATTRS_o_ai
@@ -12394,7 +12449,9 @@ vec_subec(vector unsigned __int128 __a, vector unsigned __int128 __b,
static __inline__ vector unsigned char __attribute__((__always_inline__))
vec_subec_u128(vector unsigned char __a, vector unsigned char __b,
vector unsigned char __c) {
- return (vector unsigned char)__builtin_altivec_vsubecuq(__a, __b, __c);
+ return (vector unsigned char)__builtin_altivec_vsubecuq_c(
+ (vector unsigned char)__a, (vector unsigned char)__b,
+ (vector unsigned char)__c);
}
#endif // __POWER8_VECTOR__
@@ -14900,17 +14957,20 @@ static __inline__ int __ATTRS_o_ai vec_all_eq(vector double __a,
#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ int __ATTRS_o_ai vec_all_eq(vector signed __int128 __a,
vector signed __int128 __b) {
- return __builtin_altivec_vcmpequq_p(__CR6_LT, __a, __b);
+ return __builtin_altivec_vcmpequq_p(__CR6_LT, (vector unsigned __int128)__a,
+ (vector signed __int128)__b);
}
static __inline__ int __ATTRS_o_ai vec_all_eq(vector unsigned __int128 __a,
vector unsigned __int128 __b) {
- return __builtin_altivec_vcmpequq_p(__CR6_LT, __a, __b);
+ return __builtin_altivec_vcmpequq_p(__CR6_LT, __a,
+ (vector signed __int128)__b);
}
static __inline__ int __ATTRS_o_ai vec_all_eq(vector bool __int128 __a,
vector bool __int128 __b) {
- return __builtin_altivec_vcmpequq_p(__CR6_LT, __a, __b);
+ return __builtin_altivec_vcmpequq_p(__CR6_LT, (vector unsigned __int128)__a,
+ (vector signed __int128)__b);
}
#endif
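
A sketch for the predicate comparisons above (assumes POWER10 with __int128): the _p builtins set CR6 and return a scalar truth value rather than a mask vector, which is why the casts only need to satisfy the builtin's fixed signature:

#include <altivec.h>

static int all_equal(vector unsigned __int128 a,
                     vector unsigned __int128 b) {
  return vec_all_eq(a, b);
}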
@@ -15850,17 +15910,20 @@ static __inline__ int __ATTRS_o_ai vec_all_ne(vector double __a,
#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ int __ATTRS_o_ai vec_all_ne(vector signed __int128 __a,
vector signed __int128 __b) {
- return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a, __b);
+ return __builtin_altivec_vcmpequq_p(__CR6_EQ, (vector unsigned __int128)__a,
+ __b);
}
static __inline__ int __ATTRS_o_ai vec_all_ne(vector unsigned __int128 __a,
vector unsigned __int128 __b) {
- return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a, __b);
+ return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a,
+ (vector signed __int128)__b);
}
static __inline__ int __ATTRS_o_ai vec_all_ne(vector bool __int128 __a,
vector bool __int128 __b) {
- return __builtin_altivec_vcmpequq_p(__CR6_EQ, __a, __b);
+ return __builtin_altivec_vcmpequq_p(__CR6_EQ, (vector unsigned __int128)__a,
+ (vector signed __int128)__b);
}
#endif
@@ -16144,17 +16207,20 @@ static __inline__ int __ATTRS_o_ai vec_any_eq(vector double __a,
#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ int __ATTRS_o_ai vec_any_eq(vector signed __int128 __a,
vector signed __int128 __b) {
- return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a, __b);
+ return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV,
+ (vector unsigned __int128)__a, __b);
}
static __inline__ int __ATTRS_o_ai vec_any_eq(vector unsigned __int128 __a,
vector unsigned __int128 __b) {
- return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a, __b);
+ return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a,
+ (vector signed __int128)__b);
}
static __inline__ int __ATTRS_o_ai vec_any_eq(vector bool __int128 __a,
vector bool __int128 __b) {
- return __builtin_altivec_vcmpequq_p(__CR6_EQ_REV, __a, __b);
+ return __builtin_altivec_vcmpequq_p(
+ __CR6_EQ_REV, (vector unsigned __int128)__a, (vector signed __int128)__b);
}
#endif
@@ -17124,17 +17190,20 @@ static __inline__ int __ATTRS_o_ai vec_any_ne(vector double __a,
#if defined(__POWER10_VECTOR__) && defined(__SIZEOF_INT128__)
static __inline__ int __ATTRS_o_ai vec_any_ne(vector signed __int128 __a,
vector signed __int128 __b) {
- return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a, __b);
+ return __builtin_altivec_vcmpequq_p(__CR6_LT_REV,
+ (vector unsigned __int128)__a, __b);
}
static __inline__ int __ATTRS_o_ai vec_any_ne(vector unsigned __int128 __a,
vector unsigned __int128 __b) {
- return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a, __b);
+ return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a,
+ (vector signed __int128)__b);
}
static __inline__ int __ATTRS_o_ai vec_any_ne(vector bool __int128 __a,
vector bool __int128 __b) {
- return __builtin_altivec_vcmpequq_p(__CR6_LT_REV, __a, __b);
+ return __builtin_altivec_vcmpequq_p(
+ __CR6_LT_REV, (vector unsigned __int128)__a, (vector signed __int128)__b);
}
#endif
@@ -17297,13 +17366,17 @@ __builtin_crypto_vncipherlast(vector unsigned long long __a,
static __inline__ vector bool char __ATTRS_o_ai
vec_permxor(vector bool char __a, vector bool char __b,
vector bool char __c) {
- return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
+ return (vector bool char)__builtin_altivec_crypto_vpermxor(
+ (vector unsigned char)__a, (vector unsigned char)__b,
+ (vector unsigned char)__c);
}
static __inline__ vector signed char __ATTRS_o_ai
vec_permxor(vector signed char __a, vector signed char __b,
vector signed char __c) {
- return __builtin_altivec_crypto_vpermxor(__a, __b, __c);
+ return (vector signed char)__builtin_altivec_crypto_vpermxor(
+ (vector unsigned char)__a, (vector unsigned char)__b,
+ (vector unsigned char)__c);
}
static __inline__ vector unsigned char __ATTRS_o_ai
@@ -17365,7 +17438,7 @@ __builtin_crypto_vpmsumb(vector unsigned long long __a,
static __inline__ vector signed char __ATTRS_o_ai
vec_vgbbd(vector signed char __a) {
- return __builtin_altivec_vgbbd((vector unsigned char)__a);
+ return (vector signed char)__builtin_altivec_vgbbd((vector unsigned char)__a);
}
#define vec_pmsum_be __builtin_crypto_vpmsumb
@@ -17378,23 +17451,25 @@ vec_vgbbd(vector unsigned char __a) {
static __inline__ vector signed long long __ATTRS_o_ai
vec_gbb(vector signed long long __a) {
- return __builtin_altivec_vgbbd((vector unsigned char)__a);
+ return (vector signed long long)__builtin_altivec_vgbbd(
+ (vector unsigned char)__a);
}
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_gbb(vector unsigned long long __a) {
- return __builtin_altivec_vgbbd((vector unsigned char)__a);
+ return (vector unsigned long long)__builtin_altivec_vgbbd(
+ (vector unsigned char)__a);
}
static __inline__ vector long long __ATTRS_o_ai
vec_vbpermq(vector signed char __a, vector signed char __b) {
- return __builtin_altivec_vbpermq((vector unsigned char)__a,
- (vector unsigned char)__b);
+ return (vector long long)__builtin_altivec_vbpermq((vector unsigned char)__a,
+ (vector unsigned char)__b);
}
static __inline__ vector long long __ATTRS_o_ai
vec_vbpermq(vector unsigned char __a, vector unsigned char __b) {
- return __builtin_altivec_vbpermq(__a, __b);
+ return (vector long long)__builtin_altivec_vbpermq(__a, __b);
}
#if defined(__powerpc64__) && defined(__SIZEOF_INT128__)
@@ -17406,7 +17481,7 @@ vec_bperm(vector unsigned __int128 __a, vector unsigned char __b) {
#endif
static __inline__ vector unsigned char __ATTRS_o_ai
vec_bperm(vector unsigned char __a, vector unsigned char __b) {
- return __builtin_altivec_vbpermq(__a, __b);
+ return (vector unsigned char)__builtin_altivec_vbpermq(__a, __b);
}
#endif // __POWER8_VECTOR__
#ifdef __POWER9_VECTOR__
@@ -17777,26 +17852,26 @@ vec_xl_be(signed long long __offset, const unsigned __int128 *__ptr) {
#if defined(__POWER10_VECTOR__) && defined(__VSX__) && \
defined(__SIZEOF_INT128__)
-/* vect_xl_sext */
+/* vec_xl_sext */
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
vec_xl_sext(ptrdiff_t __offset, const signed char *__pointer) {
- return (vector unsigned __int128)*(__pointer + __offset);
+ return (vector signed __int128)*(__pointer + __offset);
}
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
vec_xl_sext(ptrdiff_t __offset, const signed short *__pointer) {
- return (vector unsigned __int128)*(__pointer + __offset);
+ return (vector signed __int128)*(__pointer + __offset);
}
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
vec_xl_sext(ptrdiff_t __offset, const signed int *__pointer) {
- return (vector unsigned __int128)*(__pointer + __offset);
+ return (vector signed __int128)*(__pointer + __offset);
}
-static __inline__ vector unsigned __int128 __ATTRS_o_ai
+static __inline__ vector signed __int128 __ATTRS_o_ai
vec_xl_sext(ptrdiff_t __offset, const signed long long *__pointer) {
- return (vector unsigned __int128)*(__pointer + __offset);
+ return (vector signed __int128)*(__pointer + __offset);
}
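
A sketch for the corrected sign-extending loads (POWER10): one scalar element is widened into a signed 128-bit lane, so with the signed return type fixed above, loading -1 yields a 128-bit -1 rather than a zero-extended value:

#include <altivec.h>

static vector signed __int128 load_sext(const signed char *p) {
  return vec_xl_sext(0, p); /* offset is in elements here */
}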
/* vec_xl_zext */
@@ -18260,13 +18335,17 @@ vec_expandm(vector unsigned __int128 __a) {
#define vec_cntm(__a, __mp) \
_Generic((__a), vector unsigned char \
- : __builtin_altivec_vcntmbb((__a), (unsigned char)(__mp)), \
+ : __builtin_altivec_vcntmbb((vector unsigned char)(__a), \
+ (unsigned char)(__mp)), \
vector unsigned short \
- : __builtin_altivec_vcntmbh((__a), (unsigned char)(__mp)), \
+ : __builtin_altivec_vcntmbh((vector unsigned short)(__a), \
+ (unsigned char)(__mp)), \
vector unsigned int \
- : __builtin_altivec_vcntmbw((__a), (unsigned char)(__mp)), \
+ : __builtin_altivec_vcntmbw((vector unsigned int)(__a), \
+ (unsigned char)(__mp)), \
vector unsigned long long \
- : __builtin_altivec_vcntmbd((__a), (unsigned char)(__mp)))
+ : __builtin_altivec_vcntmbd((vector unsigned long long)(__a), \
+ (unsigned char)(__mp)))
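
A usage sketch for the mask-bit count (Power10). The second operand selects which mask bits are counted; the exact encoding is an assumption to verify against the vcntmb* description:

#include <altivec.h>

static unsigned long long cntm_demo(vector unsigned char m) {
  return vec_cntm(m, 1); /* per the vcntmbb definition above */
}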
/* vec_gen[b|h|w|d|q]m */
@@ -18327,43 +18406,52 @@ vec_cfuge(vector unsigned long long __a, vector unsigned long long __b) {
#ifdef __SIZEOF_INT128__
#define vec_ternarylogic(__a, __b, __c, __imm) \
_Generic((__a), vector unsigned char \
- : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
- (vector unsigned long long)(__b), \
- (vector unsigned long long)(__c), (__imm)), \
+ : (vector unsigned char)__builtin_vsx_xxeval( \
+ (vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)), \
vector unsigned short \
- : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
- (vector unsigned long long)(__b), \
- (vector unsigned long long)(__c), (__imm)), \
+ : (vector unsigned short)__builtin_vsx_xxeval( \
+ (vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)), \
vector unsigned int \
- : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
- (vector unsigned long long)(__b), \
- (vector unsigned long long)(__c), (__imm)), \
+ : (vector unsigned int)__builtin_vsx_xxeval( \
+ (vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)), \
vector unsigned long long \
- : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
- (vector unsigned long long)(__b), \
- (vector unsigned long long)(__c), (__imm)), \
+ : (vector unsigned long long)__builtin_vsx_xxeval( \
+ (vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)), \
vector unsigned __int128 \
- : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
- (vector unsigned long long)(__b), \
- (vector unsigned long long)(__c), (__imm)))
+ : (vector unsigned __int128)__builtin_vsx_xxeval( \
+ (vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)))
#else
#define vec_ternarylogic(__a, __b, __c, __imm) \
_Generic((__a), vector unsigned char \
- : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
- (vector unsigned long long)(__b), \
- (vector unsigned long long)(__c), (__imm)), \
+ : (vector unsigned char)__builtin_vsx_xxeval( \
+ (vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)), \
vector unsigned short \
- : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
- (vector unsigned long long)(__b), \
- (vector unsigned long long)(__c), (__imm)), \
+ : (vector unsigned short)__builtin_vsx_xxeval( \
+ (vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)), \
vector unsigned int \
- : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
- (vector unsigned long long)(__b), \
- (vector unsigned long long)(__c), (__imm)), \
+ : (vector unsigned int)__builtin_vsx_xxeval( \
+ (vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)), \
vector unsigned long long \
- : __builtin_vsx_xxeval((vector unsigned long long)(__a), \
- (vector unsigned long long)(__b), \
- (vector unsigned long long)(__c), (__imm)))
+ : (vector unsigned long long)__builtin_vsx_xxeval( \
+ (vector unsigned long long)(__a), \
+ (vector unsigned long long)(__b), \
+ (vector unsigned long long)(__c), (__imm)))
#endif /* __SIZEOF_INT128__ */
#endif /* __VSX__ */
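
A sketch for vec_ternarylogic (Power10/VSX): xxeval evaluates an arbitrary 3-input boolean function per bit, selected by an 8-bit truth table. The value 0x96 is assumed here to encode a ^ b ^ c, following the vpternlog-style convention; verify the exact encoding against the xxeval description before relying on it:

#include <altivec.h>

static vector unsigned int tern_demo(vector unsigned int a,
                                     vector unsigned int b,
                                     vector unsigned int c) {
  return vec_ternarylogic(a, b, c, 0x96); /* assumed: three-way XOR */
}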
@@ -18371,14 +18459,16 @@ vec_cfuge(vector unsigned long long __a, vector unsigned long long __b) {
#ifdef __VSX__
#define vec_genpcvm(__a, __imm) \
- _Generic((__a), vector unsigned char \
- : __builtin_vsx_xxgenpcvbm((__a), (int)(__imm)), \
- vector unsigned short \
- : __builtin_vsx_xxgenpcvhm((__a), (int)(__imm)), \
- vector unsigned int \
- : __builtin_vsx_xxgenpcvwm((__a), (int)(__imm)), \
- vector unsigned long long \
- : __builtin_vsx_xxgenpcvdm((__a), (int)(__imm)))
+ _Generic( \
+ (__a), vector unsigned char \
+ : __builtin_vsx_xxgenpcvbm((vector unsigned char)(__a), (int)(__imm)), \
+ vector unsigned short \
+ : __builtin_vsx_xxgenpcvhm((vector unsigned short)(__a), (int)(__imm)), \
+ vector unsigned int \
+ : __builtin_vsx_xxgenpcvwm((vector unsigned int)(__a), (int)(__imm)), \
+ vector unsigned long long \
+ : __builtin_vsx_xxgenpcvdm((vector unsigned long long)(__a), \
+ (int)(__imm)))
#endif /* __VSX__ */
/* vec_clr_first */
@@ -18386,18 +18476,22 @@ vec_cfuge(vector unsigned long long __a, vector unsigned long long __b) {
static __inline__ vector signed char __ATTRS_o_ai
vec_clr_first(vector signed char __a, unsigned int __n) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vclrrb(__a, __n);
+ return (vector signed char)__builtin_altivec_vclrrb((vector unsigned char)__a,
+ __n);
#else
- return __builtin_altivec_vclrlb( __a, __n);
+ return (vector signed char)__builtin_altivec_vclrlb((vector unsigned char)__a,
+ __n);
#endif
}
static __inline__ vector unsigned char __ATTRS_o_ai
vec_clr_first(vector unsigned char __a, unsigned int __n) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vclrrb((vector signed char)__a, __n);
+ return (vector unsigned char)__builtin_altivec_vclrrb(
+ (vector unsigned char)__a, __n);
#else
- return __builtin_altivec_vclrlb((vector signed char)__a, __n);
+ return (vector unsigned char)__builtin_altivec_vclrlb(
+ (vector unsigned char)__a, __n);
#endif
}
@@ -18406,18 +18500,22 @@ vec_clr_first(vector unsigned char __a, unsigned int __n) {
static __inline__ vector signed char __ATTRS_o_ai
vec_clr_last(vector signed char __a, unsigned int __n) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vclrlb(__a, __n);
+ return (vector signed char)__builtin_altivec_vclrlb((vector unsigned char)__a,
+ __n);
#else
- return __builtin_altivec_vclrrb( __a, __n);
+ return (vector signed char)__builtin_altivec_vclrrb((vector unsigned char)__a,
+ __n);
#endif
}
static __inline__ vector unsigned char __ATTRS_o_ai
vec_clr_last(vector unsigned char __a, unsigned int __n) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vclrlb((vector signed char)__a, __n);
+ return (vector unsigned char)__builtin_altivec_vclrlb(
+ (vector unsigned char)__a, __n);
#else
- return __builtin_altivec_vclrrb((vector signed char)__a, __n);
+ return (vector unsigned char)__builtin_altivec_vclrrb(
+ (vector unsigned char)__a, __n);
#endif
}
@@ -18469,13 +18567,75 @@ vec_mod(vector unsigned __int128 __a, vector unsigned __int128 __b) {
}
#endif
-/* vec_sldbi */
-
-#define vec_sldb(__a, __b, __c) __builtin_altivec_vsldbi(__a, __b, (__c & 0x7))
-
-/* vec_srdbi */
-
-#define vec_srdb(__a, __b, __c) __builtin_altivec_vsrdbi(__a, __b, (__c & 0x7))
+/* vec_sldb */
+#define vec_sldb(__a, __b, __c) \
+ _Generic( \
+ (__a), vector unsigned char \
+ : (vector unsigned char)__builtin_altivec_vsldbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector signed char \
+ : (vector signed char)__builtin_altivec_vsldbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector unsigned short \
+ : (vector unsigned short)__builtin_altivec_vsldbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector signed short \
+ : (vector signed short)__builtin_altivec_vsldbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector unsigned int \
+ : (vector unsigned int)__builtin_altivec_vsldbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector signed int \
+ : (vector signed int)__builtin_altivec_vsldbi((vector unsigned char)__a, \
+ (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector unsigned long long \
+ : (vector unsigned long long)__builtin_altivec_vsldbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector signed long long \
+ : (vector signed long long)__builtin_altivec_vsldbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, (__c & 0x7)))
+
+/* vec_srdb */
+#define vec_srdb(__a, __b, __c) \
+ _Generic( \
+ (__a), vector unsigned char \
+ : (vector unsigned char)__builtin_altivec_vsrdbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector signed char \
+ : (vector signed char)__builtin_altivec_vsrdbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector unsigned short \
+ : (vector unsigned short)__builtin_altivec_vsrdbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector signed short \
+ : (vector signed short)__builtin_altivec_vsrdbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector unsigned int \
+ : (vector unsigned int)__builtin_altivec_vsrdbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector signed int \
+ : (vector signed int)__builtin_altivec_vsrdbi((vector unsigned char)__a, \
+ (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector unsigned long long \
+ : (vector unsigned long long)__builtin_altivec_vsrdbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, \
+ (__c & 0x7)), \
+ vector signed long long \
+ : (vector signed long long)__builtin_altivec_vsrdbi( \
+ (vector unsigned char)__a, (vector unsigned char)__b, (__c & 0x7)))
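
A sketch for the new type-generic shift-double macros (Power10): __a and __b are treated as a pair so shifted-in bits come from the neighbour operand, and the count is masked to 0..7 as in the macros above:

#include <altivec.h>

static vector unsigned char sldb_demo(vector unsigned char a,
                                      vector unsigned char b) {
  return vec_sldb(a, b, 3); /* vsldbi with a 3-bit shift */
}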
/* vec_insertl */
@@ -18704,16 +18864,46 @@ vec_extracth(vector unsigned long long __a, vector unsigned long long __b,
#ifdef __VSX__
/* vec_permx */
-
#define vec_permx(__a, __b, __c, __d) \
- __builtin_vsx_xxpermx((__a), (__b), (__c), (__d))
+ _Generic( \
+ (__a), vector unsigned char \
+ : (vector unsigned char)__builtin_vsx_xxpermx( \
+ (vector unsigned char)__a, (vector unsigned char)__b, __c, __d), \
+ vector signed char \
+ : (vector signed char)__builtin_vsx_xxpermx( \
+ (vector unsigned char)__a, (vector unsigned char)__b, __c, __d), \
+ vector unsigned short \
+ : (vector unsigned short)__builtin_vsx_xxpermx( \
+ (vector unsigned char)__a, (vector unsigned char)__b, __c, __d), \
+ vector signed short \
+ : (vector signed short)__builtin_vsx_xxpermx( \
+ (vector unsigned char)__a, (vector unsigned char)__b, __c, __d), \
+ vector unsigned int \
+ : (vector unsigned int)__builtin_vsx_xxpermx( \
+ (vector unsigned char)__a, (vector unsigned char)__b, __c, __d), \
+ vector signed int \
+ : (vector signed int)__builtin_vsx_xxpermx( \
+ (vector unsigned char)__a, (vector unsigned char)__b, __c, __d), \
+ vector unsigned long long \
+ : (vector unsigned long long)__builtin_vsx_xxpermx( \
+ (vector unsigned char)__a, (vector unsigned char)__b, __c, __d), \
+ vector signed long long \
+ : (vector signed long long)__builtin_vsx_xxpermx( \
+ (vector unsigned char)__a, (vector unsigned char)__b, __c, __d), \
+ vector float \
+ : (vector float)__builtin_vsx_xxpermx( \
+ (vector unsigned char)__a, (vector unsigned char)__b, __c, __d), \
+ vector double \
+ : (vector double)__builtin_vsx_xxpermx( \
+ (vector unsigned char)__a, (vector unsigned char)__b, __c, __d))
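
A sketch for vec_permx (Power10): bytes are drawn from the a:b pair under the control vector, with the 3-bit immediate selecting among the xxpermx result interpretations (the immediate's exact meaning is an assumption to check against the ISA):

#include <altivec.h>

static vector unsigned char permx_demo(vector unsigned char a,
                                       vector unsigned char b,
                                       vector unsigned char c) {
  return vec_permx(a, b, c, 0);
}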
/* vec_blendv */
static __inline__ vector signed char __ATTRS_o_ai
vec_blendv(vector signed char __a, vector signed char __b,
vector unsigned char __c) {
- return __builtin_vsx_xxblendvb(__a, __b, __c);
+ return (vector signed char)__builtin_vsx_xxblendvb(
+ (vector unsigned char)__a, (vector unsigned char)__b, __c);
}
static __inline__ vector unsigned char __ATTRS_o_ai
@@ -18725,7 +18915,8 @@ vec_blendv(vector unsigned char __a, vector unsigned char __b,
static __inline__ vector signed short __ATTRS_o_ai
vec_blendv(vector signed short __a, vector signed short __b,
vector unsigned short __c) {
- return __builtin_vsx_xxblendvh(__a, __b, __c);
+ return (vector signed short)__builtin_vsx_xxblendvh(
+ (vector unsigned short)__a, (vector unsigned short)__b, __c);
}
static __inline__ vector unsigned short __ATTRS_o_ai
@@ -18737,7 +18928,8 @@ vec_blendv(vector unsigned short __a, vector unsigned short __b,
static __inline__ vector signed int __ATTRS_o_ai
vec_blendv(vector signed int __a, vector signed int __b,
vector unsigned int __c) {
- return __builtin_vsx_xxblendvw(__a, __b, __c);
+ return (vector signed int)__builtin_vsx_xxblendvw(
+ (vector unsigned int)__a, (vector unsigned int)__b, __c);
}
static __inline__ vector unsigned int __ATTRS_o_ai
@@ -18749,33 +18941,68 @@ vec_blendv(vector unsigned int __a, vector unsigned int __b,
static __inline__ vector signed long long __ATTRS_o_ai
vec_blendv(vector signed long long __a, vector signed long long __b,
vector unsigned long long __c) {
- return __builtin_vsx_xxblendvd(__a, __b, __c);
+ return (vector signed long long)__builtin_vsx_xxblendvd(
+ (vector unsigned long long)__a, (vector unsigned long long)__b, __c);
}
static __inline__ vector unsigned long long __ATTRS_o_ai
vec_blendv(vector unsigned long long __a, vector unsigned long long __b,
vector unsigned long long __c) {
- return __builtin_vsx_xxblendvd(__a, __b, __c);
+ return (vector unsigned long long)__builtin_vsx_xxblendvd(__a, __b, __c);
}
static __inline__ vector float __ATTRS_o_ai
vec_blendv(vector float __a, vector float __b, vector unsigned int __c) {
- return __builtin_vsx_xxblendvw(__a, __b, __c);
+ return (vector float)__builtin_vsx_xxblendvw((vector unsigned int)__a,
+ (vector unsigned int)__b, __c);
}
static __inline__ vector double __ATTRS_o_ai
vec_blendv(vector double __a, vector double __b,
vector unsigned long long __c) {
- return __builtin_vsx_xxblendvd(__a, __b, __c);
+ return (vector double)__builtin_vsx_xxblendvd(
+ (vector unsigned long long)__a, (vector unsigned long long)__b, __c);
}
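
A sketch for the blend overloads above (Power10): per-lane select, where result lanes are assumed to come from __b when the mask lane's top bit is set and from __a otherwise, per the xxblendv* instructions:

#include <altivec.h>

static vector float blend_demo(vector float a, vector float b,
                               vector unsigned int m) {
  return vec_blendv(a, b, m);
}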
-/* vec_replace_elt */
-
-#define vec_replace_elt __builtin_altivec_vec_replace_elt
-
-/* vec_replace_unaligned */
+#define vec_replace_unaligned(__a, __b, __c) \
+ _Generic((__a), vector signed int \
+ : __builtin_altivec_vinsw((vector unsigned char)__a, \
+ (unsigned int)__b, __c), \
+ vector unsigned int \
+ : __builtin_altivec_vinsw((vector unsigned char)__a, \
+ (unsigned int)__b, __c), \
+ vector unsigned long long \
+ : __builtin_altivec_vinsd((vector unsigned char)__a, \
+ (unsigned long long)__b, __c), \
+ vector signed long long \
+ : __builtin_altivec_vinsd((vector unsigned char)__a, \
+ (unsigned long long)__b, __c), \
+ vector float \
+ : __builtin_altivec_vinsw((vector unsigned char)__a, \
+ (unsigned int)__b, __c), \
+ vector double \
+ : __builtin_altivec_vinsd((vector unsigned char)__a, \
+ (unsigned long long)__b, __c))
-#define vec_replace_unaligned __builtin_altivec_vec_replace_unaligned
+#define vec_replace_elt(__a, __b, __c) \
+ _Generic((__a), vector signed int \
+ : (vector signed int)__builtin_altivec_vinsw_elt( \
+ (vector unsigned char)__a, (unsigned int)__b, __c), \
+ vector unsigned int \
+ : (vector unsigned int)__builtin_altivec_vinsw_elt( \
+ (vector unsigned char)__a, (unsigned int)__b, __c), \
+ vector unsigned long long \
+ : (vector unsigned long long)__builtin_altivec_vinsd_elt( \
+ (vector unsigned char)__a, (unsigned long long)__b, __c), \
+ vector signed long long \
+ : (vector signed long long)__builtin_altivec_vinsd_elt( \
+ (vector unsigned char)__a, (unsigned long long)__b, __c), \
+ vector float \
+ : (vector float)__builtin_altivec_vinsw_elt( \
+ (vector unsigned char)__a, (unsigned int)__b, __c), \
+ vector double \
+ : (vector double)__builtin_altivec_vinsd_elt( \
+ (vector unsigned char)__a, (unsigned long long)__b, __c))
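
A sketch for the now type-generic element replacement (Power10). Index 0 is used purely for illustration; whether the immediate is an element index or a byte offset is an assumption to verify against vinsw/vinsd:

#include <altivec.h>

static vector unsigned int repl_demo(vector unsigned int v,
                                     unsigned int x) {
  return vec_replace_elt(v, x, 0);
}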
/* vec_splati */
@@ -18852,27 +19079,33 @@ vec_test_lsbb_all_zeros(vector unsigned char __a) {
static __inline__ vector unsigned char __ATTRS_o_ai
vec_stril(vector unsigned char __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vstribr((vector signed char)__a);
+ return (vector unsigned char)__builtin_altivec_vstribr(
+ (vector unsigned char)__a);
#else
- return __builtin_altivec_vstribl((vector signed char)__a);
+ return (vector unsigned char)__builtin_altivec_vstribl(
+ (vector unsigned char)__a);
#endif
}
static __inline__ vector signed char __ATTRS_o_ai
vec_stril(vector signed char __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vstribr(__a);
+ return (vector signed char)__builtin_altivec_vstribr(
+ (vector unsigned char)__a);
#else
- return __builtin_altivec_vstribl(__a);
+ return (vector signed char)__builtin_altivec_vstribl(
+ (vector unsigned char)__a);
#endif
}
static __inline__ vector unsigned short __ATTRS_o_ai
vec_stril(vector unsigned short __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vstrihr((vector signed short)__a);
+ return (vector unsigned short)__builtin_altivec_vstrihr(
+ (vector signed short)__a);
#else
- return __builtin_altivec_vstrihl((vector signed short)__a);
+ return (vector unsigned short)__builtin_altivec_vstrihl(
+ (vector signed short)__a);
#endif
}
@@ -18889,17 +19122,17 @@ vec_stril(vector signed short __a) {
static __inline__ int __ATTRS_o_ai vec_stril_p(vector unsigned char __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vstribr_p(__CR6_EQ, (vector signed char)__a);
+ return __builtin_altivec_vstribr_p(__CR6_EQ, (vector unsigned char)__a);
#else
- return __builtin_altivec_vstribl_p(__CR6_EQ, (vector signed char)__a);
+ return __builtin_altivec_vstribl_p(__CR6_EQ, (vector unsigned char)__a);
#endif
}
static __inline__ int __ATTRS_o_ai vec_stril_p(vector signed char __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vstribr_p(__CR6_EQ, __a);
+ return __builtin_altivec_vstribr_p(__CR6_EQ, (vector unsigned char)__a);
#else
- return __builtin_altivec_vstribl_p(__CR6_EQ, __a);
+ return __builtin_altivec_vstribl_p(__CR6_EQ, (vector unsigned char)__a);
#endif
}
@@ -18924,27 +19157,33 @@ static __inline__ int __ATTRS_o_ai vec_stril_p(vector signed short __a) {
static __inline__ vector unsigned char __ATTRS_o_ai
vec_strir(vector unsigned char __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vstribl((vector signed char)__a);
+ return (vector unsigned char)__builtin_altivec_vstribl(
+ (vector unsigned char)__a);
#else
- return __builtin_altivec_vstribr((vector signed char)__a);
+ return (vector unsigned char)__builtin_altivec_vstribr(
+ (vector unsigned char)__a);
#endif
}
static __inline__ vector signed char __ATTRS_o_ai
vec_strir(vector signed char __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vstribl(__a);
+ return (vector signed char)__builtin_altivec_vstribl(
+ (vector unsigned char)__a);
#else
- return __builtin_altivec_vstribr(__a);
+ return (vector signed char)__builtin_altivec_vstribr(
+ (vector unsigned char)__a);
#endif
}
static __inline__ vector unsigned short __ATTRS_o_ai
vec_strir(vector unsigned short __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vstrihl((vector signed short)__a);
+ return (vector unsigned short)__builtin_altivec_vstrihl(
+ (vector signed short)__a);
#else
- return __builtin_altivec_vstrihr((vector signed short)__a);
+ return (vector unsigned short)__builtin_altivec_vstrihr(
+ (vector signed short)__a);
#endif
}
@@ -18961,17 +19200,17 @@ vec_strir(vector signed short __a) {
static __inline__ int __ATTRS_o_ai vec_strir_p(vector unsigned char __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vstribl_p(__CR6_EQ, (vector signed char)__a);
+ return __builtin_altivec_vstribl_p(__CR6_EQ, (vector unsigned char)__a);
#else
- return __builtin_altivec_vstribr_p(__CR6_EQ, (vector signed char)__a);
+ return __builtin_altivec_vstribr_p(__CR6_EQ, (vector unsigned char)__a);
#endif
}
static __inline__ int __ATTRS_o_ai vec_strir_p(vector signed char __a) {
#ifdef __LITTLE_ENDIAN__
- return __builtin_altivec_vstribl_p(__CR6_EQ, __a);
+ return __builtin_altivec_vstribl_p(__CR6_EQ, (vector unsigned char)__a);
#else
- return __builtin_altivec_vstribr_p(__CR6_EQ, __a);
+ return __builtin_altivec_vstribr_p(__CR6_EQ, (vector unsigned char)__a);
#endif
}
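
A sketch for the string-isolate predicates above (Power10): vec_stril keeps bytes up to the first zero byte and clears the rest, and the _p form is assumed to report whether a terminating zero byte was found (via the __CR6_EQ predicate):

#include <altivec.h>

static int has_nul(vector unsigned char s) {
  return vec_stril_p(s);
}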
diff --git a/contrib/llvm-project/clang/lib/Headers/amxintrin.h b/contrib/llvm-project/clang/lib/Headers/amxintrin.h
index 4940666e8083..ec67a87e39ca 100644
--- a/contrib/llvm-project/clang/lib/Headers/amxintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/amxintrin.h
@@ -439,8 +439,6 @@ static __inline__ void __tile_dpbuud(__tile1024i *dst, __tile1024i src0,
///
/// This intrinsic corresponds to the <c> TILESTORED </c> instruction.
///
-/// \param dst
-/// A destination tile. Max size is 1024 Bytes.
/// \param base
/// A pointer to base address.
/// \param stride
diff --git a/contrib/llvm-project/clang/lib/Headers/avx2intrin.h b/contrib/llvm-project/clang/lib/Headers/avx2intrin.h
index e33514a60ff3..f8521e7d72b5 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx2intrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx2intrin.h
@@ -92,25 +92,25 @@ _mm256_add_epi64(__m256i __a, __m256i __b)
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_adds_epi8(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_paddsb256((__v32qi)__a, (__v32qi)__b);
+ return (__m256i)__builtin_elementwise_add_sat((__v32qs)__a, (__v32qs)__b);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_adds_epi16(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_paddsw256((__v16hi)__a, (__v16hi)__b);
+ return (__m256i)__builtin_elementwise_add_sat((__v16hi)__a, (__v16hi)__b);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_adds_epu8(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_paddusb256((__v32qi)__a, (__v32qi)__b);
+ return (__m256i)__builtin_elementwise_add_sat((__v32qu)__a, (__v32qu)__b);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_adds_epu16(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_paddusw256((__v16hi)__a, (__v16hi)__b);
+ return (__m256i)__builtin_elementwise_add_sat((__v16hu)__a, (__v16hu)__b);
}
#define _mm256_alignr_epi8(a, b, n) \
@@ -628,25 +628,25 @@ _mm256_sub_epi64(__m256i __a, __m256i __b)
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_subs_epi8(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_psubsb256((__v32qi)__a, (__v32qi)__b);
+ return (__m256i)__builtin_elementwise_sub_sat((__v32qs)__a, (__v32qs)__b);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_subs_epi16(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_psubsw256((__v16hi)__a, (__v16hi)__b);
+ return (__m256i)__builtin_elementwise_sub_sat((__v16hi)__a, (__v16hi)__b);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_subs_epu8(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_psubusb256((__v32qi)__a, (__v32qi)__b);
+ return (__m256i)__builtin_elementwise_sub_sat((__v32qu)__a, (__v32qu)__b);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_subs_epu16(__m256i __a, __m256i __b)
{
- return (__m256i)__builtin_ia32_psubusw256((__v16hi)__a, (__v16hi)__b);
+ return (__m256i)__builtin_elementwise_sub_sat((__v16hu)__a, (__v16hu)__b);
}
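
A sketch for the saturating arithmetic touched above (assumes an AVX2 target). __builtin_elementwise_add_sat/_sub_sat are the target-independent builtins the hunks switch to, with signedness carried by the cast types (__v32qs signed vs __v32qu unsigned):

#include <immintrin.h>

/* Saturating byte add: 0x7f + 1 stays 0x7f instead of wrapping. */
static __m256i adds_demo(__m256i a, __m256i b) {
  return _mm256_adds_epi8(a, b);
}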
static __inline__ __m256i __DEFAULT_FN_ATTRS256
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512bwintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512bwintrin.h
index 522ef100bab1..aaeb9364801c 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512bwintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512bwintrin.h
@@ -617,7 +617,7 @@ _mm512_maskz_packus_epi16(__mmask64 __M, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_adds_epi8 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_paddsb512((__v64qi)__A, (__v64qi)__B);
+ return (__m512i)__builtin_elementwise_add_sat((__v64qs)__A, (__v64qs)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -639,7 +639,7 @@ _mm512_maskz_adds_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_adds_epi16 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_paddsw512((__v32hi)__A, (__v32hi)__B);
+ return (__m512i)__builtin_elementwise_add_sat((__v32hi)__A, (__v32hi)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -661,7 +661,7 @@ _mm512_maskz_adds_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_adds_epu8 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_paddusb512((__v64qi) __A, (__v64qi) __B);
+ return (__m512i)__builtin_elementwise_add_sat((__v64qu) __A, (__v64qu) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -683,7 +683,7 @@ _mm512_maskz_adds_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_adds_epu16 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_paddusw512((__v32hi) __A, (__v32hi) __B);
+ return (__m512i)__builtin_elementwise_add_sat((__v32hu) __A, (__v32hu) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -950,7 +950,7 @@ _mm512_maskz_shuffle_epi8(__mmask64 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_subs_epi8 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_psubsb512((__v64qi)__A, (__v64qi)__B);
+ return (__m512i)__builtin_elementwise_sub_sat((__v64qs)__A, (__v64qs)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -972,7 +972,7 @@ _mm512_maskz_subs_epi8 (__mmask64 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_subs_epi16 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_psubsw512((__v32hi)__A, (__v32hi)__B);
+ return (__m512i)__builtin_elementwise_sub_sat((__v32hi)__A, (__v32hi)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -994,7 +994,7 @@ _mm512_maskz_subs_epi16 (__mmask32 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_subs_epu8 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_psubusb512((__v64qi) __A, (__v64qi) __B);
+ return (__m512i)__builtin_elementwise_sub_sat((__v64qu) __A, (__v64qu) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1016,7 +1016,7 @@ _mm512_maskz_subs_epu8 (__mmask64 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_subs_epu16 (__m512i __A, __m512i __B)
{
- return (__m512i)__builtin_ia32_psubusw512((__v32hi) __A, (__v32hi) __B);
+ return (__m512i)__builtin_elementwise_sub_sat((__v32hu) __A, (__v32hu) __B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1506,7 +1506,7 @@ _mm512_maskz_sll_epi16(__mmask32 __U, __m512i __A, __m128i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_slli_epi16(__m512i __A, unsigned int __B)
{
- return (__m512i)__builtin_ia32_psllwi512((__v32hi)__A, __B);
+ return (__m512i)__builtin_ia32_psllwi512((__v32hi)__A, (int)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1598,7 +1598,7 @@ _mm512_maskz_sra_epi16(__mmask32 __U, __m512i __A, __m128i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srai_epi16(__m512i __A, unsigned int __B)
{
- return (__m512i)__builtin_ia32_psrawi512((__v32hi)__A, __B);
+ return (__m512i)__builtin_ia32_psrawi512((__v32hi)__A, (int)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1643,7 +1643,7 @@ _mm512_maskz_srl_epi16(__mmask32 __U, __m512i __A, __m128i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srli_epi16(__m512i __A, unsigned int __B)
{
- return (__m512i)__builtin_ia32_psrlwi512((__v32hi)__A, __B);
+ return (__m512i)__builtin_ia32_psrlwi512((__v32hi)__A, (int)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -1659,7 +1659,7 @@ static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_maskz_srli_epi16(__mmask32 __U, __m512i __A, int __B)
{
return (__m512i)__builtin_ia32_selectw_512((__mmask32)__U,
- (__v32hi)_mm512_srli_epi16(__A, __B),
+ (__v32hi)_mm512_srli_epi16(__A, (unsigned int)__B),
(__v32hi)_mm512_setzero_si512());
}
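The (int) casts exist because the public intrinsics take an unsigned int count while the underlying __builtin_ia32_psllwi512-family builtins are declared with an int parameter; making the conversion explicit keeps the header quiet under strict conversion warnings without changing behavior. A sketch, assuming AVX-512BW (-mavx512bw):

    #include <immintrin.h>

    /* Word-granularity logical shifts: counts of 16 or more zero every
     * lane rather than being reduced modulo the element width. */
    __m512i halve_words(__m512i v) { return _mm512_srli_epi16(v, 1); }
    __m512i clear_words(__m512i v) { return _mm512_srli_epi16(v, 16); }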
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512fintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512fintrin.h
index 50e0e287d9fc..61bc89c2b895 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512fintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512fintrin.h
@@ -1780,7 +1780,7 @@ _mm512_floor_ps(__m512 __A)
{
return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
_MM_FROUND_FLOOR,
- (__v16sf) __A, -1,
+ (__v16sf) __A, (unsigned short)-1,
_MM_FROUND_CUR_DIRECTION);
}
@@ -1798,7 +1798,7 @@ _mm512_floor_pd(__m512d __A)
{
return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
_MM_FROUND_FLOOR,
- (__v8df) __A, -1,
+ (__v8df) __A, (unsigned char)-1,
_MM_FROUND_CUR_DIRECTION);
}
@@ -1825,7 +1825,7 @@ _mm512_ceil_ps(__m512 __A)
{
return (__m512) __builtin_ia32_rndscaleps_mask ((__v16sf) __A,
_MM_FROUND_CEIL,
- (__v16sf) __A, -1,
+ (__v16sf) __A, (unsigned short)-1,
_MM_FROUND_CUR_DIRECTION);
}
@@ -1834,7 +1834,7 @@ _mm512_ceil_pd(__m512d __A)
{
return (__m512d) __builtin_ia32_rndscalepd_mask ((__v8df) __A,
_MM_FROUND_CEIL,
- (__v8df) __A, -1,
+ (__v8df) __A, (unsigned char)-1,
_MM_FROUND_CUR_DIRECTION);
}
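-1 converted to the mask type is the all-ones pattern, i.e. "no lanes masked off"; the new (unsigned short) and (unsigned char) casts spell out that conversion for the 16-lane ps and 8-lane pd mask widths. Usage is unchanged, as in this sketch assuming AVX-512F (-mavx512f):

    #include <immintrin.h>

    /* All 16 float lanes are rounded toward -infinity: the (__mmask16)-1
     * == 0xFFFF mask built into _mm512_floor_ps enables every element. */
    __m512 round_down(__m512 v) { return _mm512_floor_ps(v); }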
@@ -5117,7 +5117,7 @@ _mm512_maskz_rolv_epi64 (__mmask8 __U, __m512i __A, __m512i __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_slli_epi32(__m512i __A, unsigned int __B)
{
- return (__m512i)__builtin_ia32_pslldi512((__v16si)__A, __B);
+ return (__m512i)__builtin_ia32_pslldi512((__v16si)__A, (int)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5139,7 +5139,7 @@ _mm512_maskz_slli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) {
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_slli_epi64(__m512i __A, unsigned int __B)
{
- return (__m512i)__builtin_ia32_psllqi512((__v8di)__A, __B);
+ return (__m512i)__builtin_ia32_psllqi512((__v8di)__A, (int)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5161,7 +5161,7 @@ _mm512_maskz_slli_epi64(__mmask8 __U, __m512i __A, unsigned int __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srli_epi32(__m512i __A, unsigned int __B)
{
- return (__m512i)__builtin_ia32_psrldi512((__v16si)__A, __B);
+ return (__m512i)__builtin_ia32_psrldi512((__v16si)__A, (int)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5183,7 +5183,7 @@ _mm512_maskz_srli_epi32(__mmask16 __U, __m512i __A, unsigned int __B) {
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srli_epi64(__m512i __A, unsigned int __B)
{
- return (__m512i)__builtin_ia32_psrlqi512((__v8di)__A, __B);
+ return (__m512i)__builtin_ia32_psrlqi512((__v8di)__A, (int)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -5929,41 +5929,44 @@ _mm512_maskz_srlv_epi64(__mmask8 __U, __m512i __X, __m512i __Y)
(__v8di)_mm512_setzero_si512());
}
-#define _mm512_ternarylogic_epi32(A, B, C, imm) \
- ((__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
- (__v16si)(__m512i)(B), \
- (__v16si)(__m512i)(C), (int)(imm), \
- (__mmask16)-1))
-
-#define _mm512_mask_ternarylogic_epi32(A, U, B, C, imm) \
- ((__m512i)__builtin_ia32_pternlogd512_mask((__v16si)(__m512i)(A), \
- (__v16si)(__m512i)(B), \
- (__v16si)(__m512i)(C), (int)(imm), \
- (__mmask16)(U)))
-
-#define _mm512_maskz_ternarylogic_epi32(U, A, B, C, imm) \
- ((__m512i)__builtin_ia32_pternlogd512_maskz((__v16si)(__m512i)(A), \
- (__v16si)(__m512i)(B), \
- (__v16si)(__m512i)(C), \
- (int)(imm), (__mmask16)(U)))
-
-#define _mm512_ternarylogic_epi64(A, B, C, imm) \
- ((__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
- (__v8di)(__m512i)(B), \
- (__v8di)(__m512i)(C), (int)(imm), \
- (__mmask8)-1))
-
-#define _mm512_mask_ternarylogic_epi64(A, U, B, C, imm) \
- ((__m512i)__builtin_ia32_pternlogq512_mask((__v8di)(__m512i)(A), \
- (__v8di)(__m512i)(B), \
- (__v8di)(__m512i)(C), (int)(imm), \
- (__mmask8)(U)))
-
-#define _mm512_maskz_ternarylogic_epi64(U, A, B, C, imm) \
- ((__m512i)__builtin_ia32_pternlogq512_maskz((__v8di)(__m512i)(A), \
- (__v8di)(__m512i)(B), \
- (__v8di)(__m512i)(C), (int)(imm), \
- (__mmask8)(U)))
+/// \enum _MM_TERNLOG_ENUM
+/// A helper to represent the ternary logic operations among vectors \a A,
+/// \a B, and \a C. The representation is passed to \a imm.
+typedef enum {
+ _MM_TERNLOG_A = 0xF0,
+ _MM_TERNLOG_B = 0xCC,
+ _MM_TERNLOG_C = 0xAA
+} _MM_TERNLOG_ENUM;
+
+#define _mm512_ternarylogic_epi32(A, B, C, imm) \
+ ((__m512i)__builtin_ia32_pternlogd512_mask( \
+ (__v16si)(__m512i)(A), (__v16si)(__m512i)(B), (__v16si)(__m512i)(C), \
+ (unsigned char)(imm), (__mmask16)-1))
+
+#define _mm512_mask_ternarylogic_epi32(A, U, B, C, imm) \
+ ((__m512i)__builtin_ia32_pternlogd512_mask( \
+ (__v16si)(__m512i)(A), (__v16si)(__m512i)(B), (__v16si)(__m512i)(C), \
+ (unsigned char)(imm), (__mmask16)(U)))
+
+#define _mm512_maskz_ternarylogic_epi32(U, A, B, C, imm) \
+ ((__m512i)__builtin_ia32_pternlogd512_maskz( \
+ (__v16si)(__m512i)(A), (__v16si)(__m512i)(B), (__v16si)(__m512i)(C), \
+ (unsigned char)(imm), (__mmask16)(U)))
+
+#define _mm512_ternarylogic_epi64(A, B, C, imm) \
+ ((__m512i)__builtin_ia32_pternlogq512_mask( \
+ (__v8di)(__m512i)(A), (__v8di)(__m512i)(B), (__v8di)(__m512i)(C), \
+ (unsigned char)(imm), (__mmask8)-1))
+
+#define _mm512_mask_ternarylogic_epi64(A, U, B, C, imm) \
+ ((__m512i)__builtin_ia32_pternlogq512_mask( \
+ (__v8di)(__m512i)(A), (__v8di)(__m512i)(B), (__v8di)(__m512i)(C), \
+ (unsigned char)(imm), (__mmask8)(U)))
+
+#define _mm512_maskz_ternarylogic_epi64(U, A, B, C, imm) \
+ ((__m512i)__builtin_ia32_pternlogq512_maskz( \
+ (__v8di)(__m512i)(A), (__v8di)(__m512i)(B), (__v8di)(__m512i)(C), \
+ (unsigned char)(imm), (__mmask8)(U)))
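The imm byte is a 3-input truth table (hence the new (unsigned char) cast), and the _MM_TERNLOG_* constants are chosen so that evaluating the desired boolean expression on the constants themselves yields the right table byte. A sketch, assuming AVX-512F:

    #include <immintrin.h>

    /* (A & B) | C: (0xF0 & 0xCC) | 0xAA == 0xEA, applied bitwise per lane. */
    __m512i and_then_or(__m512i a, __m512i b, __m512i c) {
        return _mm512_ternarylogic_epi32(
            a, b, c, (_MM_TERNLOG_A & _MM_TERNLOG_B) | _MM_TERNLOG_C);
    }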
#ifdef __x86_64__
#define _mm_cvt_roundsd_i64(A, R) \
@@ -6603,7 +6606,7 @@ _mm_maskz_scalef_ss (__mmask8 __U, __m128 __A, __m128 __B)
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srai_epi32(__m512i __A, unsigned int __B)
{
- return (__m512i)__builtin_ia32_psradi512((__v16si)__A, __B);
+ return (__m512i)__builtin_ia32_psradi512((__v16si)__A, (int)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -6626,7 +6629,7 @@ _mm512_maskz_srai_epi32(__mmask16 __U, __m512i __A,
static __inline__ __m512i __DEFAULT_FN_ATTRS512
_mm512_srai_epi64(__m512i __A, unsigned int __B)
{
- return (__m512i)__builtin_ia32_psraqi512((__v8di)__A, __B);
+ return (__m512i)__builtin_ia32_psraqi512((__v8di)__A, (int)__B);
}
static __inline__ __m512i __DEFAULT_FN_ATTRS512
@@ -9316,11 +9319,11 @@ _mm512_mask_abs_pd(__m512d __W, __mmask8 __K, __m512d __A)
*/
static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_add_epi64(__m512i __W) {
- return __builtin_ia32_reduce_add_q512(__W);
+ return __builtin_reduce_add((__v8di)__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_mul_epi64(__m512i __W) {
- return __builtin_ia32_reduce_mul_q512(__W);
+ return __builtin_reduce_mul((__v8di)__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_and_epi64(__m512i __W) {
@@ -9334,18 +9337,18 @@ static __inline__ long long __DEFAULT_FN_ATTRS512 _mm512_reduce_or_epi64(__m512i
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_add_epi64(__mmask8 __M, __m512i __W) {
__W = _mm512_maskz_mov_epi64(__M, __W);
- return __builtin_ia32_reduce_add_q512(__W);
+ return __builtin_reduce_add((__v8di)__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_mul_epi64(__mmask8 __M, __m512i __W) {
__W = _mm512_mask_mov_epi64(_mm512_set1_epi64(1), __M, __W);
- return __builtin_ia32_reduce_mul_q512(__W);
+ return __builtin_reduce_mul((__v8di)__W);
}
static __inline__ long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_and_epi64(__mmask8 __M, __m512i __W) {
- __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __W);
+ __W = _mm512_mask_mov_epi64(_mm512_set1_epi64(-1LL), __M, __W);
return __builtin_reduce_and((__v8di)__W);
}
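Switching ~0ULL to -1LL avoids an implicit unsigned-to-signed conversion in the _mm512_set1_epi64 argument; the value is still the all-ones AND identity, so masked-off lanes cannot clear any bit of the result (just as 0 is the identity seeded for add and 1 for mul above). A usage sketch, assuming AVX-512F:

    #include <immintrin.h>

    /* AND-reduce the even lanes only; the odd lanes are seeded with
     * all-ones and therefore drop out of the result. */
    long long and_even_lanes(__m512i v) {
        return _mm512_mask_reduce_and_epi64((__mmask8)0x55, v);
    }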
@@ -9380,12 +9383,12 @@ _mm512_mask_reduce_mul_pd(__mmask8 __M, __m512d __W) {
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_add_epi32(__m512i __W) {
- return __builtin_ia32_reduce_add_d512((__v16si)__W);
+ return __builtin_reduce_add((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_reduce_mul_epi32(__m512i __W) {
- return __builtin_ia32_reduce_mul_d512((__v16si)__W);
+ return __builtin_reduce_mul((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
@@ -9401,18 +9404,18 @@ _mm512_reduce_or_epi32(__m512i __W) {
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_add_epi32( __mmask16 __M, __m512i __W) {
__W = _mm512_maskz_mov_epi32(__M, __W);
- return __builtin_ia32_reduce_add_d512((__v16si)__W);
+ return __builtin_reduce_add((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_mul_epi32( __mmask16 __M, __m512i __W) {
__W = _mm512_mask_mov_epi32(_mm512_set1_epi32(1), __M, __W);
- return __builtin_ia32_reduce_mul_d512((__v16si)__W);
+ return __builtin_reduce_mul((__v16si)__W);
}
static __inline__ int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_and_epi32( __mmask16 __M, __m512i __W) {
- __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __W);
+ __W = _mm512_mask_mov_epi32(_mm512_set1_epi32(-1), __M, __W);
return __builtin_reduce_and((__v16si)__W);
}
@@ -9484,7 +9487,7 @@ _mm512_mask_reduce_min_epi64(__mmask8 __M, __m512i __V) {
static __inline__ unsigned long long __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_min_epu64(__mmask8 __M, __m512i __V) {
- __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(~0ULL), __M, __V);
+ __V = _mm512_mask_mov_epi64(_mm512_set1_epi64(-1LL), __M, __V);
return __builtin_reduce_min((__v8du)__V);
}
static __inline__ int __DEFAULT_FN_ATTRS512
@@ -9527,7 +9530,7 @@ _mm512_mask_reduce_min_epi32(__mmask16 __M, __m512i __V) {
static __inline__ unsigned int __DEFAULT_FN_ATTRS512
_mm512_mask_reduce_min_epu32(__mmask16 __M, __m512i __V) {
- __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(~0U), __M, __V);
+ __V = _mm512_mask_mov_epi32(_mm512_set1_epi32(-1), __M, __V);
return __builtin_reduce_min((__v16su)__V);
}
@@ -9598,7 +9601,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
///
/// This intrinsic corresponds to the <c> VGATHERDPD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// i := j*64
/// m := j*32
@@ -9606,7 +9609,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
/// dst[i+63:i] := MEM[addr+63:addr]
/// ENDFOR
/// dst[MAX:512] := 0
-/// \endoperation
+/// \endcode
#define _mm512_i32logather_pd(vindex, base_addr, scale) \
_mm512_i32gather_pd(_mm512_castsi512_si256(vindex), (base_addr), (scale))
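The comment changes here swap the nonstandard \operation tag for Doxygen's \code{.operation}, so the pseudocode renders as a code block. The macro itself gathers eight doubles through the low eight 32-bit indices of a 512-bit index vector; a usage sketch, assuming AVX-512F:

    #include <immintrin.h>

    /* r[j] = table[idx32[j]] for j = 0..7; scale is 8 for 8-byte doubles. */
    __m512d gather8(double const *table, __m512i idx) {
        return _mm512_i32logather_pd(idx, table, 8);
    }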
@@ -9618,7 +9621,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
///
/// This intrinsic corresponds to the <c> VGATHERDPD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// i := j*64
/// m := j*32
@@ -9630,7 +9633,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
/// FI
/// ENDFOR
/// dst[MAX:512] := 0
-/// \endoperation
+/// \endcode
#define _mm512_mask_i32logather_pd(src, mask, vindex, base_addr, scale) \
_mm512_mask_i32gather_pd((src), (mask), _mm512_castsi512_si256(vindex), \
(base_addr), (scale))
@@ -9641,7 +9644,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
///
/// This intrinsic corresponds to the <c> VPGATHERDQ </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// i := j*64
/// m := j*32
@@ -9649,7 +9652,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
/// dst[i+63:i] := MEM[addr+63:addr]
/// ENDFOR
/// dst[MAX:512] := 0
-/// \endoperation
+/// \endcode
#define _mm512_i32logather_epi64(vindex, base_addr, scale) \
_mm512_i32gather_epi64(_mm512_castsi512_si256(vindex), (base_addr), (scale))
@@ -9660,7 +9663,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
///
/// This intrinsic corresponds to the <c> VPGATHERDQ </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// i := j*64
/// m := j*32
@@ -9672,7 +9675,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
/// FI
/// ENDFOR
/// dst[MAX:512] := 0
-/// \endoperation
+/// \endcode
#define _mm512_mask_i32logather_epi64(src, mask, vindex, base_addr, scale) \
_mm512_mask_i32gather_epi64((src), (mask), _mm512_castsi512_si256(vindex), \
(base_addr), (scale))
@@ -9683,14 +9686,14 @@ _mm512_cvtsi512_si32(__m512i __A) {
///
/// This intrinsic corresponds to the <c> VSCATTERDPD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// i := j*64
/// m := j*32
/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
/// MEM[addr+63:addr] := v1[i+63:i]
/// ENDFOR
-/// \endoperation
+/// \endcode
#define _mm512_i32loscatter_pd(base_addr, vindex, v1, scale) \
_mm512_i32scatter_pd((base_addr), _mm512_castsi512_si256(vindex), (v1), (scale))
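The store-side counterpart has the same shape; a sketch, again assuming AVX-512F:

    #include <immintrin.h>

    /* table[idx32[j]] = v[j] for j = 0..7. */
    void scatter8(double *table, __m512i idx, __m512d v) {
        _mm512_i32loscatter_pd(table, idx, v, 8);
    }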
@@ -9702,7 +9705,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
///
/// This intrinsic corresponds to the <c> VSCATTERDPD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// i := j*64
/// m := j*32
@@ -9711,7 +9714,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
/// MEM[addr+63:addr] := a[i+63:i]
/// FI
/// ENDFOR
-/// \endoperation
+/// \endcode
#define _mm512_mask_i32loscatter_pd(base_addr, mask, vindex, v1, scale) \
_mm512_mask_i32scatter_pd((base_addr), (mask), \
_mm512_castsi512_si256(vindex), (v1), (scale))
@@ -9722,14 +9725,14 @@ _mm512_cvtsi512_si32(__m512i __A) {
///
/// This intrinsic corresponds to the <c> VPSCATTERDQ </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// i := j*64
/// m := j*32
/// addr := base_addr + SignExtend64(vindex[m+31:m]) * ZeroExtend64(scale) * 8
/// MEM[addr+63:addr] := a[i+63:i]
/// ENDFOR
-/// \endoperation
+/// \endcode
#define _mm512_i32loscatter_epi64(base_addr, vindex, v1, scale) \
_mm512_i32scatter_epi64((base_addr), \
_mm512_castsi512_si256(vindex), (v1), (scale))
@@ -9741,7 +9744,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
///
/// This intrinsic corresponds to the <c> VPSCATTERDQ </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// i := j*64
/// m := j*32
@@ -9750,7 +9753,7 @@ _mm512_cvtsi512_si32(__m512i __A) {
/// MEM[addr+63:addr] := a[i+63:i]
/// FI
/// ENDFOR
-/// \endoperation
+/// \endcode
#define _mm512_mask_i32loscatter_epi64(base_addr, mask, vindex, v1, scale) \
_mm512_mask_i32scatter_epi64((base_addr), (mask), \
_mm512_castsi512_si256(vindex), (v1), (scale))
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512vlbf16intrin.h b/contrib/llvm-project/clang/lib/Headers/avx512vlbf16intrin.h
index 6a5a86071f0b..1cdbb28484ac 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512vlbf16intrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512vlbf16intrin.h
@@ -417,7 +417,7 @@ static __inline__ __bfloat16 __DEFAULT_FN_ATTRS128 _mm_cvtness_sbh(float __A) {
__v4sf __V = {__A, 0, 0, 0};
__v8hi __R = __builtin_ia32_cvtneps2bf16_128_mask(
(__v4sf)__V, (__v8hi)_mm_undefined_si128(), (__mmask8)-1);
- return __R[0];
+ return (__bfloat16)__R[0];
}
/// Convert Packed BF16 Data to Packed float Data.
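__bfloat16 is a storage-only integer typedef at this point in the headers' history, so the added cast only makes the conversion from the __v8hi element explicit. A usage sketch, assuming AVX512-BF16 with VL:

    #include <immintrin.h>

    /* Round-convert one float to its bfloat16 storage representation. */
    __bfloat16 to_bf16(float f) { return _mm_cvtness_sbh(f); }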
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512vlbwintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512vlbwintrin.h
index 7873516053ec..521ccab27e04 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512vlbwintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512vlbwintrin.h
@@ -1942,7 +1942,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_slli_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_slli_epi16(__A, __B),
+ (__v8hi)_mm_slli_epi16(__A, (int)__B),
(__v8hi)__W);
}
@@ -1950,7 +1950,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_slli_epi16 (__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_slli_epi16(__A, __B),
+ (__v8hi)_mm_slli_epi16(__A, (int)__B),
(__v8hi)_mm_setzero_si128());
}
@@ -1959,7 +1959,7 @@ _mm256_mask_slli_epi16(__m256i __W, __mmask16 __U, __m256i __A,
unsigned int __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_slli_epi16(__A, __B),
+ (__v16hi)_mm256_slli_epi16(__A, (int)__B),
(__v16hi)__W);
}
@@ -1967,7 +1967,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_slli_epi16(__mmask16 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_slli_epi16(__A, __B),
+ (__v16hi)_mm256_slli_epi16(__A, (int)__B),
(__v16hi)_mm256_setzero_si256());
}
@@ -2095,7 +2095,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_srai_epi16(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_srai_epi16(__A, __B),
+ (__v8hi)_mm_srai_epi16(__A, (int)__B),
(__v8hi)__W);
}
@@ -2103,7 +2103,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_srai_epi16(__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectw_128((__mmask8)__U,
- (__v8hi)_mm_srai_epi16(__A, __B),
+ (__v8hi)_mm_srai_epi16(__A, (int)__B),
(__v8hi)_mm_setzero_si128());
}
@@ -2112,7 +2112,7 @@ _mm256_mask_srai_epi16(__m256i __W, __mmask16 __U, __m256i __A,
unsigned int __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_srai_epi16(__A, __B),
+ (__v16hi)_mm256_srai_epi16(__A, (int)__B),
(__v16hi)__W);
}
@@ -2120,7 +2120,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_srai_epi16(__mmask16 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectw_256((__mmask16)__U,
- (__v16hi)_mm256_srai_epi16(__A, __B),
+ (__v16hi)_mm256_srai_epi16(__A, (int)__B),
(__v16hi)_mm256_setzero_si256());
}
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512vlintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512vlintrin.h
index 178c9dbc0e6e..3e8355f145d6 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512vlintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512vlintrin.h
@@ -4525,7 +4525,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_slli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_slli_epi32(__A, __B),
+ (__v4si)_mm_slli_epi32(__A, (int)__B),
(__v4si)__W);
}
@@ -4533,7 +4533,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_slli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_slli_epi32(__A, __B),
+ (__v4si)_mm_slli_epi32(__A, (int)__B),
(__v4si)_mm_setzero_si128());
}
@@ -4541,7 +4541,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_slli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_slli_epi32(__A, __B),
+ (__v8si)_mm256_slli_epi32(__A, (int)__B),
(__v8si)__W);
}
@@ -4549,7 +4549,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_slli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_slli_epi32(__A, __B),
+ (__v8si)_mm256_slli_epi32(__A, (int)__B),
(__v8si)_mm256_setzero_si256());
}
@@ -4589,7 +4589,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_slli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_slli_epi64(__A, __B),
+ (__v2di)_mm_slli_epi64(__A, (int)__B),
(__v2di)__W);
}
@@ -4597,7 +4597,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_slli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_slli_epi64(__A, __B),
+ (__v2di)_mm_slli_epi64(__A, (int)__B),
(__v2di)_mm_setzero_si128());
}
@@ -4605,7 +4605,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_slli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_slli_epi64(__A, __B),
+ (__v4di)_mm256_slli_epi64(__A, (int)__B),
(__v4di)__W);
}
@@ -4613,7 +4613,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_slli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_slli_epi64(__A, __B),
+ (__v4di)_mm256_slli_epi64(__A, (int)__B),
(__v4di)_mm256_setzero_si256());
}
@@ -4869,7 +4869,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_srli_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_srli_epi32(__A, __B),
+ (__v4si)_mm_srli_epi32(__A, (int)__B),
(__v4si)__W);
}
@@ -4877,7 +4877,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_srli_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_srli_epi32(__A, __B),
+ (__v4si)_mm_srli_epi32(__A, (int)__B),
(__v4si)_mm_setzero_si128());
}
@@ -4885,7 +4885,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_srli_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_srli_epi32(__A, __B),
+ (__v8si)_mm256_srli_epi32(__A, (int)__B),
(__v8si)__W);
}
@@ -4893,7 +4893,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_srli_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_srli_epi32(__A, __B),
+ (__v8si)_mm256_srli_epi32(__A, (int)__B),
(__v8si)_mm256_setzero_si256());
}
@@ -4933,7 +4933,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_srli_epi64(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_srli_epi64(__A, __B),
+ (__v2di)_mm_srli_epi64(__A, (int)__B),
(__v2di)__W);
}
@@ -4941,7 +4941,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_srli_epi64(__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectq_128((__mmask8)__U,
- (__v2di)_mm_srli_epi64(__A, __B),
+ (__v2di)_mm_srli_epi64(__A, (int)__B),
(__v2di)_mm_setzero_si128());
}
@@ -4949,7 +4949,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_srli_epi64(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_srli_epi64(__A, __B),
+ (__v4di)_mm256_srli_epi64(__A, (int)__B),
(__v4di)__W);
}
@@ -4957,7 +4957,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_srli_epi64(__mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectq_256((__mmask8)__U,
- (__v4di)_mm256_srli_epi64(__A, __B),
+ (__v4di)_mm256_srli_epi64(__A, (int)__B),
(__v4di)_mm256_setzero_si256());
}
@@ -6408,7 +6408,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_mask_srai_epi32(__m128i __W, __mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_srai_epi32(__A, __B),
+ (__v4si)_mm_srai_epi32(__A, (int)__B),
(__v4si)__W);
}
@@ -6416,7 +6416,7 @@ static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_maskz_srai_epi32(__mmask8 __U, __m128i __A, unsigned int __B)
{
return (__m128i)__builtin_ia32_selectd_128((__mmask8)__U,
- (__v4si)_mm_srai_epi32(__A, __B),
+ (__v4si)_mm_srai_epi32(__A, (int)__B),
(__v4si)_mm_setzero_si128());
}
@@ -6424,7 +6424,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_mask_srai_epi32(__m256i __W, __mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_srai_epi32(__A, __B),
+ (__v8si)_mm256_srai_epi32(__A, (int)__B),
(__v8si)__W);
}
@@ -6432,7 +6432,7 @@ static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_maskz_srai_epi32(__mmask8 __U, __m256i __A, unsigned int __B)
{
return (__m256i)__builtin_ia32_selectd_256((__mmask8)__U,
- (__v8si)_mm256_srai_epi32(__A, __B),
+ (__v8si)_mm256_srai_epi32(__A, (int)__B),
(__v8si)_mm256_setzero_si256());
}
@@ -6483,7 +6483,7 @@ _mm256_maskz_sra_epi64(__mmask8 __U, __m256i __A, __m128i __B)
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_srai_epi64(__m128i __A, unsigned int __imm)
{
- return (__m128i)__builtin_ia32_psraqi128((__v2di)__A, __imm);
+ return (__m128i)__builtin_ia32_psraqi128((__v2di)__A, (int)__imm);
}
static __inline__ __m128i __DEFAULT_FN_ATTRS128
@@ -6505,7 +6505,7 @@ _mm_maskz_srai_epi64(__mmask8 __U, __m128i __A, unsigned int __imm)
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_srai_epi64(__m256i __A, unsigned int __imm)
{
- return (__m256i)__builtin_ia32_psraqi256((__v4di)__A, __imm);
+ return (__m256i)__builtin_ia32_psraqi256((__v4di)__A, (int)__imm);
}
static __inline__ __m256i __DEFAULT_FN_ATTRS256
@@ -6525,79 +6525,65 @@ _mm256_maskz_srai_epi64(__mmask8 __U, __m256i __A, unsigned int __imm)
(__v4di)_mm256_setzero_si256());
}
-#define _mm_ternarylogic_epi32(A, B, C, imm) \
- ((__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
- (__v4si)(__m128i)(B), \
- (__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)-1))
-
-#define _mm_mask_ternarylogic_epi32(A, U, B, C, imm) \
- ((__m128i)__builtin_ia32_pternlogd128_mask((__v4si)(__m128i)(A), \
- (__v4si)(__m128i)(B), \
- (__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)(U)))
-
-#define _mm_maskz_ternarylogic_epi32(U, A, B, C, imm) \
- ((__m128i)__builtin_ia32_pternlogd128_maskz((__v4si)(__m128i)(A), \
- (__v4si)(__m128i)(B), \
- (__v4si)(__m128i)(C), (int)(imm), \
- (__mmask8)(U)))
-
-#define _mm256_ternarylogic_epi32(A, B, C, imm) \
- ((__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
- (__v8si)(__m256i)(B), \
- (__v8si)(__m256i)(C), (int)(imm), \
- (__mmask8)-1))
-
-#define _mm256_mask_ternarylogic_epi32(A, U, B, C, imm) \
- ((__m256i)__builtin_ia32_pternlogd256_mask((__v8si)(__m256i)(A), \
- (__v8si)(__m256i)(B), \
- (__v8si)(__m256i)(C), (int)(imm), \
- (__mmask8)(U)))
-
-#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, imm) \
- ((__m256i)__builtin_ia32_pternlogd256_maskz((__v8si)(__m256i)(A), \
- (__v8si)(__m256i)(B), \
- (__v8si)(__m256i)(C), (int)(imm), \
- (__mmask8)(U)))
-
-#define _mm_ternarylogic_epi64(A, B, C, imm) \
- ((__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
- (__v2di)(__m128i)(B), \
- (__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)-1))
-
-#define _mm_mask_ternarylogic_epi64(A, U, B, C, imm) \
- ((__m128i)__builtin_ia32_pternlogq128_mask((__v2di)(__m128i)(A), \
- (__v2di)(__m128i)(B), \
- (__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)(U)))
-
-#define _mm_maskz_ternarylogic_epi64(U, A, B, C, imm) \
- ((__m128i)__builtin_ia32_pternlogq128_maskz((__v2di)(__m128i)(A), \
- (__v2di)(__m128i)(B), \
- (__v2di)(__m128i)(C), (int)(imm), \
- (__mmask8)(U)))
-
-#define _mm256_ternarylogic_epi64(A, B, C, imm) \
- ((__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
- (__v4di)(__m256i)(B), \
- (__v4di)(__m256i)(C), (int)(imm), \
- (__mmask8)-1))
-
-#define _mm256_mask_ternarylogic_epi64(A, U, B, C, imm) \
- ((__m256i)__builtin_ia32_pternlogq256_mask((__v4di)(__m256i)(A), \
- (__v4di)(__m256i)(B), \
- (__v4di)(__m256i)(C), (int)(imm), \
- (__mmask8)(U)))
-
-#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, imm) \
- ((__m256i)__builtin_ia32_pternlogq256_maskz((__v4di)(__m256i)(A), \
- (__v4di)(__m256i)(B), \
- (__v4di)(__m256i)(C), (int)(imm), \
- (__mmask8)(U)))
-
-
+#define _mm_ternarylogic_epi32(A, B, C, imm) \
+ ((__m128i)__builtin_ia32_pternlogd128_mask( \
+ (__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (__v4si)(__m128i)(C), \
+ (unsigned char)(imm), (__mmask8)-1))
+
+#define _mm_mask_ternarylogic_epi32(A, U, B, C, imm) \
+ ((__m128i)__builtin_ia32_pternlogd128_mask( \
+ (__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (__v4si)(__m128i)(C), \
+ (unsigned char)(imm), (__mmask8)(U)))
+
+#define _mm_maskz_ternarylogic_epi32(U, A, B, C, imm) \
+ ((__m128i)__builtin_ia32_pternlogd128_maskz( \
+ (__v4si)(__m128i)(A), (__v4si)(__m128i)(B), (__v4si)(__m128i)(C), \
+ (unsigned char)(imm), (__mmask8)(U)))
+
+#define _mm256_ternarylogic_epi32(A, B, C, imm) \
+ ((__m256i)__builtin_ia32_pternlogd256_mask( \
+ (__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (__v8si)(__m256i)(C), \
+ (unsigned char)(imm), (__mmask8)-1))
+
+#define _mm256_mask_ternarylogic_epi32(A, U, B, C, imm) \
+ ((__m256i)__builtin_ia32_pternlogd256_mask( \
+ (__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (__v8si)(__m256i)(C), \
+ (unsigned char)(imm), (__mmask8)(U)))
+
+#define _mm256_maskz_ternarylogic_epi32(U, A, B, C, imm) \
+ ((__m256i)__builtin_ia32_pternlogd256_maskz( \
+ (__v8si)(__m256i)(A), (__v8si)(__m256i)(B), (__v8si)(__m256i)(C), \
+ (unsigned char)(imm), (__mmask8)(U)))
+
+#define _mm_ternarylogic_epi64(A, B, C, imm) \
+ ((__m128i)__builtin_ia32_pternlogq128_mask( \
+ (__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (__v2di)(__m128i)(C), \
+ (unsigned char)(imm), (__mmask8)-1))
+
+#define _mm_mask_ternarylogic_epi64(A, U, B, C, imm) \
+ ((__m128i)__builtin_ia32_pternlogq128_mask( \
+ (__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (__v2di)(__m128i)(C), \
+ (unsigned char)(imm), (__mmask8)(U)))
+
+#define _mm_maskz_ternarylogic_epi64(U, A, B, C, imm) \
+ ((__m128i)__builtin_ia32_pternlogq128_maskz( \
+ (__v2di)(__m128i)(A), (__v2di)(__m128i)(B), (__v2di)(__m128i)(C), \
+ (unsigned char)(imm), (__mmask8)(U)))
+
+#define _mm256_ternarylogic_epi64(A, B, C, imm) \
+ ((__m256i)__builtin_ia32_pternlogq256_mask( \
+ (__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (__v4di)(__m256i)(C), \
+ (unsigned char)(imm), (__mmask8)-1))
+
+#define _mm256_mask_ternarylogic_epi64(A, U, B, C, imm) \
+ ((__m256i)__builtin_ia32_pternlogq256_mask( \
+ (__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (__v4di)(__m256i)(C), \
+ (unsigned char)(imm), (__mmask8)(U)))
+
+#define _mm256_maskz_ternarylogic_epi64(U, A, B, C, imm) \
+ ((__m256i)__builtin_ia32_pternlogq256_maskz( \
+ (__v4di)(__m256i)(A), (__v4di)(__m256i)(B), (__v4di)(__m256i)(C), \
+ (unsigned char)(imm), (__mmask8)(U)))
#define _mm256_shuffle_f32x4(A, B, imm) \
((__m256)__builtin_ia32_shuf_f32x4_256((__v8sf)(__m256)(A), \
diff --git a/contrib/llvm-project/clang/lib/Headers/avx512vlvnniintrin.h b/contrib/llvm-project/clang/lib/Headers/avx512vlvnniintrin.h
index 0fb29af262f7..8bc0694e64a0 100644
--- a/contrib/llvm-project/clang/lib/Headers/avx512vlvnniintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avx512vlvnniintrin.h
@@ -25,7 +25,7 @@
///
/// This intrinsic corresponds to the <c> VPDPBUSD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j]))
/// tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1]))
@@ -34,7 +34,7 @@
/// DST.dword[j] := S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
/// ENDFOR
/// DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
#define _mm256_dpbusd_epi32(S, A, B) \
((__m256i)__builtin_ia32_vpdpbusd256((__v8si)(S), (__v8si)(A), (__v8si)(B)))
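Each 32-bit lane accumulates four unsigned-byte by signed-byte products; a usage sketch, assuming AVX512-VNNI with VL for this header's variants:

    #include <immintrin.h>

    /* One multiply-accumulate step of a u8 x s8 dot-product kernel. */
    __m256i dot_step(__m256i acc, __m256i a_u8, __m256i b_s8) {
        return _mm256_dpbusd_epi32(acc, a_u8, b_s8);
    }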
@@ -45,7 +45,7 @@
///
/// This intrinsic corresponds to the <c> VPDPBUSDS </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j]))
/// tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1]))
@@ -54,7 +54,7 @@
/// DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
/// ENDFOR
/// DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
#define _mm256_dpbusds_epi32(S, A, B) \
((__m256i)__builtin_ia32_vpdpbusds256((__v8si)(S), (__v8si)(A), (__v8si)(B)))
@@ -65,14 +65,14 @@
///
/// This intrinsic corresponds to the <c> VPDPWSSD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j])
/// tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1])
/// DST.dword[j] := S.dword[j] + tmp1 + tmp2
/// ENDFOR
/// DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
#define _mm256_dpwssd_epi32(S, A, B) \
((__m256i)__builtin_ia32_vpdpwssd256((__v8si)(S), (__v8si)(A), (__v8si)(B)))
@@ -83,14 +83,14 @@
///
/// This intrinsic corresponds to the <c> VPDPWSSDS </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j])
/// tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1])
/// DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2)
/// ENDFOR
/// DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
#define _mm256_dpwssds_epi32(S, A, B) \
((__m256i)__builtin_ia32_vpdpwssds256((__v8si)(S), (__v8si)(A), (__v8si)(B)))
@@ -101,7 +101,7 @@
///
/// This intrinsic corresponds to the <c> VPDPBUSD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 3
/// tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j]))
/// tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1]))
@@ -110,7 +110,7 @@
/// DST.dword[j] := S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
/// ENDFOR
/// DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
#define _mm_dpbusd_epi32(S, A, B) \
((__m128i)__builtin_ia32_vpdpbusd128((__v4si)(S), (__v4si)(A), (__v4si)(B)))
@@ -121,7 +121,7 @@
///
/// This intrinsic corresponds to the <c> VPDPBUSDS </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 3
/// tmp1.word := Signed(ZeroExtend16(A.byte[4*j]) * SignExtend16(B.byte[4*j]))
/// tmp2.word := Signed(ZeroExtend16(A.byte[4*j+1]) * SignExtend16(B.byte[4*j+1]))
@@ -130,7 +130,7 @@
/// DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
/// ENDFOR
/// DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
#define _mm_dpbusds_epi32(S, A, B) \
((__m128i)__builtin_ia32_vpdpbusds128((__v4si)(S), (__v4si)(A), (__v4si)(B)))
@@ -141,14 +141,14 @@
///
/// This intrinsic corresponds to the <c> VPDPWSSD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 3
/// tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j])
/// tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1])
/// DST.dword[j] := S.dword[j] + tmp1 + tmp2
/// ENDFOR
/// DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
#define _mm_dpwssd_epi32(S, A, B) \
((__m128i)__builtin_ia32_vpdpwssd128((__v4si)(S), (__v4si)(A), (__v4si)(B)))
@@ -159,14 +159,14 @@
///
/// This intrinsic corresponds to the <c> VPDPWSSDS </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 3
/// tmp1.dword := SignExtend32(A.word[2*j]) * SignExtend32(B.word[2*j])
/// tmp2.dword := SignExtend32(A.word[2*j+1]) * SignExtend32(B.word[2*j+1])
/// DST.dword[j] := Saturate32(S.dword[j] + tmp1 + tmp2)
/// ENDFOR
/// DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
#define _mm_dpwssds_epi32(S, A, B) \
((__m128i)__builtin_ia32_vpdpwssds128((__v4si)(S), (__v4si)(A), (__v4si)(B)))
diff --git a/contrib/llvm-project/clang/lib/Headers/avxintrin.h b/contrib/llvm-project/clang/lib/Headers/avxintrin.h
index 17fe63691177..a8f953c260c2 100644
--- a/contrib/llvm-project/clang/lib/Headers/avxintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avxintrin.h
@@ -1504,7 +1504,10 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
/// 00: Bits [31:0] and [159:128] are copied from the selected operand. \n
/// 01: Bits [63:32] and [191:160] are copied from the selected operand. \n
/// 10: Bits [95:64] and [223:192] are copied from the selected operand. \n
-/// 11: Bits [127:96] and [255:224] are copied from the selected operand.
+/// 11: Bits [127:96] and [255:224] are copied from the selected operand. \n
+/// Note: To generate a mask, you can use the \c _MM_SHUFFLE macro.
+/// <c>_MM_SHUFFLE(b6, b4, b2, b0)</c> can create an 8-bit mask of the form
+/// <c>[b6, b4, b2, b0]</c>.
/// \returns A 256-bit vector of [8 x float] containing the shuffled values.
#define _mm256_shuffle_ps(a, b, mask) \
((__m256)__builtin_ia32_shufps256((__v8sf)(__m256)(a), \
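As the added note says, _MM_SHUFFLE packs four 2-bit element selectors into the immediate; a sketch, assuming AVX:

    #include <immintrin.h>

    /* Within each 128-bit half: elements 0,1 of a, then 0,1 of b.
     * _MM_SHUFFLE(1, 0, 1, 0) == 0x44. */
    __m256 pick_low_pairs(__m256 a, __m256 b) {
        return _mm256_shuffle_ps(a, b, _MM_SHUFFLE(1, 0, 1, 0));
    }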
@@ -1953,12 +1956,16 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
///
/// \headerfile <x86intrin.h>
///
+/// \code
+/// int _mm256_extract_epi32(__m256i X, const int N);
+/// \endcode
+///
/// This intrinsic corresponds to the <c> VEXTRACTF128+COMPOSITE </c>
/// instruction.
///
-/// \param __a
+/// \param X
/// A 256-bit vector of [8 x i32].
-/// \param __imm
+/// \param N
/// An immediate integer operand with bits [2:0] determining which vector
/// element is extracted and returned.
/// \returns A 32-bit integer containing the extracted 32 bits of extended
@@ -1971,12 +1978,16 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
///
/// \headerfile <x86intrin.h>
///
+/// \code
+/// int _mm256_extract_epi16(__m256i X, const int N);
+/// \endcode
+///
/// This intrinsic corresponds to the <c> VEXTRACTF128+COMPOSITE </c>
/// instruction.
///
-/// \param __a
+/// \param X
/// A 256-bit integer vector of [16 x i16].
-/// \param __imm
+/// \param N
/// An immediate integer operand with bits [3:0] determining which vector
/// element is extracted and returned.
/// \returns A 32-bit integer containing the extracted 16 bits of zero extended
@@ -1990,12 +2001,16 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
///
/// \headerfile <x86intrin.h>
///
+/// \code
+/// int _mm256_extract_epi8(__m256i X, const int N);
+/// \endcode
+///
/// This intrinsic corresponds to the <c> VEXTRACTF128+COMPOSITE </c>
/// instruction.
///
-/// \param __a
+/// \param X
/// A 256-bit integer vector of [32 x i8].
-/// \param __imm
+/// \param N
/// An immediate integer operand with bits [4:0] determining which vector
/// element is extracted and returned.
/// \returns A 32-bit integer containing the extracted 8 bits of zero extended
@@ -2010,12 +2025,16 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
///
/// \headerfile <x86intrin.h>
///
+/// \code
+/// long long _mm256_extract_epi64(__m256i X, const int N);
+/// \endcode
+///
/// This intrinsic corresponds to the <c> VEXTRACTF128+COMPOSITE </c>
/// instruction.
///
-/// \param __a
+/// \param X
/// A 256-bit integer vector of [4 x i64].
-/// \param __imm
+/// \param N
/// An immediate integer operand with bits [1:0] determining which vector
/// element is extracted and returned.
/// \returns A 64-bit integer containing the extracted 64 bits of extended
@@ -2030,18 +2049,22 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
///
/// \headerfile <x86intrin.h>
///
+/// \code
+/// __m256i _mm256_insert_epi32(__m256i X, int I, const int N);
+/// \endcode
+///
/// This intrinsic corresponds to the <c> VINSERTF128+COMPOSITE </c>
/// instruction.
///
-/// \param __a
+/// \param X
/// A vector of [8 x i32] to be used by the insert operation.
-/// \param __b
+/// \param I
/// An integer value. The replacement value for the insert operation.
-/// \param __imm
+/// \param N
/// An immediate integer specifying the index of the vector element to be
/// replaced.
-/// \returns A copy of vector \a __a, after replacing its element indexed by
-/// \a __imm with \a __b.
+/// \returns A copy of vector \a X, after replacing its element indexed by
+/// \a N with \a I.
#define _mm256_insert_epi32(X, I, N) \
((__m256i)__builtin_ia32_vec_set_v8si((__v8si)(__m256i)(X), \
(int)(I), (int)(N)))
@@ -2053,18 +2076,22 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
///
/// \headerfile <x86intrin.h>
///
+/// \code
+/// __m256i _mm256_insert_epi16(__m256i X, int I, const int N);
+/// \endcode
+///
/// This intrinsic corresponds to the <c> VINSERTF128+COMPOSITE </c>
/// instruction.
///
-/// \param __a
+/// \param X
/// A vector of [16 x i16] to be used by the insert operation.
-/// \param __b
+/// \param I
/// An i16 integer value. The replacement value for the insert operation.
-/// \param __imm
+/// \param N
/// An immediate integer specifying the index of the vector element to be
/// replaced.
-/// \returns A copy of vector \a __a, after replacing its element indexed by
-/// \a __imm with \a __b.
+/// \returns A copy of vector \a X, after replacing its element indexed by
+/// \a N with \a I.
#define _mm256_insert_epi16(X, I, N) \
((__m256i)__builtin_ia32_vec_set_v16hi((__v16hi)(__m256i)(X), \
(int)(I), (int)(N)))
@@ -2075,18 +2102,22 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
///
/// \headerfile <x86intrin.h>
///
+/// \code
+/// __m256i _mm256_insert_epi8(__m256i X, int I, const int N);
+/// \endcode
+///
/// This intrinsic corresponds to the <c> VINSERTF128+COMPOSITE </c>
/// instruction.
///
-/// \param __a
+/// \param X
/// A vector of [32 x i8] to be used by the insert operation.
-/// \param __b
+/// \param I
/// An i8 integer value. The replacement value for the insert operation.
-/// \param __imm
+/// \param N
/// An immediate integer specifying the index of the vector element to be
/// replaced.
-/// \returns A copy of vector \a __a, after replacing its element indexed by
-/// \a __imm with \a __b.
+/// \returns A copy of vector \a X, after replacing its element indexed by
+/// \a N with \a I.
#define _mm256_insert_epi8(X, I, N) \
((__m256i)__builtin_ia32_vec_set_v32qi((__v32qi)(__m256i)(X), \
(int)(I), (int)(N)))
@@ -2098,18 +2129,22 @@ _mm256_blendv_ps(__m256 __a, __m256 __b, __m256 __c)
///
/// \headerfile <x86intrin.h>
///
+/// \code
+/// __m256i _mm256_insert_epi64(__m256i X, int I, const int N);
+/// \endcode
+///
/// This intrinsic corresponds to the <c> VINSERTF128+COMPOSITE </c>
/// instruction.
///
-/// \param __a
+/// \param X
/// A vector of [4 x i64] to be used by the insert operation.
-/// \param __b
+/// \param I
/// A 64-bit integer value. The replacement value for the insert operation.
-/// \param __imm
+/// \param N
/// An immediate integer specifying the index of the vector element to be
/// replaced.
-/// \returns A copy of vector \a __a, after replacing its element indexed by
-/// \a __imm with \a __b.
+/// \returns A copy of vector \a X, after replacing its element indexed by
+/// \a N with \a I.
#define _mm256_insert_epi64(X, I, N) \
((__m256i)__builtin_ia32_vec_set_v4di((__v4di)(__m256i)(X), \
(long long)(I), (int)(N)))
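The reworked comments name the macro parameters that actually appear in the signatures (X, I, N) instead of the __a/__b/__imm names left over from a function-style implementation, and the new \code blocks give each macro a visible prototype. A usage sketch (x86-64 with AVX):

    #include <immintrin.h>

    /* Replace lane 2 of a [4 x i64] vector; N must be a compile-time
     * constant. */
    __m256i set_lane2(__m256i v, long long x) {
        return _mm256_insert_epi64(v, x, 2);
    }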
@@ -3177,7 +3212,7 @@ _mm256_loadu_si256(__m256i_u const *__p)
/// A pointer to a 256-bit integer vector containing integer values.
/// \returns A 256-bit integer vector containing the moved values.
static __inline __m256i __DEFAULT_FN_ATTRS
-_mm256_lddqu_si256(__m256i const *__p)
+_mm256_lddqu_si256(__m256i_u const *__p)
{
return (__m256i)__builtin_ia32_lddqu256((char const *)__p);
}
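Taking __m256i_u const * (the 1-byte-aligned vector typedef) instead of __m256i const * makes the parameter type match the instruction's contract: VLDDQU never requires 32-byte alignment. A sketch:

    #include <immintrin.h>

    /* Load 256 bits from an arbitrarily aligned address. */
    __m256i load_any(void const *p) {
        return _mm256_lddqu_si256((__m256i_u const *)p);
    }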
diff --git a/contrib/llvm-project/clang/lib/Headers/avxvnniintrin.h b/contrib/llvm-project/clang/lib/Headers/avxvnniintrin.h
index ad45cb7962e5..b7de562b57c0 100644
--- a/contrib/llvm-project/clang/lib/Headers/avxvnniintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/avxvnniintrin.h
@@ -50,7 +50,7 @@
///
/// This intrinsic corresponds to the <c> VPDPBUSD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))
/// tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))
@@ -59,7 +59,7 @@
/// DST.dword[j] := __S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
/// ENDFOR
/// DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_dpbusd_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
{
@@ -73,7 +73,7 @@ _mm256_dpbusd_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
///
/// This intrinsic corresponds to the <c> VPDPBUSDS </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))
/// tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))
@@ -82,7 +82,7 @@ _mm256_dpbusd_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
/// DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
/// ENDFOR
/// DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_dpbusds_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
{
@@ -96,14 +96,14 @@ _mm256_dpbusds_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
///
/// This intrinsic corresponds to the <c> VPDPWSSD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
/// DST.dword[j] := __S.dword[j] + tmp1 + tmp2
/// ENDFOR
/// DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_dpwssd_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
{
@@ -117,14 +117,14 @@ _mm256_dpwssd_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
///
/// This intrinsic corresponds to the <c> VPDPWSSDS </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 7
/// tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
/// DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2)
/// ENDFOR
/// DST[MAX:256] := 0
-/// \endoperation
+/// \endcode
static __inline__ __m256i __DEFAULT_FN_ATTRS256
_mm256_dpwssds_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
{
@@ -138,7 +138,7 @@ _mm256_dpwssds_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
///
/// This intrinsic corresponds to the <c> VPDPBUSD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 3
/// tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))
/// tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))
@@ -147,7 +147,7 @@ _mm256_dpwssds_avx_epi32(__m256i __S, __m256i __A, __m256i __B)
/// DST.dword[j] := __S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4
/// ENDFOR
/// DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_dpbusd_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
{
@@ -161,7 +161,7 @@ _mm_dpbusd_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
///
/// This intrinsic corresponds to the <c> VPDPBUSDS </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 3
/// tmp1.word := Signed(ZeroExtend16(__A.byte[4*j]) * SignExtend16(__B.byte[4*j]))
/// tmp2.word := Signed(ZeroExtend16(__A.byte[4*j+1]) * SignExtend16(__B.byte[4*j+1]))
@@ -170,7 +170,7 @@ _mm_dpbusd_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
/// DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2 + tmp3 + tmp4)
/// ENDFOR
/// DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_dpbusds_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
{
@@ -184,14 +184,14 @@ _mm_dpbusds_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
///
/// This intrinsic corresponds to the <c> VPDPWSSD </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 3
/// tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
/// DST.dword[j] := __S.dword[j] + tmp1 + tmp2
/// ENDFOR
/// DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_dpwssd_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
{
@@ -205,14 +205,14 @@ _mm_dpwssd_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
///
/// This intrinsic corresponds to the <c> VPDPWSSDS </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// FOR j := 0 to 3
/// tmp1.dword := SignExtend32(__A.word[2*j]) * SignExtend32(__B.word[2*j])
/// tmp2.dword := SignExtend32(__A.word[2*j+1]) * SignExtend32(__B.word[2*j+1])
/// DST.dword[j] := Saturate32(__S.dword[j] + tmp1 + tmp2)
/// ENDFOR
/// DST[MAX:128] := 0
-/// \endoperation
+/// \endcode
static __inline__ __m128i __DEFAULT_FN_ATTRS128
_mm_dpwssds_avx_epi32(__m128i __S, __m128i __A, __m128i __B)
{
diff --git a/contrib/llvm-project/clang/lib/Headers/bmiintrin.h b/contrib/llvm-project/clang/lib/Headers/bmiintrin.h
index f583c215f919..ffb94bea639a 100644
--- a/contrib/llvm-project/clang/lib/Headers/bmiintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/bmiintrin.h
@@ -47,6 +47,7 @@ __tzcnt_u16(unsigned short __X)
/// An unsigned 32-bit integer whose trailing zeros are to be counted.
/// \returns An unsigned 32-bit integer containing the number of trailing zero
/// bits in the operand.
+/// \see _mm_tzcnt_32
static __inline__ unsigned int __RELAXED_FN_ATTRS
__tzcnt_u32(unsigned int __X)
{
@@ -63,10 +64,11 @@ __tzcnt_u32(unsigned int __X)
/// An unsigned 32-bit integer whose trailing zeros are to be counted.
/// \returns A 32-bit integer containing the number of trailing zero bits in
/// the operand.
+/// \see __tzcnt_u32
static __inline__ int __RELAXED_FN_ATTRS
_mm_tzcnt_32(unsigned int __X)
{
- return __builtin_ia32_tzcnt_u32(__X);
+ return (int)__builtin_ia32_tzcnt_u32(__X);
}
#define _tzcnt_u32(a) (__tzcnt_u32((a)))
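The \see tags cross-reference the two spellings of the same operation, which now differ only in declared return type thanks to the explicit cast. A sketch, assuming BMI (-mbmi):

    #include <immintrin.h>

    /* tzcnt(0b1000) == 3; a zero input yields the operand width, 32,
     * rather than an undefined value. */
    unsigned int low_bit_index(unsigned int x) { return __tzcnt_u32(x); }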
@@ -83,6 +85,7 @@ _mm_tzcnt_32(unsigned int __X)
/// An unsigned 64-bit integer whose trailing zeros are to be counted.
/// \returns An unsigned 64-bit integer containing the number of trailing zero
/// bits in the operand.
+/// \see _mm_tzcnt_64
static __inline__ unsigned long long __RELAXED_FN_ATTRS
__tzcnt_u64(unsigned long long __X)
{
@@ -99,10 +102,11 @@ __tzcnt_u64(unsigned long long __X)
/// An unsigned 64-bit integer whose trailing zeros are to be counted.
/// \returns A 64-bit integer containing the number of trailing zero bits in
/// the operand.
+/// \see __tzcnt_u64
static __inline__ long long __RELAXED_FN_ATTRS
_mm_tzcnt_64(unsigned long long __X)
{
- return __builtin_ia32_tzcnt_u64(__X);
+ return (long long)__builtin_ia32_tzcnt_u64(__X);
}
#define _tzcnt_u64(a) (__tzcnt_u64((a)))
diff --git a/contrib/llvm-project/clang/lib/Headers/cetintrin.h b/contrib/llvm-project/clang/lib/Headers/cetintrin.h
index 019cab0261e7..a68df5b1d2e7 100644
--- a/contrib/llvm-project/clang/lib/Headers/cetintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/cetintrin.h
@@ -19,7 +19,7 @@
__attribute__((__always_inline__, __nodebug__, __target__("shstk")))
static __inline__ void __DEFAULT_FN_ATTRS _incsspd(int __a) {
- __builtin_ia32_incsspd(__a);
+ __builtin_ia32_incsspd((unsigned int)__a);
}
#ifdef __x86_64__
@@ -34,7 +34,7 @@ static __inline__ void __DEFAULT_FN_ATTRS _inc_ssp(unsigned int __a) {
}
#else /* __x86_64__ */
static __inline__ void __DEFAULT_FN_ATTRS _inc_ssp(unsigned int __a) {
- __builtin_ia32_incsspd((int)__a);
+ __builtin_ia32_incsspd(__a);
}
#endif /* __x86_64__ */
@@ -42,9 +42,12 @@ static __inline__ unsigned int __DEFAULT_FN_ATTRS _rdsspd(unsigned int __a) {
return __builtin_ia32_rdsspd(__a);
}
-static __inline__ unsigned int __DEFAULT_FN_ATTRS _rdsspd_i32() {
+static __inline__ unsigned int __DEFAULT_FN_ATTRS _rdsspd_i32(void) {
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wuninitialized"
unsigned int t;
return __builtin_ia32_rdsspd(t);
+#pragma clang diagnostic pop
}
#ifdef __x86_64__
@@ -52,9 +55,12 @@ static __inline__ unsigned long long __DEFAULT_FN_ATTRS _rdsspq(unsigned long lo
return __builtin_ia32_rdsspq(__a);
}
-static __inline__ unsigned long long __DEFAULT_FN_ATTRS _rdsspq_i64() {
+static __inline__ unsigned long long __DEFAULT_FN_ATTRS _rdsspq_i64(void) {
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wuninitialized"
unsigned long long t;
return __builtin_ia32_rdsspq(t);
+#pragma clang diagnostic pop
}
#endif /* __x86_64__ */
@@ -68,7 +74,7 @@ static __inline__ unsigned int __DEFAULT_FN_ATTRS _get_ssp(void) {
}
#endif /* __x86_64__ */
-static __inline__ void __DEFAULT_FN_ATTRS _saveprevssp() {
+static __inline__ void __DEFAULT_FN_ATTRS _saveprevssp(void) {
__builtin_ia32_saveprevssp();
}
@@ -96,7 +102,7 @@ static __inline__ void __DEFAULT_FN_ATTRS _wrussq(unsigned long long __a, void *
}
#endif /* __x86_64__ */
-static __inline__ void __DEFAULT_FN_ATTRS _setssbsy() {
+static __inline__ void __DEFAULT_FN_ATTRS _setssbsy(void) {
__builtin_ia32_setssbsy();
}
diff --git a/contrib/llvm-project/clang/lib/Headers/emmintrin.h b/contrib/llvm-project/clang/lib/Headers/emmintrin.h
index 4618b808efc4..a3f56e832b32 100644
--- a/contrib/llvm-project/clang/lib/Headers/emmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/emmintrin.h
@@ -20,16 +20,17 @@ typedef double __m128d __attribute__((__vector_size__(16), __aligned__(16)));
typedef long long __m128i __attribute__((__vector_size__(16), __aligned__(16)));
typedef double __m128d_u __attribute__((__vector_size__(16), __aligned__(1)));
-typedef long long __m128i_u __attribute__((__vector_size__(16), __aligned__(1)));
+typedef long long __m128i_u
+ __attribute__((__vector_size__(16), __aligned__(1)));
/* Type defines. */
-typedef double __v2df __attribute__ ((__vector_size__ (16)));
-typedef long long __v2di __attribute__ ((__vector_size__ (16)));
+typedef double __v2df __attribute__((__vector_size__(16)));
+typedef long long __v2di __attribute__((__vector_size__(16)));
typedef short __v8hi __attribute__((__vector_size__(16)));
typedef char __v16qi __attribute__((__vector_size__(16)));
/* Unsigned types */
-typedef unsigned long long __v2du __attribute__ ((__vector_size__ (16)));
+typedef unsigned long long __v2du __attribute__((__vector_size__(16)));
typedef unsigned short __v8hu __attribute__((__vector_size__(16)));
typedef unsigned char __v16qu __attribute__((__vector_size__(16)));
@@ -38,8 +39,12 @@ typedef unsigned char __v16qu __attribute__((__vector_size__(16)));
typedef signed char __v16qs __attribute__((__vector_size__(16)));
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse2"), __min_vector_width__(128)))
-#define __DEFAULT_FN_ATTRS_MMX __attribute__((__always_inline__, __nodebug__, __target__("mmx,sse2"), __min_vector_width__(64)))
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("sse2"), \
+ __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS_MMX \
+ __attribute__((__always_inline__, __nodebug__, __target__("mmx,sse2"), \
+ __min_vector_width__(64)))
/// Adds lower double-precision values in both operands and returns the
/// sum in the lower 64 bits of the result. The upper 64 bits of the result
@@ -56,9 +61,8 @@ typedef signed char __v16qs __attribute__((__vector_size__(16)));
/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
/// sum of the lower 64 bits of both operands. The upper 64 bits are copied
/// from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_add_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_add_sd(__m128d __a,
+ __m128d __b) {
__a[0] += __b[0];
return __a;
}
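The emmintrin.h hunks from here on are a clang-format reflow; semantics are untouched. For reference, the _sd forms operate on lane 0 only, as in this sketch (SSE2, hypothetical standalone program):

    #include <emmintrin.h>
    #include <stdio.h>

    int main(void) {
        __m128d a = _mm_set_pd(10.0, 1.0); /* lanes: [1.0, 10.0] */
        __m128d b = _mm_set_pd(20.0, 2.0); /* lanes: [2.0, 20.0] */
        double out[2];
        _mm_storeu_pd(out, _mm_add_sd(a, b));
        printf("%g %g\n", out[0], out[1]); /* 3 10: lane 1 comes from a */
        return 0;
    }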
@@ -75,9 +79,8 @@ _mm_add_sd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double] containing one of the source operands.
/// \returns A 128-bit vector of [2 x double] containing the sums of both
/// operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_add_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_add_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)((__v2df)__a + (__v2df)__b);
}
@@ -98,9 +101,8 @@ _mm_add_pd(__m128d __a, __m128d __b)
/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
/// difference of the lower 64 bits of both operands. The upper 64 bits are
/// copied from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sub_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sub_sd(__m128d __a,
+ __m128d __b) {
__a[0] -= __b[0];
return __a;
}
@@ -117,9 +119,8 @@ _mm_sub_sd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double] containing the subtrahend.
/// \returns A 128-bit vector of [2 x double] containing the differences between
/// both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sub_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sub_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)((__v2df)__a - (__v2df)__b);
}
@@ -139,9 +140,8 @@ _mm_sub_pd(__m128d __a, __m128d __b)
/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
/// product of the lower 64 bits of both operands. The upper 64 bits are
/// copied from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mul_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mul_sd(__m128d __a,
+ __m128d __b) {
__a[0] *= __b[0];
return __a;
}
@@ -158,9 +158,8 @@ _mm_mul_sd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double] containing one of the operands.
/// \returns A 128-bit vector of [2 x double] containing the products of both
/// operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_mul_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_mul_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)((__v2df)__a * (__v2df)__b);
}
@@ -181,9 +180,8 @@ _mm_mul_pd(__m128d __a, __m128d __b)
/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
/// quotient of the lower 64 bits of both operands. The upper 64 bits are
/// copied from the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_div_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_div_sd(__m128d __a,
+ __m128d __b) {
__a[0] /= __b[0];
return __a;
}
@@ -201,9 +199,8 @@ _mm_div_sd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double] containing the divisor.
/// \returns A 128-bit vector of [2 x double] containing the quotients of both
/// operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_div_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_div_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)((__v2df)__a / (__v2df)__b);
}
@@ -226,11 +223,10 @@ _mm_div_pd(__m128d __a, __m128d __b)
/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
/// square root of the lower 64 bits of operand \a __b, and whose upper 64
/// bits are copied from the upper 64 bits of operand \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sqrt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_sd(__m128d __a,
+ __m128d __b) {
__m128d __c = __builtin_ia32_sqrtsd((__v2df)__b);
- return __extension__ (__m128d) { __c[0], __a[1] };
+ return __extension__(__m128d){__c[0], __a[1]};
}
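/* Illustrative note: (__m128d){ ... } is a C compound literal that builds the
   two-lane result, and __extension__ silences pedantic diagnostics about it;
   the reformatting above is whitespace-only. A hedged sketch of the idiom
   (hypothetical helper): */
static __m128d __demo_combine(__m128d __lo, __m128d __hi) {
  return __extension__(__m128d){__lo[0], __hi[1]}; /* lane 0 from __lo,
                                                      lane 1 from __hi */
}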
/// Calculates the square root of each of the two values stored in a
@@ -244,9 +240,7 @@ _mm_sqrt_sd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector of [2 x double] containing the square roots of the
/// values in the operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_sqrt_pd(__m128d __a)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_sqrt_pd(__m128d __a) {
return __builtin_ia32_sqrtpd((__v2df)__a);
}
@@ -268,9 +262,8 @@ _mm_sqrt_pd(__m128d __a)
/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
/// minimum value between both operands. The upper 64 bits are copied from
/// the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_min_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_minsd((__v2df)__a, (__v2df)__b);
}
@@ -288,9 +281,8 @@ _mm_min_sd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double] containing one of the operands.
/// \returns A 128-bit vector of [2 x double] containing the minimum values
/// between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_min_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_min_pd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_minpd((__v2df)__a, (__v2df)__b);
}
@@ -312,9 +304,8 @@ _mm_min_pd(__m128d __a, __m128d __b)
/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
/// maximum value between both operands. The upper 64 bits are copied from
/// the upper 64 bits of the first source operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_max_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_maxsd((__v2df)__a, (__v2df)__b);
}
@@ -332,9 +323,8 @@ _mm_max_sd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double] containing one of the operands.
/// \returns A 128-bit vector of [2 x double] containing the maximum values
/// between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_max_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_max_pd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_maxpd((__v2df)__a, (__v2df)__b);
}
@@ -350,9 +340,8 @@ _mm_max_pd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double] containing one of the source operands.
/// \returns A 128-bit vector of [2 x double] containing the bitwise AND of the
/// values between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_and_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_and_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)((__v2du)__a & (__v2du)__b);
}
@@ -371,9 +360,8 @@ _mm_and_pd(__m128d __a, __m128d __b)
/// \returns A 128-bit vector of [2 x double] containing the bitwise AND of the
/// values in the second operand and the one's complement of the first
/// operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_andnot_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_andnot_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)(~(__v2du)__a & (__v2du)__b);
}
@@ -389,9 +377,8 @@ _mm_andnot_pd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double] containing one of the source operands.
/// \returns A 128-bit vector of [2 x double] containing the bitwise OR of the
/// values between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_or_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_or_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)((__v2du)__a | (__v2du)__b);
}
@@ -407,9 +394,8 @@ _mm_or_pd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double] containing one of the source operands.
/// \returns A 128-bit vector of [2 x double] containing the bitwise XOR of the
/// values between both operands.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_xor_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_xor_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)((__v2du)__a ^ (__v2du)__b);
}
@@ -426,9 +412,8 @@ _mm_xor_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpeq_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpeqpd((__v2df)__a, (__v2df)__b);
}
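/* Illustrative note: the packed cmp*_pd intrinsics in this block return a
   per-lane mask of all ones (true) or all zeros (false), so the result
   composes with the bitwise intrinsics. A hedged branchless-select sketch,
   assuming <emmintrin.h> (hypothetical helper): */
static __m128d __demo_select(__m128d __mask, __m128d __t, __m128d __f) {
  return _mm_or_pd(_mm_and_pd(__mask, __t),      /* lanes where mask is true */
                   _mm_andnot_pd(__mask, __f));  /* lanes where it is false */
}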
@@ -446,9 +431,8 @@ _mm_cmpeq_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmplt_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpltpd((__v2df)__a, (__v2df)__b);
}
@@ -467,9 +451,8 @@ _mm_cmplt_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmple_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmplepd((__v2df)__a, (__v2df)__b);
}
@@ -488,9 +471,8 @@ _mm_cmple_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpgt_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpltpd((__v2df)__b, (__v2df)__a);
}
@@ -509,9 +491,8 @@ _mm_cmpgt_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpge_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmplepd((__v2df)__b, (__v2df)__a);
}
@@ -532,9 +513,8 @@ _mm_cmpge_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpord_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpordpd((__v2df)__a, (__v2df)__b);
}
@@ -556,9 +536,8 @@ _mm_cmpord_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpunord_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpunordpd((__v2df)__a, (__v2df)__b);
}
@@ -577,9 +556,8 @@ _mm_cmpunord_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpneq_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpneqpd((__v2df)__a, (__v2df)__b);
}
@@ -598,9 +576,8 @@ _mm_cmpneq_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnlt_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__a, (__v2df)__b);
}
@@ -619,9 +596,8 @@ _mm_cmpnlt_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnle_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__a, (__v2df)__b);
}
@@ -640,9 +616,8 @@ _mm_cmpnle_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpngt_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpnltpd((__v2df)__b, (__v2df)__a);
}
@@ -661,9 +636,8 @@ _mm_cmpngt_pd(__m128d __a, __m128d __b)
/// \param __b
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector containing the comparison results.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnge_pd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_pd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpnlepd((__v2df)__b, (__v2df)__a);
}
@@ -684,9 +658,8 @@ _mm_cmpnge_pd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns A 128-bit vector. The lower 64 bits contain the comparison
/// results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpeq_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpeq_sd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpeqsd((__v2df)__a, (__v2df)__b);
}
@@ -709,9 +682,8 @@ _mm_cmpeq_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns A 128-bit vector. The lower 64 bits contain the comparison
/// results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmplt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmplt_sd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpltsd((__v2df)__a, (__v2df)__b);
}
@@ -734,9 +706,8 @@ _mm_cmplt_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns A 128-bit vector. The lower 64 bits contain the comparison
/// results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmple_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmple_sd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmplesd((__v2df)__a, (__v2df)__b);
}
@@ -759,11 +730,10 @@ _mm_cmple_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns A 128-bit vector. The lower 64 bits contain the comparison
/// results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpgt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpgt_sd(__m128d __a,
+ __m128d __b) {
__m128d __c = __builtin_ia32_cmpltsd((__v2df)__b, (__v2df)__a);
- return __extension__ (__m128d) { __c[0], __a[1] };
+ return __extension__(__m128d){__c[0], __a[1]};
}
/// Compares the lower double-precision floating-point values in each of
@@ -785,11 +755,10 @@ _mm_cmpgt_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns A 128-bit vector. The lower 64 bits contain the comparison
/// results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpge_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpge_sd(__m128d __a,
+ __m128d __b) {
__m128d __c = __builtin_ia32_cmplesd((__v2df)__b, (__v2df)__a);
- return __extension__ (__m128d) { __c[0], __a[1] };
+ return __extension__(__m128d){__c[0], __a[1]};
}
/// Compares the lower double-precision floating-point values in each of
@@ -813,9 +782,8 @@ _mm_cmpge_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns A 128-bit vector. The lower 64 bits contain the comparison
/// results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpord_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpord_sd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpordsd((__v2df)__a, (__v2df)__b);
}
@@ -841,9 +809,8 @@ _mm_cmpord_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns A 128-bit vector. The lower 64 bits contain the comparison
/// results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpunord_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpunord_sd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpunordsd((__v2df)__a, (__v2df)__b);
}
@@ -866,9 +833,8 @@ _mm_cmpunord_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns A 128-bit vector. The lower 64 bits contain the comparison
/// results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpneq_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpneq_sd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpneqsd((__v2df)__a, (__v2df)__b);
}
@@ -891,9 +857,8 @@ _mm_cmpneq_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns A 128-bit vector. The lower 64 bits contain the comparison
/// results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnlt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnlt_sd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpnltsd((__v2df)__a, (__v2df)__b);
}
@@ -916,9 +881,8 @@ _mm_cmpnlt_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns A 128-bit vector. The lower 64 bits contain the comparison
/// results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnle_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnle_sd(__m128d __a,
+ __m128d __b) {
return (__m128d)__builtin_ia32_cmpnlesd((__v2df)__a, (__v2df)__b);
}
@@ -941,11 +905,10 @@ _mm_cmpnle_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns A 128-bit vector. The lower 64 bits contain the comparison
/// results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpngt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpngt_sd(__m128d __a,
+ __m128d __b) {
__m128d __c = __builtin_ia32_cmpnltsd((__v2df)__b, (__v2df)__a);
- return __extension__ (__m128d) { __c[0], __a[1] };
+ return __extension__(__m128d){__c[0], __a[1]};
}
/// Compares the lower double-precision floating-point values in each of
@@ -967,11 +930,10 @@ _mm_cmpngt_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns A 128-bit vector. The lower 64 bits contain the comparison
/// results. The upper 64 bits are copied from the upper 64 bits of \a __a.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cmpnge_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cmpnge_sd(__m128d __a,
+ __m128d __b) {
__m128d __c = __builtin_ia32_cmpnlesd((__v2df)__b, (__v2df)__a);
- return __extension__ (__m128d) { __c[0], __a[1] };
+ return __extension__(__m128d){__c[0], __a[1]};
}
/// Compares the lower double-precision floating-point values in each of
@@ -992,9 +954,8 @@ _mm_cmpnge_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison result. If either of the two
/// lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comieq_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_comieq_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_comisdeq((__v2df)__a, (__v2df)__b);
}
@@ -1018,9 +979,8 @@ _mm_comieq_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison result. If either of the two
/// lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comilt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_comilt_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_comisdlt((__v2df)__a, (__v2df)__b);
}
@@ -1044,9 +1004,8 @@ _mm_comilt_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison result. If either of the two
/// lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comile_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_comile_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_comisdle((__v2df)__a, (__v2df)__b);
}
@@ -1070,9 +1029,8 @@ _mm_comile_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison result. If either of the two
/// lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comigt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_comigt_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_comisdgt((__v2df)__a, (__v2df)__b);
}
@@ -1096,9 +1054,8 @@ _mm_comigt_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison result. If either of the two
/// lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comige_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_comige_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_comisdge((__v2df)__a, (__v2df)__b);
}
@@ -1122,9 +1079,8 @@ _mm_comige_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison result. If either of the two
/// lower double-precision values is NaN, 1 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_comineq_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_comineq_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_comisdneq((__v2df)__a, (__v2df)__b);
}
@@ -1146,9 +1102,8 @@ _mm_comineq_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison result. If either of the two
/// lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomieq_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomieq_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_ucomisdeq((__v2df)__a, (__v2df)__b);
}
@@ -1172,9 +1127,8 @@ _mm_ucomieq_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison result. If either of the two
/// lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomilt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomilt_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_ucomisdlt((__v2df)__a, (__v2df)__b);
}
@@ -1198,9 +1152,8 @@ _mm_ucomilt_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison result. If either of the two
/// lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomile_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomile_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_ucomisdle((__v2df)__a, (__v2df)__b);
}
@@ -1224,9 +1177,8 @@ _mm_ucomile_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison result. If either of the two
/// lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomigt_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomigt_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_ucomisdgt((__v2df)__a, (__v2df)__b);
}
@@ -1250,9 +1202,8 @@ _mm_ucomigt_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison result. If either of the two
/// lower double-precision values is NaN, 0 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomige_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomige_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_ucomisdge((__v2df)__a, (__v2df)__b);
}
@@ -1276,9 +1227,8 @@ _mm_ucomige_sd(__m128d __a, __m128d __b)
/// compared to the lower double-precision value of \a __a.
/// \returns An integer containing the comparison result. If either of the two
/// lower double-precision values is NaN, 1 is returned.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_ucomineq_sd(__m128d __a, __m128d __b)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_ucomineq_sd(__m128d __a,
+ __m128d __b) {
return __builtin_ia32_ucomisdneq((__v2df)__a, (__v2df)__b);
}
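/* Illustrative note: the _mm_comi*_sd and _mm_ucomi*_sd families return the
   same 0/1 answers and differ only in floating-point exception behavior --
   COMISD signals an invalid-operation exception for quiet NaN inputs, while
   UCOMISD signals only for signaling NaNs. Both calls below return 0: */
// _mm_comieq_sd(_mm_set_sd(__builtin_nan("")), _mm_set_sd(1.0))
// _mm_ucomieq_sd(_mm_set_sd(__builtin_nan("")), _mm_set_sd(1.0))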
@@ -1295,9 +1245,7 @@ _mm_ucomineq_sd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector of [4 x float] whose lower 64 bits contain the
/// converted values. The upper 64 bits are set to zero.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtpd_ps(__m128d __a)
-{
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtpd_ps(__m128d __a) {
return __builtin_ia32_cvtpd2ps((__v2df)__a);
}
@@ -1315,9 +1263,7 @@ _mm_cvtpd_ps(__m128d __a)
/// floating-point elements are converted to double-precision values. The
/// upper two elements are unused.
/// \returns A 128-bit vector of [2 x double] containing the converted values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtps_pd(__m128 __a)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtps_pd(__m128 __a) {
return (__m128d) __builtin_convertvector(
__builtin_shufflevector((__v4sf)__a, (__v4sf)__a, 0, 1), __v2df);
}
@@ -1338,9 +1284,7 @@ _mm_cvtps_pd(__m128 __a)
///
/// The upper two elements are unused.
/// \returns A 128-bit vector of [2 x double] containing the converted values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtepi32_pd(__m128i __a)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtepi32_pd(__m128i __a) {
return (__m128d) __builtin_convertvector(
__builtin_shufflevector((__v4si)__a, (__v4si)__a, 0, 1), __v2df);
}
@@ -1358,9 +1302,7 @@ _mm_cvtepi32_pd(__m128i __a)
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the
/// converted values. The upper 64 bits are set to zero.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtpd_epi32(__m128d __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtpd_epi32(__m128d __a) {
return __builtin_ia32_cvtpd2dq((__v2df)__a);
}
@@ -1375,9 +1317,7 @@ _mm_cvtpd_epi32(__m128d __a)
/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the
/// conversion.
/// \returns A 32-bit signed integer containing the converted value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvtsd_si32(__m128d __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtsd_si32(__m128d __a) {
return __builtin_ia32_cvtsd2si((__v2df)__a);
}
@@ -1400,9 +1340,8 @@ _mm_cvtsd_si32(__m128d __a)
/// \returns A 128-bit vector of [4 x float]. The lower 32 bits contain the
/// converted value from the second parameter. The upper 96 bits are copied
/// from the upper 96 bits of the first parameter.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtsd_ss(__m128 __a, __m128d __b)
-{
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtsd_ss(__m128 __a,
+ __m128d __b) {
return (__m128)__builtin_ia32_cvtsd2ss((__v4sf)__a, (__v2df)__b);
}
@@ -1423,9 +1362,8 @@ _mm_cvtsd_ss(__m128 __a, __m128d __b)
/// \returns A 128-bit vector of [2 x double]. The lower 64 bits contain the
/// converted value from the second parameter. The upper 64 bits are copied
/// from the upper 64 bits of the first parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtsi32_sd(__m128d __a, int __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtsi32_sd(__m128d __a,
+ int __b) {
__a[0] = __b;
return __a;
}
@@ -1449,9 +1387,8 @@ _mm_cvtsi32_sd(__m128d __a, int __b)
/// \returns A 128-bit vector of [2 x double]. The lower 64 bits contain the
/// converted value from the second parameter. The upper 64 bits are copied
/// from the upper 64 bits of the first parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtss_sd(__m128d __a, __m128 __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtss_sd(__m128d __a,
+ __m128 __b) {
__a[0] = __b[0];
return __a;
}
@@ -1473,9 +1410,7 @@ _mm_cvtss_sd(__m128d __a, __m128 __b)
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector of [4 x i32] whose lower 64 bits contain the
/// converted values. The upper 64 bits are set to zero.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvttpd_epi32(__m128d __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttpd_epi32(__m128d __a) {
return (__m128i)__builtin_ia32_cvttpd2dq((__v2df)__a);
}
@@ -1491,9 +1426,7 @@ _mm_cvttpd_epi32(__m128d __a)
/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the
/// conversion.
/// \returns A 32-bit signed integer containing the converted value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvttsd_si32(__m128d __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_cvttsd_si32(__m128d __a) {
return __builtin_ia32_cvttsd2si((__v2df)__a);
}
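/* Illustrative contrast, assuming the default MXCSR rounding mode
   (round-to-nearest-even):
     _mm_cvtsd_si32(_mm_set_sd(1.7))  -> 2  (rounds)
     _mm_cvttsd_si32(_mm_set_sd(1.7)) -> 1  (truncates toward zero)
     _mm_cvtsd_si32(_mm_set_sd(2.5))  -> 2  (ties round to even)
     _mm_cvttsd_si32(_mm_set_sd(2.5)) -> 2  (truncates) */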
@@ -1508,9 +1441,7 @@ _mm_cvttsd_si32(__m128d __a)
/// \param __a
/// A 128-bit vector of [2 x double].
/// \returns A 64-bit vector of [2 x i32] containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpd_pi32(__m128d __a)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_cvtpd_pi32(__m128d __a) {
return (__m64)__builtin_ia32_cvtpd2pi((__v2df)__a);
}
@@ -1528,9 +1459,7 @@ _mm_cvtpd_pi32(__m128d __a)
/// \param __a
/// A 128-bit vector of [2 x double].
/// \returns A 64-bit vector of [2 x i32] containing the converted values.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_cvttpd_pi32(__m128d __a)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_cvttpd_pi32(__m128d __a) {
return (__m64)__builtin_ia32_cvttpd2pi((__v2df)__a);
}
@@ -1545,9 +1474,7 @@ _mm_cvttpd_pi32(__m128d __a)
/// \param __a
/// A 64-bit vector of [2 x i32].
/// \returns A 128-bit vector of [2 x double] containing the converted values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS_MMX
-_mm_cvtpi32_pd(__m64 __a)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS_MMX _mm_cvtpi32_pd(__m64 __a) {
return __builtin_ia32_cvtpi2pd((__v2si)__a);
}
@@ -1562,9 +1489,7 @@ _mm_cvtpi32_pd(__m64 __a)
/// A 128-bit vector of [2 x double]. The lower 64 bits are returned.
/// \returns A double-precision floating-point value copied from the lower 64
/// bits of \a __a.
-static __inline__ double __DEFAULT_FN_ATTRS
-_mm_cvtsd_f64(__m128d __a)
-{
+static __inline__ double __DEFAULT_FN_ATTRS _mm_cvtsd_f64(__m128d __a) {
return __a[0];
}
@@ -1579,10 +1504,8 @@ _mm_cvtsd_f64(__m128d __a)
/// A pointer to a 128-bit memory location. The address of the memory
/// location has to be 16-byte aligned.
/// \returns A 128-bit vector of [2 x double] containing the loaded values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_load_pd(double const *__dp)
-{
- return *(const __m128d*)__dp;
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load_pd(double const *__dp) {
+ return *(const __m128d *)__dp;
}
/// Loads a double-precision floating-point value from a specified memory
@@ -1597,17 +1520,15 @@ _mm_load_pd(double const *__dp)
/// A pointer to a memory location containing a double-precision value.
/// \returns A 128-bit vector of [2 x double] containing the loaded and
/// duplicated values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_load1_pd(double const *__dp)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load1_pd(double const *__dp) {
struct __mm_load1_pd_struct {
double __u;
} __attribute__((__packed__, __may_alias__));
- double __u = ((const struct __mm_load1_pd_struct*)__dp)->__u;
- return __extension__ (__m128d){ __u, __u };
+ double __u = ((const struct __mm_load1_pd_struct *)__dp)->__u;
+ return __extension__(__m128d){__u, __u};
}
-#define _mm_load_pd1(dp) _mm_load1_pd(dp)
+#define _mm_load_pd1(dp) _mm_load1_pd(dp)
/// Loads two double-precision values, in reverse order, from an aligned
/// memory location into a 128-bit vector of [2 x double].
@@ -1623,10 +1544,8 @@ _mm_load1_pd(double const *__dp)
/// loaded in reverse order.
/// \returns A 128-bit vector of [2 x double] containing the reversed loaded
/// values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadr_pd(double const *__dp)
-{
- __m128d __u = *(const __m128d*)__dp;
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadr_pd(double const *__dp) {
+ __m128d __u = *(const __m128d *)__dp;
return __builtin_shufflevector((__v2df)__u, (__v2df)__u, 1, 0);
}
@@ -1641,13 +1560,11 @@ _mm_loadr_pd(double const *__dp)
/// A pointer to a 128-bit memory location. The address of the memory
/// location does not have to be aligned.
/// \returns A 128-bit vector of [2 x double] containing the loaded values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadu_pd(double const *__dp)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadu_pd(double const *__dp) {
struct __loadu_pd {
__m128d_u __v;
} __attribute__((__packed__, __may_alias__));
- return ((const struct __loadu_pd*)__dp)->__v;
+ return ((const struct __loadu_pd *)__dp)->__v;
}
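/* Illustrative note: the one-member __packed__, __may_alias__ wrapper structs
   used by the unaligned loads above tell the compiler "access through this
   pointer with alignment 1 and without strict-aliasing assumptions", so an
   unaligned instruction (e.g. movupd) is emitted instead of an aligned one.
   A hedged sketch of the same idiom for a plain double (hypothetical name): */
static double __demo_load_unaligned_f64(const void *__p) {
  struct __demo_u {
    double __v;
  } __attribute__((__packed__, __may_alias__));
  return ((const struct __demo_u *)__p)->__v;
}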
/// Loads a 64-bit integer value to the low element of a 128-bit integer
@@ -1661,14 +1578,12 @@ _mm_loadu_pd(double const *__dp)
/// A pointer to a 64-bit memory location. The address of the memory
/// location does not have to be aligned.
/// \returns A 128-bit vector of [2 x i64] containing the loaded value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si64(void const *__a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si64(void const *__a) {
struct __loadu_si64 {
long long __v;
} __attribute__((__packed__, __may_alias__));
- long long __u = ((const struct __loadu_si64*)__a)->__v;
- return __extension__ (__m128i)(__v2di){__u, 0LL};
+ long long __u = ((const struct __loadu_si64 *)__a)->__v;
+ return __extension__(__m128i)(__v2di){__u, 0LL};
}
/// Loads a 32-bit integer value to the low element of a 128-bit integer
@@ -1682,14 +1597,12 @@ _mm_loadu_si64(void const *__a)
/// A pointer to a 32-bit memory location. The address of the memory
/// location does not have to be aligned.
/// \returns A 128-bit vector of [4 x i32] containing the loaded value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si32(void const *__a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si32(void const *__a) {
struct __loadu_si32 {
int __v;
} __attribute__((__packed__, __may_alias__));
- int __u = ((const struct __loadu_si32*)__a)->__v;
- return __extension__ (__m128i)(__v4si){__u, 0, 0, 0};
+ int __u = ((const struct __loadu_si32 *)__a)->__v;
+ return __extension__(__m128i)(__v4si){__u, 0, 0, 0};
}
/// Loads a 16-bit integer value to the low element of a 128-bit integer
@@ -1703,14 +1616,12 @@ _mm_loadu_si32(void const *__a)
/// A pointer to a 16-bit memory location. The address of the memory
/// location does not have to be aligned.
/// \returns A 128-bit vector of [8 x i16] containing the loaded value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si16(void const *__a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_loadu_si16(void const *__a) {
struct __loadu_si16 {
short __v;
} __attribute__((__packed__, __may_alias__));
- short __u = ((const struct __loadu_si16*)__a)->__v;
- return __extension__ (__m128i)(__v8hi){__u, 0, 0, 0, 0, 0, 0, 0};
+ short __u = ((const struct __loadu_si16 *)__a)->__v;
+ return __extension__(__m128i)(__v8hi){__u, 0, 0, 0, 0, 0, 0, 0};
}
/// Loads a 64-bit double-precision value to the low element of a
@@ -1724,14 +1635,12 @@ _mm_loadu_si16(void const *__a)
/// A pointer to a memory location containing a double-precision value.
/// The address of the memory location does not have to be aligned.
/// \returns A 128-bit vector of [2 x double] containing the loaded value.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_load_sd(double const *__dp)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_load_sd(double const *__dp) {
struct __mm_load_sd_struct {
double __u;
} __attribute__((__packed__, __may_alias__));
- double __u = ((const struct __mm_load_sd_struct*)__dp)->__u;
- return __extension__ (__m128d){ __u, 0 };
+ double __u = ((const struct __mm_load_sd_struct *)__dp)->__u;
+ return __extension__(__m128d){__u, 0};
}
/// Loads a double-precision value into the high-order bits of a 128-bit
@@ -1751,14 +1660,13 @@ _mm_load_sd(double const *__dp)
/// [127:64] of the result. The address of the memory location does not have
/// to be aligned.
/// \returns A 128-bit vector of [2 x double] containing the moved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadh_pd(__m128d __a, double const *__dp)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadh_pd(__m128d __a,
+ double const *__dp) {
struct __mm_loadh_pd_struct {
double __u;
} __attribute__((__packed__, __may_alias__));
- double __u = ((const struct __mm_loadh_pd_struct*)__dp)->__u;
- return __extension__ (__m128d){ __a[0], __u };
+ double __u = ((const struct __mm_loadh_pd_struct *)__dp)->__u;
+ return __extension__(__m128d){__a[0], __u};
}
/// Loads a double-precision value into the low-order bits of a 128-bit
@@ -1778,14 +1686,13 @@ _mm_loadh_pd(__m128d __a, double const *__dp)
/// [63:0] of the result. The address of the memory location does not have to
/// be aligned.
/// \returns A 128-bit vector of [2 x double] containing the moved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_loadl_pd(__m128d __a, double const *__dp)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_loadl_pd(__m128d __a,
+ double const *__dp) {
struct __mm_loadl_pd_struct {
double __u;
} __attribute__((__packed__, __may_alias__));
- double __u = ((const struct __mm_loadl_pd_struct*)__dp)->__u;
- return __extension__ (__m128d){ __u, __a[1] };
+ double __u = ((const struct __mm_loadl_pd_struct *)__dp)->__u;
+ return __extension__(__m128d){__u, __a[1]};
}
/// Constructs a 128-bit floating-point vector of [2 x double] with
@@ -1799,9 +1706,7 @@ _mm_loadl_pd(__m128d __a, double const *__dp)
///
/// \returns A 128-bit floating-point vector of [2 x double] with unspecified
/// content.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_undefined_pd(void)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_undefined_pd(void) {
return (__m128d)__builtin_ia32_undef128();
}
@@ -1819,10 +1724,8 @@ _mm_undefined_pd(void)
/// \returns An initialized 128-bit floating-point vector of [2 x double]. The
/// lower 64 bits contain the value of the parameter. The upper 64 bits are
/// set to zero.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set_sd(double __w)
-{
- return __extension__ (__m128d){ __w, 0 };
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_sd(double __w) {
+ return __extension__(__m128d){__w, 0};
}
/// Constructs a 128-bit floating-point vector of [2 x double], with each
@@ -1837,10 +1740,8 @@ _mm_set_sd(double __w)
/// A double-precision floating-point value used to initialize each vector
/// element of the result.
/// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set1_pd(double __w)
-{
- return __extension__ (__m128d){ __w, __w };
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set1_pd(double __w) {
+ return __extension__(__m128d){__w, __w};
}
/// Constructs a 128-bit floating-point vector of [2 x double], with each
@@ -1855,9 +1756,7 @@ _mm_set1_pd(double __w)
/// A double-precision floating-point value used to initialize each vector
/// element of the result.
/// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set_pd1(double __w)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_pd1(double __w) {
return _mm_set1_pd(__w);
}
@@ -1875,10 +1774,9 @@ _mm_set_pd1(double __w)
/// A double-precision floating-point value used to initialize the lower 64
/// bits of the result.
/// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_set_pd(double __w, double __x)
-{
- return __extension__ (__m128d){ __x, __w };
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_set_pd(double __w,
+ double __x) {
+ return __extension__(__m128d){__x, __w};
}
/// Constructs a 128-bit floating-point vector of [2 x double],
@@ -1896,10 +1794,9 @@ _mm_set_pd(double __w, double __x)
/// A double-precision floating-point value used to initialize the upper 64
/// bits of the result.
/// \returns An initialized 128-bit floating-point vector of [2 x double].
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_setr_pd(double __w, double __x)
-{
- return __extension__ (__m128d){ __w, __x };
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_setr_pd(double __w,
+ double __x) {
+ return __extension__(__m128d){__w, __x};
}
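/* Illustrative contrast: _mm_set_pd takes its arguments high lane first and
   _mm_setr_pd low lane first, so the two calls below build the same vector: */
// _mm_set_pd(2.0, 1.0)  /* lanes {1.0, 2.0}: element 0 is 1.0 */
// _mm_setr_pd(1.0, 2.0) /* lanes {1.0, 2.0} */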
/// Constructs a 128-bit floating-point vector of [2 x double]
@@ -1911,10 +1808,8 @@ _mm_setr_pd(double __w, double __x)
///
/// \returns An initialized 128-bit floating-point vector of [2 x double] with
/// all elements set to zero.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_setzero_pd(void)
-{
- return __extension__ (__m128d){ 0, 0 };
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_setzero_pd(void) {
+ return __extension__(__m128d){0, 0};
}
/// Constructs a 128-bit floating-point vector of [2 x double]. The lower
@@ -1932,9 +1827,8 @@ _mm_setzero_pd(void)
/// A 128-bit vector of [2 x double]. The lower 64 bits are written to the
/// lower 64 bits of the result.
/// \returns A 128-bit vector of [2 x double] containing the moved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_move_sd(__m128d __a, __m128d __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_move_sd(__m128d __a,
+ __m128d __b) {
__a[0] = __b[0];
return __a;
}
@@ -1950,13 +1844,12 @@ _mm_move_sd(__m128d __a, __m128d __b)
/// A pointer to a 64-bit memory location.
/// \param __a
/// A 128-bit vector of [2 x double] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_sd(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_store_sd(double *__dp,
+ __m128d __a) {
struct __mm_store_sd_struct {
double __u;
} __attribute__((__packed__, __may_alias__));
- ((struct __mm_store_sd_struct*)__dp)->__u = __a[0];
+ ((struct __mm_store_sd_struct *)__dp)->__u = __a[0];
}
/// Moves packed double-precision values from a 128-bit vector of
@@ -1972,10 +1865,9 @@ _mm_store_sd(double *__dp, __m128d __a)
/// \param __a
/// A packed 128-bit vector of [2 x double] containing the values to be
/// moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_pd(double *__dp, __m128d __a)
-{
- *(__m128d*)__dp = __a;
+static __inline__ void __DEFAULT_FN_ATTRS _mm_store_pd(double *__dp,
+ __m128d __a) {
+ *(__m128d *)__dp = __a;
}
/// Moves the lower 64 bits of a 128-bit vector of [2 x double] twice to
@@ -1992,9 +1884,8 @@ _mm_store_pd(double *__dp, __m128d __a)
/// \param __a
/// A 128-bit vector of [2 x double] whose lower 64 bits are copied to each
/// of the values in \a __dp.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store1_pd(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_store1_pd(double *__dp,
+ __m128d __a) {
__a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 0, 0);
_mm_store_pd(__dp, __a);
}
@@ -2013,9 +1904,8 @@ _mm_store1_pd(double *__dp, __m128d __a)
/// \param __a
/// A 128-bit vector of [2 x double] whose lower 64 bits are copied to each
/// of the values in \a __dp.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_pd1(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_store_pd1(double *__dp,
+ __m128d __a) {
_mm_store1_pd(__dp, __a);
}
@@ -2031,13 +1921,12 @@ _mm_store_pd1(double *__dp, __m128d __a)
/// location does not have to be aligned.
/// \param __a
/// A 128-bit vector of [2 x double] containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_pd(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_pd(double *__dp,
+ __m128d __a) {
struct __storeu_pd {
__m128d_u __v;
} __attribute__((__packed__, __may_alias__));
- ((struct __storeu_pd*)__dp)->__v = __a;
+ ((struct __storeu_pd *)__dp)->__v = __a;
}
/// Stores two double-precision values, in reverse order, from a 128-bit
@@ -2054,9 +1943,8 @@ _mm_storeu_pd(double *__dp, __m128d __a)
/// \param __a
/// A 128-bit vector of [2 x double] containing the values to be reversed and
/// stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storer_pd(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storer_pd(double *__dp,
+ __m128d __a) {
__a = __builtin_shufflevector((__v2df)__a, (__v2df)__a, 1, 0);
*(__m128d *)__dp = __a;
}
@@ -2072,13 +1960,12 @@ _mm_storer_pd(double *__dp, __m128d __a)
/// A pointer to a 64-bit memory location.
/// \param __a
/// A 128-bit vector of [2 x double] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeh_pd(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storeh_pd(double *__dp,
+ __m128d __a) {
struct __mm_storeh_pd_struct {
double __u;
} __attribute__((__packed__, __may_alias__));
- ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[1];
+ ((struct __mm_storeh_pd_struct *)__dp)->__u = __a[1];
}
/// Stores the lower 64 bits of a 128-bit vector of [2 x double] to a
@@ -2092,13 +1979,12 @@ _mm_storeh_pd(double *__dp, __m128d __a)
/// A pointer to a 64-bit memory location.
/// \param __a
/// A 128-bit vector of [2 x double] containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storel_pd(double *__dp, __m128d __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storel_pd(double *__dp,
+ __m128d __a) {
struct __mm_storeh_pd_struct {
double __u;
} __attribute__((__packed__, __may_alias__));
- ((struct __mm_storeh_pd_struct*)__dp)->__u = __a[0];
+ ((struct __mm_storeh_pd_struct *)__dp)->__u = __a[0];
}
/// Adds the corresponding elements of two 128-bit vectors of [16 x i8],
@@ -2117,9 +2003,8 @@ _mm_storel_pd(double *__dp, __m128d __a)
/// A 128-bit vector of [16 x i8].
/// \returns A 128-bit vector of [16 x i8] containing the sums of both
/// parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi8(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v16qu)__a + (__v16qu)__b);
}
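/* Illustrative note: the cast to the unsigned __v16qu type makes per-lane
   overflow well-defined (modulo-256 wraparound, matching the paddb
   instruction), whereas signed overflow in C would be undefined. E.g.: */
// _mm_add_epi8(_mm_set1_epi8(127), _mm_set1_epi8(1)) /* every lane wraps to -128 */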
@@ -2139,9 +2024,8 @@ _mm_add_epi8(__m128i __a, __m128i __b)
/// A 128-bit vector of [8 x i16].
/// \returns A 128-bit vector of [8 x i16] containing the sums of both
/// parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi16(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v8hu)__a + (__v8hu)__b);
}
@@ -2161,9 +2045,8 @@ _mm_add_epi16(__m128i __a, __m128i __b)
/// A 128-bit vector of [4 x i32].
/// \returns A 128-bit vector of [4 x i32] containing the sums of both
/// parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi32(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v4su)__a + (__v4su)__b);
}
@@ -2179,9 +2062,8 @@ _mm_add_epi32(__m128i __a, __m128i __b)
/// \param __b
/// A 64-bit integer.
/// \returns A 64-bit integer containing the sum of both parameters.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_add_si64(__m64 __a, __m64 __b)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_add_si64(__m64 __a,
+ __m64 __b) {
return (__m64)__builtin_ia32_paddq((__v1di)__a, (__v1di)__b);
}
@@ -2201,9 +2083,8 @@ _mm_add_si64(__m64 __a, __m64 __b)
/// A 128-bit vector of [2 x i64].
/// \returns A 128-bit vector of [2 x i64] containing the sums of both
/// parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_add_epi64(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_add_epi64(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v2du)__a + (__v2du)__b);
}
@@ -2222,10 +2103,9 @@ _mm_add_epi64(__m128i __a, __m128i __b)
/// A 128-bit signed [16 x i8] vector.
/// \returns A 128-bit signed [16 x i8] vector containing the saturated sums of
/// both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epi8(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_paddsb128((__v16qi)__a, (__v16qi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi8(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_elementwise_add_sat((__v16qs)__a, (__v16qs)__b);
}
/// Adds, with saturation, the corresponding elements of two 128-bit
@@ -2244,10 +2124,9 @@ _mm_adds_epi8(__m128i __a, __m128i __b)
/// A 128-bit signed [8 x i16] vector.
/// \returns A 128-bit signed [8 x i16] vector containing the saturated sums of
/// both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epi16(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_paddsw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epi16(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_elementwise_add_sat((__v8hi)__a, (__v8hi)__b);
}
/// Adds, with saturation, the corresponding elements of two 128-bit
@@ -2265,10 +2144,9 @@ _mm_adds_epi16(__m128i __a, __m128i __b)
/// A 128-bit unsigned [16 x i8] vector.
/// \returns A 128-bit unsigned [16 x i8] vector containing the saturated sums
/// of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epu8(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_paddusb128((__v16qi)__a, (__v16qi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu8(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_elementwise_add_sat((__v16qu)__a, (__v16qu)__b);
}
/// Adds, with saturation, the corresponding elements of two 128-bit
@@ -2286,10 +2164,9 @@ _mm_adds_epu8(__m128i __a, __m128i __b)
/// A 128-bit unsigned [8 x i16] vector.
/// \returns A 128-bit unsigned [8 x i16] vector containing the saturated sums
/// of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_adds_epu16(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_paddusw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_adds_epu16(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_elementwise_add_sat((__v8hu)__a, (__v8hu)__b);
}
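/* Illustrative note: the four hunks above swap the x86-specific padds/paddus
   builtins for clang's generic __builtin_elementwise_add_sat, which clamps to
   the lane type's range instead of wrapping like _mm_add_epi8 shown earlier: */
// _mm_adds_epi8(_mm_set1_epi8(127), _mm_set1_epi8(1)) /* every lane clamps to 127 */
// _mm_adds_epu8(_mm_set1_epi8(-1), _mm_set1_epi8(1))  /* every unsigned lane stays 255 */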
/// Computes the rounded averages of corresponding elements of two
@@ -2306,9 +2183,8 @@ _mm_adds_epu16(__m128i __a, __m128i __b)
/// A 128-bit unsigned [16 x i8] vector.
/// \returns A 128-bit unsigned [16 x i8] vector containing the rounded
/// averages of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_avg_epu8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_avg_epu8(__m128i __a,
+ __m128i __b) {
return (__m128i)__builtin_ia32_pavgb128((__v16qi)__a, (__v16qi)__b);
}
@@ -2326,9 +2202,8 @@ _mm_avg_epu8(__m128i __a, __m128i __b)
/// A 128-bit unsigned [8 x i16] vector.
/// \returns A 128-bit unsigned [8 x i16] vector containing the rounded
/// averages of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_avg_epu16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_avg_epu16(__m128i __a,
+ __m128i __b) {
return (__m128i)__builtin_ia32_pavgw128((__v8hi)__a, (__v8hi)__b);
}
@@ -2352,9 +2227,8 @@ _mm_avg_epu16(__m128i __a, __m128i __b)
/// A 128-bit signed [8 x i16] vector.
/// \returns A 128-bit signed [4 x i32] vector containing the sums of products
/// of both parameters.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_madd_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_madd_epi16(__m128i __a,
+ __m128i __b) {
return (__m128i)__builtin_ia32_pmaddwd128((__v8hi)__a, (__v8hi)__b);
}
@@ -2372,9 +2246,8 @@ _mm_madd_epi16(__m128i __a, __m128i __b)
/// A 128-bit signed [8 x i16] vector.
/// \returns A 128-bit signed [8 x i16] vector containing the greater value of
/// each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi16(__m128i __a,
+ __m128i __b) {
return (__m128i)__builtin_elementwise_max((__v8hi)__a, (__v8hi)__b);
}
@@ -2392,9 +2265,8 @@ _mm_max_epi16(__m128i __a, __m128i __b)
/// A 128-bit unsigned [16 x i8] vector.
/// \returns A 128-bit unsigned [16 x i8] vector containing the greater value of
/// each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epu8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu8(__m128i __a,
+ __m128i __b) {
return (__m128i)__builtin_elementwise_max((__v16qu)__a, (__v16qu)__b);
}
@@ -2412,9 +2284,8 @@ _mm_max_epu8(__m128i __a, __m128i __b)
/// A 128-bit signed [8 x i16] vector.
/// \returns A 128-bit signed [8 x i16] vector containing the smaller value of
/// each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi16(__m128i __a,
+ __m128i __b) {
return (__m128i)__builtin_elementwise_min((__v8hi)__a, (__v8hi)__b);
}
@@ -2432,9 +2303,8 @@ _mm_min_epi16(__m128i __a, __m128i __b)
/// A 128-bit unsigned [16 x i8] vector.
/// \returns A 128-bit unsigned [16 x i8] vector containing the smaller value of
/// each comparison.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epu8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu8(__m128i __a,
+ __m128i __b) {
return (__m128i)__builtin_elementwise_min((__v16qu)__a, (__v16qu)__b);
}
@@ -2452,9 +2322,8 @@ _mm_min_epu8(__m128i __a, __m128i __b)
/// A 128-bit signed [8 x i16] vector.
/// \returns A 128-bit signed [8 x i16] vector containing the upper 16 bits of
/// each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mulhi_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epi16(__m128i __a,
+ __m128i __b) {
return (__m128i)__builtin_ia32_pmulhw128((__v8hi)__a, (__v8hi)__b);
}
@@ -2472,9 +2341,8 @@ _mm_mulhi_epi16(__m128i __a, __m128i __b)
/// A 128-bit unsigned [8 x i16] vector.
/// \returns A 128-bit unsigned [8 x i16] vector containing the upper 16 bits
/// of each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mulhi_epu16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mulhi_epu16(__m128i __a,
+ __m128i __b) {
return (__m128i)__builtin_ia32_pmulhuw128((__v8hi)__a, (__v8hi)__b);
}
@@ -2492,9 +2360,8 @@ _mm_mulhi_epu16(__m128i __a, __m128i __b)
/// A 128-bit signed [8 x i16] vector.
/// \returns A 128-bit signed [8 x i16] vector containing the lower 16 bits of
/// each of the eight 32-bit products.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mullo_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi16(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v8hu)__a * (__v8hu)__b);
}
@@ -2511,9 +2378,8 @@ _mm_mullo_epi16(__m128i __a, __m128i __b)
/// \param __b
/// A 64-bit integer containing one of the source operands.
/// \returns A 64-bit integer vector containing the product of both operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_mul_su32(__m64 __a, __m64 __b)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_mul_su32(__m64 __a,
+ __m64 __b) {
return __builtin_ia32_pmuludq((__v2si)__a, (__v2si)__b);
}
@@ -2530,9 +2396,8 @@ _mm_mul_su32(__m64 __a, __m64 __b)
/// \param __b
/// A [2 x i64] vector containing one of the source operands.
/// \returns A [2 x i64] vector containing the product of both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mul_epu32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epu32(__m128i __a,
+ __m128i __b) {
return __builtin_ia32_pmuludq128((__v4si)__a, (__v4si)__b);
}
@@ -2552,9 +2417,8 @@ _mm_mul_epu32(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing one of the source operands.
/// \returns A [2 x i64] vector containing the sums of the sets of absolute
/// differences between both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sad_epu8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sad_epu8(__m128i __a,
+ __m128i __b) {
return __builtin_ia32_psadbw128((__v16qi)__a, (__v16qi)__b);
}
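
Because |x - 0| = x for unsigned bytes, PSADBW against zero doubles as a fast horizontal byte sum. A minimal sketch (sum_bytes is our name):

    #include <emmintrin.h>

    /* Sum all 16 unsigned bytes of v.  _mm_sad_epu8 leaves one partial
       sum in each 64-bit lane; shift the upper lane down and add. */
    static inline unsigned sum_bytes(__m128i v) {
      __m128i sad = _mm_sad_epu8(v, _mm_setzero_si128());
      __m128i hi = _mm_srli_si128(sad, 8); /* upper lane to position 0 */
      return (unsigned)_mm_cvtsi128_si32(_mm_add_epi32(sad, hi));
    }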
@@ -2570,9 +2434,8 @@ _mm_sad_epu8(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing the subtrahends.
/// \returns A 128-bit integer vector containing the differences of the values
/// in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi8(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v16qu)__a - (__v16qu)__b);
}
@@ -2588,9 +2451,8 @@ _mm_sub_epi8(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing the subtrahends.
/// \returns A 128-bit integer vector containing the differences of the values
/// in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi16(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v8hu)__a - (__v8hu)__b);
}
@@ -2606,9 +2468,8 @@ _mm_sub_epi16(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing the subtrahends.
/// \returns A 128-bit integer vector containing the differences of the values
/// in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi32(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v4su)__a - (__v4su)__b);
}
@@ -2625,9 +2486,8 @@ _mm_sub_epi32(__m128i __a, __m128i __b)
/// A 64-bit integer vector containing the subtrahend.
/// \returns A 64-bit integer vector containing the difference of the values in
/// the operands.
-static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX
-_mm_sub_si64(__m64 __a, __m64 __b)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS_MMX _mm_sub_si64(__m64 __a,
+ __m64 __b) {
return (__m64)__builtin_ia32_psubq((__v1di)__a, (__v1di)__b);
}
@@ -2643,9 +2503,8 @@ _mm_sub_si64(__m64 __a, __m64 __b)
/// A 128-bit integer vector containing the subtrahends.
/// \returns A 128-bit integer vector containing the differences of the values
/// in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sub_epi64(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sub_epi64(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v2du)__a - (__v2du)__b);
}
@@ -2664,10 +2523,9 @@ _mm_sub_epi64(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing the subtrahends.
/// \returns A 128-bit integer vector containing the differences of the values
/// in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epi8(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_psubsb128((__v16qi)__a, (__v16qi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi8(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_elementwise_sub_sat((__v16qs)__a, (__v16qs)__b);
}
/// Subtracts corresponding 16-bit signed integer values in the input and
@@ -2685,10 +2543,9 @@ _mm_subs_epi8(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing the subtrahends.
/// \returns A 128-bit integer vector containing the differences of the values
/// in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epi16(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_psubsw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epi16(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_elementwise_sub_sat((__v8hi)__a, (__v8hi)__b);
}
/// Subtracts corresponding 8-bit unsigned integer values in the input
@@ -2705,10 +2562,9 @@ _mm_subs_epi16(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing the subtrahends.
/// \returns A 128-bit integer vector containing the unsigned integer
/// differences of the values in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epu8(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_psubusb128((__v16qi)__a, (__v16qi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu8(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_elementwise_sub_sat((__v16qu)__a, (__v16qu)__b);
}
/// Subtracts corresponding 16-bit unsigned integer values in the input
@@ -2725,10 +2581,9 @@ _mm_subs_epu8(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing the subtrahends.
/// \returns A 128-bit integer vector containing the unsigned integer
/// differences of the values in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_subs_epu16(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_ia32_psubusw128((__v8hi)__a, (__v8hi)__b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_subs_epu16(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_elementwise_sub_sat((__v8hu)__a, (__v8hu)__b);
}
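
These hunks swap the target-specific psubs/psubus builtins for the generic __builtin_elementwise_sub_sat; the user-visible semantics are unchanged. A small sketch of what saturation means here (the demo values are ours):

    #include <emmintrin.h>

    /* Unsigned saturating subtraction clamps at 0 instead of wrapping:
       10 - 200 yields 0 in every byte lane, not 66 (the mod-256 result). */
    static inline __m128i demo_subs_epu8(void) {
      __m128i a = _mm_set1_epi8((char)10);
      __m128i b = _mm_set1_epi8((char)200);
      return _mm_subs_epu8(a, b);
    }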
/// Performs a bitwise AND of two 128-bit integer vectors.
@@ -2743,9 +2598,8 @@ _mm_subs_epu16(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing one of the source operands.
/// \returns A 128-bit integer vector containing the bitwise AND of the values
/// in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_and_si128(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_and_si128(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v2du)__a & (__v2du)__b);
}
@@ -2763,9 +2617,8 @@ _mm_and_si128(__m128i __a, __m128i __b)
/// A 128-bit vector containing the right source operand.
/// \returns A 128-bit integer vector containing the bitwise AND of the one's
/// complement of the first operand and the values in the second operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_andnot_si128(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_andnot_si128(__m128i __a,
+ __m128i __b) {
return (__m128i)(~(__v2du)__a & (__v2du)__b);
}
/// Performs a bitwise OR of two 128-bit integer vectors.
@@ -2780,9 +2633,8 @@ _mm_andnot_si128(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing one of the source operands.
/// \returns A 128-bit integer vector containing the bitwise OR of the values
/// in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_or_si128(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_or_si128(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v2du)__a | (__v2du)__b);
}
@@ -2798,9 +2650,8 @@ _mm_or_si128(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing one of the source operands.
/// \returns A 128-bit integer vector containing the bitwise exclusive OR of the
/// values in both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_xor_si128(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_xor_si128(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v2du)__a ^ (__v2du)__b);
}
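
AND, ANDNOT, and OR combine into the classic SSE2 branchless select, the pattern ANDNOT exists to serve. A minimal sketch (select_epi8 is an illustrative name):

    #include <emmintrin.h>

    /* Lanes where mask is all-ones take b; lanes where it is all-zeros
       take a.  The comparison intrinsics produce exactly such masks. */
    static inline __m128i select_epi8(__m128i mask, __m128i a, __m128i b) {
      return _mm_or_si128(_mm_and_si128(mask, b),
                          _mm_andnot_si128(mask, a));
    }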
@@ -2821,11 +2672,13 @@ _mm_xor_si128(__m128i __a, __m128i __b)
/// An immediate value specifying the number of bytes to left-shift operand
/// \a a.
/// \returns A 128-bit integer vector containing the left-shifted value.
-#define _mm_slli_si128(a, imm) \
- ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)))
+#define _mm_slli_si128(a, imm) \
+ ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), \
+ (int)(imm)))
-#define _mm_bslli_si128(a, imm) \
- ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)))
+#define _mm_bslli_si128(a, imm) \
+ ((__m128i)__builtin_ia32_pslldqi128_byteshift((__v2di)(__m128i)(a), \
+ (int)(imm)))
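
Note that these macros shift whole bytes, not bits, and the count must be an integer constant because it is encoded as an instruction immediate. A quick sketch (shift_in_zero_lane is our name):

    #include <emmintrin.h>

    /* Shift left by 4 bytes: viewed as [4 x i32], lane 0 moves to
       lane 1 and lane 0 is zero-filled.  Counts >= 16 give all zeros. */
    static inline __m128i shift_in_zero_lane(__m128i v) {
      return _mm_slli_si128(v, 4);
    }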
/// Left-shifts each 16-bit value in the 128-bit integer vector operand
/// by the specified number of bits. Low-order bits are cleared.
@@ -2840,9 +2693,8 @@ _mm_xor_si128(__m128i __a, __m128i __b)
/// An integer value specifying the number of bits to left-shift each value
/// in operand \a __a.
/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_slli_epi16(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi16(__m128i __a,
+ int __count) {
return (__m128i)__builtin_ia32_psllwi128((__v8hi)__a, __count);
}
@@ -2859,9 +2711,8 @@ _mm_slli_epi16(__m128i __a, int __count)
/// A 128-bit integer vector in which bits [63:0] specify the number of bits
/// to left-shift each value in operand \a __a.
/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sll_epi16(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi16(__m128i __a,
+ __m128i __count) {
return (__m128i)__builtin_ia32_psllw128((__v8hi)__a, (__v8hi)__count);
}
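
The immediate-count forms take the shift as a plain int, while these forms read it from bits [63:0] of a vector, which suits counts computed at run time. A sketch (shl16_by is an illustrative helper):

    #include <emmintrin.h>

    /* Shift every 16-bit lane left by a run-time count n >= 0.
       _mm_cvtsi32_si128 zero-fills the upper bits, as required. */
    static inline __m128i shl16_by(__m128i v, int n) {
      return _mm_sll_epi16(v, _mm_cvtsi32_si128(n));
    }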
@@ -2878,9 +2729,8 @@ _mm_sll_epi16(__m128i __a, __m128i __count)
/// An integer value specifying the number of bits to left-shift each value
/// in operand \a __a.
/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_slli_epi32(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi32(__m128i __a,
+ int __count) {
return (__m128i)__builtin_ia32_pslldi128((__v4si)__a, __count);
}
@@ -2897,9 +2747,8 @@ _mm_slli_epi32(__m128i __a, int __count)
/// A 128-bit integer vector in which bits [63:0] specify the number of bits
/// to left-shift each value in operand \a __a.
/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sll_epi32(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi32(__m128i __a,
+ __m128i __count) {
return (__m128i)__builtin_ia32_pslld128((__v4si)__a, (__v4si)__count);
}
@@ -2916,9 +2765,8 @@ _mm_sll_epi32(__m128i __a, __m128i __count)
/// An integer value specifying the number of bits to left-shift each value
/// in operand \a __a.
/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_slli_epi64(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_slli_epi64(__m128i __a,
+ int __count) {
return __builtin_ia32_psllqi128((__v2di)__a, __count);
}
@@ -2935,9 +2783,8 @@ _mm_slli_epi64(__m128i __a, int __count)
/// A 128-bit integer vector in which bits [63:0] specify the number of bits
/// to left-shift each value in operand \a __a.
/// \returns A 128-bit integer vector containing the left-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sll_epi64(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sll_epi64(__m128i __a,
+ __m128i __count) {
return __builtin_ia32_psllq128((__v2di)__a, (__v2di)__count);
}
@@ -2955,9 +2802,8 @@ _mm_sll_epi64(__m128i __a, __m128i __count)
/// An integer value specifying the number of bits to right-shift each value
/// in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srai_epi16(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srai_epi16(__m128i __a,
+ int __count) {
return (__m128i)__builtin_ia32_psrawi128((__v8hi)__a, __count);
}
@@ -2975,9 +2821,8 @@ _mm_srai_epi16(__m128i __a, int __count)
/// A 128-bit integer vector in which bits [63:0] specify the number of bits
/// to right-shift each value in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sra_epi16(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi16(__m128i __a,
+ __m128i __count) {
return (__m128i)__builtin_ia32_psraw128((__v8hi)__a, (__v8hi)__count);
}
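
Arithmetic right shifts replicate the sign bit, while the logical forms further below fill with zeros; the difference only shows on negative lanes. A small sketch (the demo values are ours):

    #include <emmintrin.h>

    /* On 0xFFFE (-2): arithmetic shift by 1 gives 0xFFFF (-1),
       logical shift by 1 gives 0x7FFF. */
    static inline void sra_vs_srl(__m128i *sra, __m128i *srl) {
      __m128i v = _mm_set1_epi16(-2);
      *sra = _mm_srai_epi16(v, 1);
      *srl = _mm_srli_epi16(v, 1);
    }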
@@ -2995,9 +2840,8 @@ _mm_sra_epi16(__m128i __a, __m128i __count)
/// An integer value specifying the number of bits to right-shift each value
/// in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srai_epi32(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srai_epi32(__m128i __a,
+ int __count) {
return (__m128i)__builtin_ia32_psradi128((__v4si)__a, __count);
}
@@ -3015,9 +2859,8 @@ _mm_srai_epi32(__m128i __a, int __count)
/// A 128-bit integer vector in which bits [63:0] specify the number of bits
/// to right-shift each value in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_sra_epi32(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_sra_epi32(__m128i __a,
+ __m128i __count) {
return (__m128i)__builtin_ia32_psrad128((__v4si)__a, (__v4si)__count);
}
@@ -3038,11 +2881,13 @@ _mm_sra_epi32(__m128i __a, __m128i __count)
/// An immediate value specifying the number of bytes to right-shift operand
/// \a a.
/// \returns A 128-bit integer vector containing the right-shifted value.
-#define _mm_srli_si128(a, imm) \
- ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)))
+#define _mm_srli_si128(a, imm) \
+ ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), \
+ (int)(imm)))
-#define _mm_bsrli_si128(a, imm) \
- ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), (int)(imm)))
+#define _mm_bsrli_si128(a, imm) \
+ ((__m128i)__builtin_ia32_psrldqi128_byteshift((__v2di)(__m128i)(a), \
+ (int)(imm)))
/// Right-shifts each of the 16-bit values in the 128-bit integer vector
/// operand by the specified number of bits. High-order bits are cleared.
@@ -3057,9 +2902,8 @@ _mm_sra_epi32(__m128i __a, __m128i __count)
/// An integer value specifying the number of bits to right-shift each value
/// in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srli_epi16(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi16(__m128i __a,
+ int __count) {
return (__m128i)__builtin_ia32_psrlwi128((__v8hi)__a, __count);
}
@@ -3076,9 +2920,8 @@ _mm_srli_epi16(__m128i __a, int __count)
/// A 128-bit integer vector in which bits [63:0] specify the number of bits
/// to right-shift each value in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srl_epi16(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi16(__m128i __a,
+ __m128i __count) {
return (__m128i)__builtin_ia32_psrlw128((__v8hi)__a, (__v8hi)__count);
}
@@ -3095,9 +2938,8 @@ _mm_srl_epi16(__m128i __a, __m128i __count)
/// An integer value specifying the number of bits to right-shift each value
/// in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srli_epi32(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi32(__m128i __a,
+ int __count) {
return (__m128i)__builtin_ia32_psrldi128((__v4si)__a, __count);
}
@@ -3114,9 +2956,8 @@ _mm_srli_epi32(__m128i __a, int __count)
/// A 128-bit integer vector in which bits [63:0] specify the number of bits
/// to right-shift each value in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srl_epi32(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi32(__m128i __a,
+ __m128i __count) {
return (__m128i)__builtin_ia32_psrld128((__v4si)__a, (__v4si)__count);
}
@@ -3133,9 +2974,8 @@ _mm_srl_epi32(__m128i __a, __m128i __count)
/// An integer value specifying the number of bits to right-shift each value
/// in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srli_epi64(__m128i __a, int __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srli_epi64(__m128i __a,
+ int __count) {
return __builtin_ia32_psrlqi128((__v2di)__a, __count);
}
@@ -3152,9 +2992,8 @@ _mm_srli_epi64(__m128i __a, int __count)
/// A 128-bit integer vector in which bits [63:0] specify the number of bits
/// to right-shift each value in operand \a __a.
/// \returns A 128-bit integer vector containing the right-shifted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_srl_epi64(__m128i __a, __m128i __count)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_srl_epi64(__m128i __a,
+ __m128i __count) {
return __builtin_ia32_psrlq128((__v2di)__a, (__v2di)__count);
}
@@ -3171,9 +3010,8 @@ _mm_srl_epi64(__m128i __a, __m128i __count)
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi8(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v16qi)__a == (__v16qi)__b);
}
@@ -3190,9 +3028,8 @@ _mm_cmpeq_epi8(__m128i __a, __m128i __b)
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi16(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v8hi)__a == (__v8hi)__b);
}
@@ -3209,9 +3046,8 @@ _mm_cmpeq_epi16(__m128i __a, __m128i __b)
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi32(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v4si)__a == (__v4si)__b);
}
@@ -3229,9 +3065,8 @@ _mm_cmpeq_epi32(__m128i __a, __m128i __b)
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi8(__m128i __a,
+ __m128i __b) {
/* This function always performs a signed comparison, but __v16qi is a char
which may be signed or unsigned, so use __v16qs. */
return (__m128i)((__v16qs)__a > (__v16qs)__b);
@@ -3252,9 +3087,8 @@ _mm_cmpgt_epi8(__m128i __a, __m128i __b)
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi16(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v8hi)__a > (__v8hi)__b);
}
@@ -3273,9 +3107,8 @@ _mm_cmpgt_epi16(__m128i __a, __m128i __b)
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi32(__m128i __a,
+ __m128i __b) {
return (__m128i)((__v4si)__a > (__v4si)__b);
}
@@ -3294,9 +3127,8 @@ _mm_cmpgt_epi32(__m128i __a, __m128i __b)
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmplt_epi8(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi8(__m128i __a,
+ __m128i __b) {
return _mm_cmpgt_epi8(__b, __a);
}
@@ -3315,9 +3147,8 @@ _mm_cmplt_epi8(__m128i __a, __m128i __b)
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmplt_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi16(__m128i __a,
+ __m128i __b) {
return _mm_cmpgt_epi16(__b, __a);
}
@@ -3336,9 +3167,8 @@ _mm_cmplt_epi16(__m128i __a, __m128i __b)
/// \param __b
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmplt_epi32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmplt_epi32(__m128i __a,
+ __m128i __b) {
return _mm_cmpgt_epi32(__b, __a);
}
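
Since these comparisons return all-ones or all-zero lanes, they pair naturally with _mm_movemask_epi8 (further below) to reach a scalar branch. A minimal sketch (contains_byte is our name):

    #include <emmintrin.h>

    /* True if any of the 16 bytes of v equals c. */
    static inline int contains_byte(__m128i v, char c) {
      __m128i eq = _mm_cmpeq_epi8(v, _mm_set1_epi8(c));
      return _mm_movemask_epi8(eq) != 0;
    }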
@@ -3360,9 +3190,8 @@ _mm_cmplt_epi32(__m128i __a, __m128i __b)
/// \returns A 128-bit vector of [2 x double] whose lower 64 bits contain the
/// converted value of the second operand. The upper 64 bits are copied from
/// the upper 64 bits of the first operand.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_cvtsi64_sd(__m128d __a, long long __b)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_cvtsi64_sd(__m128d __a,
+ long long __b) {
__a[0] = __b;
return __a;
}
@@ -3378,9 +3207,7 @@ _mm_cvtsi64_sd(__m128d __a, long long __b)
/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the
/// conversion.
/// \returns A 64-bit signed integer containing the converted value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvtsd_si64(__m128d __a)
-{
+static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvtsd_si64(__m128d __a) {
return __builtin_ia32_cvtsd2si64((__v2df)__a);
}
@@ -3396,9 +3223,7 @@ _mm_cvtsd_si64(__m128d __a)
/// A 128-bit vector of [2 x double]. The lower 64 bits are used in the
/// conversion.
/// \returns A 64-bit signed integer containing the converted value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvttsd_si64(__m128d __a)
-{
+static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvttsd_si64(__m128d __a) {
return __builtin_ia32_cvttsd2si64((__v2df)__a);
}
#endif
@@ -3412,10 +3237,8 @@ _mm_cvttsd_si64(__m128d __a)
/// \param __a
/// A 128-bit integer vector.
/// \returns A 128-bit vector of [4 x float] containing the converted values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_cvtepi32_ps(__m128i __a)
-{
- return (__m128)__builtin_convertvector((__v4si)__a, __v4sf);
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_cvtepi32_ps(__m128i __a) {
+ return (__m128) __builtin_convertvector((__v4si)__a, __v4sf);
}
/// Converts a vector of [4 x float] into a vector of [4 x i32].
@@ -3428,9 +3251,7 @@ _mm_cvtepi32_ps(__m128i __a)
/// A 128-bit vector of [4 x float].
/// \returns A 128-bit integer vector of [4 x i32] containing the converted
/// values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtps_epi32(__m128 __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtps_epi32(__m128 __a) {
return (__m128i)__builtin_ia32_cvtps2dq((__v4sf)__a);
}
@@ -3445,9 +3266,7 @@ _mm_cvtps_epi32(__m128 __a)
/// \param __a
/// A 128-bit vector of [4 x float].
/// \returns A 128-bit vector of [4 x i32] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvttps_epi32(__m128 __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvttps_epi32(__m128 __a) {
return (__m128i)__builtin_ia32_cvttps2dq((__v4sf)__a);
}
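
The plain conversion rounds according to the current MXCSR rounding mode (round-to-nearest-even by default), while the extra-t truncating form always rounds toward zero like a C cast. A sketch contrasting the two (the demo values are ours):

    #include <emmintrin.h>

    /* For lanes (-2.5, -1.5, 1.5, 2.5):
       _mm_cvtps_epi32  -> (-2, -2, 2, 2)  (nearest even)
       _mm_cvttps_epi32 -> (-2, -1, 1, 2)  (toward zero)  */
    static inline void round_vs_truncate(__m128i *r, __m128i *t) {
      __m128 v = _mm_set_ps(2.5f, 1.5f, -1.5f, -2.5f);
      *r = _mm_cvtps_epi32(v);
      *t = _mm_cvttps_epi32(v);
    }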
@@ -3461,29 +3280,24 @@ _mm_cvttps_epi32(__m128 __a)
/// \param __a
/// A 32-bit signed integer operand.
/// \returns A 128-bit vector of [4 x i32].
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtsi32_si128(int __a)
-{
- return __extension__ (__m128i)(__v4si){ __a, 0, 0, 0 };
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtsi32_si128(int __a) {
+ return __extension__(__m128i)(__v4si){__a, 0, 0, 0};
}
-#ifdef __x86_64__
/// Returns a vector of [2 x i64] where the lower element is the input
/// operand and the upper element is zero.
///
/// \headerfile <x86intrin.h>
///
-/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction.
+/// This intrinsic corresponds to the <c> VMOVQ / MOVQ </c> instruction
+/// in 64-bit mode.
///
/// \param __a
/// A 64-bit signed integer operand containing the value to be converted.
/// \returns A 128-bit vector of [2 x i64] containing the converted value.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtsi64_si128(long long __a)
-{
- return __extension__ (__m128i)(__v2di){ __a, 0 };
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtsi64_si128(long long __a) {
+ return __extension__(__m128i)(__v2di){__a, 0};
}
-#endif
/// Moves the least significant 32 bits of a vector of [4 x i32] to a
/// 32-bit signed integer value.
@@ -3496,14 +3310,11 @@ _mm_cvtsi64_si128(long long __a)
/// A vector of [4 x i32]. The least significant 32 bits are moved to the
/// destination.
/// \returns A 32-bit signed integer containing the moved value.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_cvtsi128_si32(__m128i __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_cvtsi128_si32(__m128i __a) {
__v4si __b = (__v4si)__a;
return __b[0];
}
-#ifdef __x86_64__
/// Moves the least significant 64 bits of a vector of [2 x i64] to a
/// 64-bit signed integer value.
///
@@ -3515,12 +3326,9 @@ _mm_cvtsi128_si32(__m128i __a)
/// A vector of [2 x i64]. The least significant 64 bits are moved to the
/// destination.
/// \returns A 64-bit signed integer containing the moved value.
-static __inline__ long long __DEFAULT_FN_ATTRS
-_mm_cvtsi128_si64(__m128i __a)
-{
+static __inline__ long long __DEFAULT_FN_ATTRS _mm_cvtsi128_si64(__m128i __a) {
return __a[0];
}
-#endif
/// Moves packed integer values from an aligned 128-bit memory location
/// to elements in a 128-bit integer vector.
@@ -3533,8 +3341,7 @@ _mm_cvtsi128_si64(__m128i __a)
/// An aligned pointer to a memory location containing integer values.
/// \returns A 128-bit integer vector containing the moved values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_load_si128(__m128i const *__p)
-{
+_mm_load_si128(__m128i const *__p) {
return *__p;
}
@@ -3549,12 +3356,11 @@ _mm_load_si128(__m128i const *__p)
/// A pointer to a memory location containing integer values.
/// \returns A 128-bit integer vector containing the moved values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadu_si128(__m128i_u const *__p)
-{
+_mm_loadu_si128(__m128i_u const *__p) {
struct __loadu_si128 {
__m128i_u __v;
} __attribute__((__packed__, __may_alias__));
- return ((const struct __loadu_si128*)__p)->__v;
+ return ((const struct __loadu_si128 *)__p)->__v;
}
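
The packed, may_alias wrapper struct is what licenses the unaligned, type-punning load: it drops the 16-byte alignment assumption (so the compiler emits MOVDQU) without violating strict aliasing. A usage sketch (load_at is an illustrative name):

    #include <emmintrin.h>
    #include <stddef.h>

    /* Load 16 bytes from an arbitrary, possibly unaligned offset. */
    static inline __m128i load_at(const unsigned char *p, size_t off) {
      return _mm_loadu_si128((const __m128i_u *)(p + off));
    }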
/// Returns a vector of [2 x i64] where the lower element is taken from
@@ -3570,12 +3376,12 @@ _mm_loadu_si128(__m128i_u const *__p)
/// \returns A 128-bit vector of [2 x i64]. The lower order bits contain the
/// moved value. The higher order bits are cleared.
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_loadl_epi64(__m128i_u const *__p)
-{
+_mm_loadl_epi64(__m128i_u const *__p) {
struct __mm_loadl_epi64_struct {
long long __u;
} __attribute__((__packed__, __may_alias__));
- return __extension__ (__m128i) { ((const struct __mm_loadl_epi64_struct*)__p)->__u, 0};
+ return __extension__(__m128i){
+ ((const struct __mm_loadl_epi64_struct *)__p)->__u, 0};
}
/// Generates a 128-bit vector of [4 x i32] with unspecified content.
@@ -3587,9 +3393,7 @@ _mm_loadl_epi64(__m128i_u const *__p)
/// This intrinsic has no corresponding instruction.
///
/// \returns A 128-bit vector of [4 x i32] with unspecified content.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_undefined_si128(void)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_undefined_si128(void) {
return (__m128i)__builtin_ia32_undef128();
}
@@ -3609,10 +3413,9 @@ _mm_undefined_si128(void)
/// destination vector of [2 x i64].
/// \returns An initialized 128-bit vector of [2 x i64] containing the values
/// provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi64x(long long __q1, long long __q0)
-{
- return __extension__ (__m128i)(__v2di){ __q0, __q1 };
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi64x(long long __q1,
+ long long __q0) {
+ return __extension__(__m128i)(__v2di){__q0, __q1};
}
/// Initializes both 64-bit values in a 128-bit vector of [2 x i64] with
@@ -3631,9 +3434,8 @@ _mm_set_epi64x(long long __q1, long long __q0)
/// destination vector of [2 x i64].
/// \returns An initialized 128-bit vector of [2 x i64] containing the values
/// provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi64(__m64 __q1, __m64 __q0)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi64(__m64 __q1,
+ __m64 __q0) {
return _mm_set_epi64x((long long)__q1, (long long)__q0);
}
@@ -3659,10 +3461,9 @@ _mm_set_epi64(__m64 __q1, __m64 __q0)
/// vector.
/// \returns An initialized 128-bit vector of [4 x i32] containing the values
/// provided in the operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi32(int __i3, int __i2, int __i1, int __i0)
-{
- return __extension__ (__m128i)(__v4si){ __i0, __i1, __i2, __i3};
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set_epi32(int __i3, int __i2,
+ int __i1, int __i0) {
+ return __extension__(__m128i)(__v4si){__i0, __i1, __i2, __i3};
}
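
A recurring pitfall: _mm_set_epi32 lists elements high-to-low, so element 0 of the vector (bits [31:0]) is the last argument; the _mm_setr_* variants further below are the low-to-high mirrors. A sketch (demo_set_order is our name):

    #include <emmintrin.h>

    /* Returns 0: the last argument of _mm_set_epi32 lands in the
       lowest lane, which _mm_cvtsi128_si32 reads back. */
    static inline int demo_set_order(void) {
      __m128i v = _mm_set_epi32(3, 2, 1, 0);
      return _mm_cvtsi128_si32(v);
    }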
/// Initializes the 16-bit values in a 128-bit vector of [8 x i16] with
@@ -3700,9 +3501,10 @@ _mm_set_epi32(int __i3, int __i2, int __i1, int __i0)
/// \returns An initialized 128-bit vector of [8 x i16] containing the values
/// provided in the operands.
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3, short __w2, short __w1, short __w0)
-{
- return __extension__ (__m128i)(__v8hi){ __w0, __w1, __w2, __w3, __w4, __w5, __w6, __w7 };
+_mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3,
+ short __w2, short __w1, short __w0) {
+ return __extension__(__m128i)(__v8hi){__w0, __w1, __w2, __w3,
+ __w4, __w5, __w6, __w7};
}
/// Initializes the 8-bit values in a 128-bit vector of [16 x i8] with
@@ -3748,9 +3550,12 @@ _mm_set_epi16(short __w7, short __w6, short __w5, short __w4, short __w3, short
/// \returns An initialized 128-bit vector of [16 x i8] containing the values
/// provided in the operands.
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11, char __b10, char __b9, char __b8, char __b7, char __b6, char __b5, char __b4, char __b3, char __b2, char __b1, char __b0)
-{
- return __extension__ (__m128i)(__v16qi){ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7, __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15 };
+_mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11,
+ char __b10, char __b9, char __b8, char __b7, char __b6, char __b5,
+ char __b4, char __b3, char __b2, char __b1, char __b0) {
+ return __extension__(__m128i)(__v16qi){
+ __b0, __b1, __b2, __b3, __b4, __b5, __b6, __b7,
+ __b8, __b9, __b10, __b11, __b12, __b13, __b14, __b15};
}
/// Initializes both values in a 128-bit integer vector with the
@@ -3766,9 +3571,7 @@ _mm_set_epi8(char __b15, char __b14, char __b13, char __b12, char __b11, char __
/// vector.
/// \returns An initialized 128-bit integer vector of [2 x i64] with both
/// elements containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi64x(long long __q)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi64x(long long __q) {
return _mm_set_epi64x(__q, __q);
}
@@ -3785,9 +3588,7 @@ _mm_set1_epi64x(long long __q)
/// vector.
/// \returns An initialized 128-bit vector of [2 x i64] with all elements
/// containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi64(__m64 __q)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi64(__m64 __q) {
return _mm_set_epi64(__q, __q);
}
@@ -3804,9 +3605,7 @@ _mm_set1_epi64(__m64 __q)
/// vector.
/// \returns An initialized 128-bit vector of [4 x i32] with all elements
/// containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi32(int __i)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi32(int __i) {
return _mm_set_epi32(__i, __i, __i, __i);
}
@@ -3823,9 +3622,7 @@ _mm_set1_epi32(int __i)
/// vector.
/// \returns An initialized 128-bit vector of [8 x i16] with all elements
/// containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi16(short __w)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi16(short __w) {
return _mm_set_epi16(__w, __w, __w, __w, __w, __w, __w, __w);
}
@@ -3842,10 +3639,9 @@ _mm_set1_epi16(short __w)
/// vector.
/// \returns An initialized 128-bit vector of [16 x i8] with all elements
/// containing the value provided in the operand.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_set1_epi8(char __b)
-{
- return _mm_set_epi8(__b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_set1_epi8(char __b) {
+ return _mm_set_epi8(__b, __b, __b, __b, __b, __b, __b, __b, __b, __b, __b,
+ __b, __b, __b, __b, __b);
}
/// Constructs a 128-bit integer vector, initialized in reverse order
@@ -3862,9 +3658,8 @@ _mm_set1_epi8(char __b)
/// A 64-bit integral value used to initialize the upper 64 bits of the
/// result.
/// \returns An initialized 128-bit integer vector.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi64(__m64 __q0, __m64 __q1)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setr_epi64(__m64 __q0,
+ __m64 __q1) {
return _mm_set_epi64(__q1, __q0);
}
@@ -3885,9 +3680,9 @@ _mm_setr_epi64(__m64 __q0, __m64 __q1)
/// \param __i3
/// A 32-bit integral value used to initialize bits [127:96] of the result.
/// \returns An initialized 128-bit integer vector.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi32(int __i0, int __i1, int __i2, int __i3)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setr_epi32(int __i0, int __i1,
+ int __i2,
+ int __i3) {
return _mm_set_epi32(__i3, __i2, __i1, __i0);
}
@@ -3917,8 +3712,8 @@ _mm_setr_epi32(int __i0, int __i1, int __i2, int __i3)
/// A 16-bit integral value used to initialize bits [127:112] of the result.
/// \returns An initialized 128-bit integer vector.
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4, short __w5, short __w6, short __w7)
-{
+_mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4,
+ short __w5, short __w6, short __w7) {
return _mm_set_epi16(__w7, __w6, __w5, __w4, __w3, __w2, __w1, __w0);
}
@@ -3964,9 +3759,11 @@ _mm_setr_epi16(short __w0, short __w1, short __w2, short __w3, short __w4, short
/// An 8-bit integral value used to initialize bits [127:120] of the result.
/// \returns An initialized 128-bit integer vector.
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5, char __b6, char __b7, char __b8, char __b9, char __b10, char __b11, char __b12, char __b13, char __b14, char __b15)
-{
- return _mm_set_epi8(__b15, __b14, __b13, __b12, __b11, __b10, __b9, __b8, __b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
+_mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5,
+ char __b6, char __b7, char __b8, char __b9, char __b10,
+ char __b11, char __b12, char __b13, char __b14, char __b15) {
+ return _mm_set_epi8(__b15, __b14, __b13, __b12, __b11, __b10, __b9, __b8,
+ __b7, __b6, __b5, __b4, __b3, __b2, __b1, __b0);
}
/// Creates a 128-bit integer vector initialized to zero.
@@ -3977,10 +3774,8 @@ _mm_setr_epi8(char __b0, char __b1, char __b2, char __b3, char __b4, char __b5,
///
/// \returns An initialized 128-bit integer vector with all elements set to
/// zero.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_setzero_si128(void)
-{
- return __extension__ (__m128i)(__v2di){ 0LL, 0LL };
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_setzero_si128(void) {
+ return __extension__(__m128i)(__v2di){0LL, 0LL};
}
/// Stores a 128-bit integer vector to a memory location aligned on a
@@ -3995,9 +3790,8 @@ _mm_setzero_si128(void)
/// values.
/// \param __b
/// A 128-bit integer vector containing the values to be moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_store_si128(__m128i *__p, __m128i __b)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_store_si128(__m128i *__p,
+ __m128i __b) {
*__p = __b;
}
@@ -4011,13 +3805,12 @@ _mm_store_si128(__m128i *__p, __m128i __b)
/// A pointer to a memory location that will receive the integer values.
/// \param __b
/// A 128-bit integer vector containing the values to be moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si128(__m128i_u *__p, __m128i __b)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si128(__m128i_u *__p,
+ __m128i __b) {
struct __storeu_si128 {
__m128i_u __v;
} __attribute__((__packed__, __may_alias__));
- ((struct __storeu_si128*)__p)->__v = __b;
+ ((struct __storeu_si128 *)__p)->__v = __b;
}
/// Stores a 64-bit integer value from the low element of a 128-bit integer
@@ -4032,13 +3825,12 @@ _mm_storeu_si128(__m128i_u *__p, __m128i __b)
/// location does not have to be aligned.
/// \param __b
/// A 128-bit integer vector containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si64(void *__p, __m128i __b)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si64(void *__p,
+ __m128i __b) {
struct __storeu_si64 {
long long __v;
} __attribute__((__packed__, __may_alias__));
- ((struct __storeu_si64*)__p)->__v = ((__v2di)__b)[0];
+ ((struct __storeu_si64 *)__p)->__v = ((__v2di)__b)[0];
}
/// Stores a 32-bit integer value from the low element of a 128-bit integer
@@ -4053,13 +3845,12 @@ _mm_storeu_si64(void *__p, __m128i __b)
/// location does not have to be aligned.
/// \param __b
/// A 128-bit integer vector containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si32(void *__p, __m128i __b)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si32(void *__p,
+ __m128i __b) {
struct __storeu_si32 {
int __v;
} __attribute__((__packed__, __may_alias__));
- ((struct __storeu_si32*)__p)->__v = ((__v4si)__b)[0];
+ ((struct __storeu_si32 *)__p)->__v = ((__v4si)__b)[0];
}
/// Stores a 16-bit integer value from the low element of a 128-bit integer
@@ -4074,13 +3865,12 @@ _mm_storeu_si32(void *__p, __m128i __b)
/// location does not have to be aligned.
/// \param __b
/// A 128-bit integer vector containing the value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storeu_si16(void *__p, __m128i __b)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storeu_si16(void *__p,
+ __m128i __b) {
struct __storeu_si16 {
short __v;
} __attribute__((__packed__, __may_alias__));
- ((struct __storeu_si16*)__p)->__v = ((__v8hi)__b)[0];
+ ((struct __storeu_si16 *)__p)->__v = ((__v8hi)__b)[0];
}
/// Moves bytes selected by the mask from the first operand to the
@@ -4104,9 +3894,9 @@ _mm_storeu_si16(void *__p, __m128i __b)
/// \param __p
/// A pointer to an unaligned 128-bit memory location where the specified
/// values are moved.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_maskmoveu_si128(__m128i __d, __m128i __n, char *__p)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_maskmoveu_si128(__m128i __d,
+ __m128i __n,
+ char *__p) {
__builtin_ia32_maskmovdqu((__v16qi)__d, (__v16qi)__n, __p);
}
@@ -4123,13 +3913,12 @@ _mm_maskmoveu_si128(__m128i __d, __m128i __n, char *__p)
/// \param __a
/// A 128-bit integer vector of [2 x i64]. The lower 64 bits contain the
/// value to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_storel_epi64(__m128i_u *__p, __m128i __a)
-{
+static __inline__ void __DEFAULT_FN_ATTRS _mm_storel_epi64(__m128i_u *__p,
+ __m128i __a) {
struct __mm_storel_epi64_struct {
long long __u;
} __attribute__((__packed__, __may_alias__));
- ((struct __mm_storel_epi64_struct*)__p)->__u = __a[0];
+ ((struct __mm_storel_epi64_struct *)__p)->__u = __a[0];
}
/// Stores a 128-bit floating point vector of [2 x double] to a 128-bit
@@ -4146,10 +3935,9 @@ _mm_storel_epi64(__m128i_u *__p, __m128i __a)
/// A pointer to the 128-bit aligned memory location used to store the value.
/// \param __a
/// A vector of [2 x double] containing the 64-bit values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_stream_pd(double *__p, __m128d __a)
-{
- __builtin_nontemporal_store((__v2df)__a, (__v2df*)__p);
+static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_pd(double *__p,
+ __m128d __a) {
+ __builtin_nontemporal_store((__v2df)__a, (__v2df *)__p);
}
/// Stores a 128-bit integer vector to a 128-bit aligned memory location.
@@ -4165,10 +3953,9 @@ _mm_stream_pd(double *__p, __m128d __a)
/// A pointer to the 128-bit aligned memory location used to store the value.
/// \param __a
/// A 128-bit integer vector containing the values to be stored.
-static __inline__ void __DEFAULT_FN_ATTRS
-_mm_stream_si128(__m128i *__p, __m128i __a)
-{
- __builtin_nontemporal_store((__v2di)__a, (__v2di*)__p);
+static __inline__ void __DEFAULT_FN_ATTRS _mm_stream_si128(__m128i *__p,
+ __m128i __a) {
+ __builtin_nontemporal_store((__v2di)__a, (__v2di *)__p);
}
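
Non-temporal stores bypass the cache and are weakly ordered, so a streaming loop is normally closed with a store fence before anything that publishes the data. A sketch under those assumptions (stream_fill is our name; dst must be 16-byte aligned):

    #include <emmintrin.h>

    /* Fill n vectors without polluting the cache, then fence so the
       weakly-ordered stores are globally visible. */
    static inline void stream_fill(__m128i *dst, __m128i v, int n) {
      for (int i = 0; i < n; ++i)
        _mm_stream_si128(dst + i, v);
      _mm_sfence(); /* declared in <xmmintrin.h>, included by this header */
    }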
/// Stores a 32-bit integer value in the specified memory location.
@@ -4184,9 +3971,9 @@ _mm_stream_si128(__m128i *__p, __m128i __a)
/// A pointer to the 32-bit memory location used to store the value.
/// \param __a
/// A 32-bit integer containing the value to be stored.
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
-_mm_stream_si32(int *__p, int __a)
-{
+static __inline__ void
+ __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
+ _mm_stream_si32(int *__p, int __a) {
__builtin_ia32_movnti(__p, __a);
}
@@ -4204,9 +3991,9 @@ _mm_stream_si32(int *__p, int __a)
/// A pointer to the 64-bit memory location used to store the value.
/// \param __a
/// A 64-bit integer containing the value to be stored.
-static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
-_mm_stream_si64(long long *__p, long long __a)
-{
+static __inline__ void
+ __attribute__((__always_inline__, __nodebug__, __target__("sse2")))
+ _mm_stream_si64(long long *__p, long long __a) {
__builtin_ia32_movnti64(__p, __a);
}
#endif
@@ -4225,7 +4012,7 @@ extern "C" {
/// \param __p
/// A pointer to the memory location used to identify the cache line to be
/// flushed.
-void _mm_clflush(void const * __p);
+void _mm_clflush(void const *__p);
/// Forces strong memory ordering (serialization) between load
/// instructions preceding this instruction and load instructions following
@@ -4275,9 +4062,8 @@ void _mm_mfence(void);
/// than 0x80 are saturated to 0x80. The converted [8 x i8] values are
/// written to the higher 64 bits of the result.
/// \returns A 128-bit vector of [16 x i8] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packs_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi16(__m128i __a,
+ __m128i __b) {
return (__m128i)__builtin_ia32_packsswb128((__v8hi)__a, (__v8hi)__b);
}
@@ -4303,9 +4089,8 @@ _mm_packs_epi16(__m128i __a, __m128i __b)
/// less than 0x8000 are saturated to 0x8000. The converted [4 x i16] values
/// are written to the higher 64 bits of the result.
/// \returns A 128-bit vector of [8 x i16] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packs_epi32(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packs_epi32(__m128i __a,
+ __m128i __b) {
return (__m128i)__builtin_ia32_packssdw128((__v4si)__a, (__v4si)__b);
}
@@ -4331,9 +4116,8 @@ _mm_packs_epi32(__m128i __a, __m128i __b)
/// than 0x00 are saturated to 0x00. The converted [8 x i8] values are
/// written to the higher 64 bits of the result.
/// \returns A 128-bit vector of [16 x i8] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packus_epi16(__m128i __a, __m128i __b)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi16(__m128i __a,
+ __m128i __b) {
return (__m128i)__builtin_ia32_packuswb128((__v8hi)__a, (__v8hi)__b);
}
@@ -4342,25 +4126,29 @@ _mm_packus_epi16(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
+/// \code
+/// __m128i _mm_extract_epi16(__m128i a, const int imm);
+/// \endcode
+///
/// This intrinsic corresponds to the <c> VPEXTRW / PEXTRW </c> instruction.
///
-/// \param __a
+/// \param a
/// A 128-bit integer vector.
-/// \param __imm
-/// An immediate value. Bits [2:0] selects values from \a __a to be assigned
+/// \param imm
+///    An immediate value. Bits [2:0] select values from \a a to be assigned
///    to bits [15:0] of the result. \n
-/// 000: assign values from bits [15:0] of \a __a. \n
-/// 001: assign values from bits [31:16] of \a __a. \n
-/// 010: assign values from bits [47:32] of \a __a. \n
-/// 011: assign values from bits [63:48] of \a __a. \n
-/// 100: assign values from bits [79:64] of \a __a. \n
-/// 101: assign values from bits [95:80] of \a __a. \n
-/// 110: assign values from bits [111:96] of \a __a. \n
-/// 111: assign values from bits [127:112] of \a __a.
+/// 000: assign values from bits [15:0] of \a a. \n
+/// 001: assign values from bits [31:16] of \a a. \n
+/// 010: assign values from bits [47:32] of \a a. \n
+/// 011: assign values from bits [63:48] of \a a. \n
+/// 100: assign values from bits [79:64] of \a a. \n
+/// 101: assign values from bits [95:80] of \a a. \n
+/// 110: assign values from bits [111:96] of \a a. \n
+/// 111: assign values from bits [127:112] of \a a.
/// \returns An integer, whose lower 16 bits are selected from the 128-bit
/// integer vector parameter and the remaining bits are assigned zeros.
-#define _mm_extract_epi16(a, imm) \
- ((int)(unsigned short)__builtin_ia32_vec_ext_v8hi((__v8hi)(__m128i)(a), \
+#define _mm_extract_epi16(a, imm) \
+ ((int)(unsigned short)__builtin_ia32_vec_ext_v8hi((__v8hi)(__m128i)(a), \
(int)(imm)))
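
Both this macro and the _mm_insert_epi16 below require the index to be a compile-time constant, since it is encoded into the instruction. A round-trip sketch (demo_insert_extract is our name):

    #include <emmintrin.h>

    /* Replace word 3 of v, then read it back zero-extended: returns 1234. */
    static inline int demo_insert_extract(__m128i v) {
      v = _mm_insert_epi16(v, 1234, 3);
      return _mm_extract_epi16(v, 3);
    }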
/// Constructs a 128-bit integer vector by first making a copy of the
@@ -4370,21 +4158,25 @@ _mm_packus_epi16(__m128i __a, __m128i __b)
///
/// \headerfile <x86intrin.h>
///
+/// \code
+/// __m128i _mm_insert_epi16(__m128i a, int b, const int imm);
+/// \endcode
+///
/// This intrinsic corresponds to the <c> VPINSRW / PINSRW </c> instruction.
///
-/// \param __a
+/// \param a
/// A 128-bit integer vector of [8 x i16]. This vector is copied to the
/// result and then one of the eight elements in the result is replaced by
-/// the lower 16 bits of \a __b.
-/// \param __b
+/// the lower 16 bits of \a b.
+/// \param b
/// An integer. The lower 16 bits of this parameter are written to the
-/// result beginning at an offset specified by \a __imm.
-/// \param __imm
+/// result beginning at an offset specified by \a imm.
+/// \param imm
/// An immediate value specifying the bit offset in the result at which the
-/// lower 16 bits of \a __b are written.
+/// lower 16 bits of \a b are written.
/// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi16(a, b, imm) \
- ((__m128i)__builtin_ia32_vec_set_v8hi((__v8hi)(__m128i)(a), (int)(b), \
+#define _mm_insert_epi16(a, b, imm) \
+ ((__m128i)__builtin_ia32_vec_set_v8hi((__v8hi)(__m128i)(a), (int)(b), \
(int)(imm)))
/// Copies the values of the most significant bits from each 8-bit
@@ -4399,9 +4191,7 @@ _mm_packus_epi16(__m128i __a, __m128i __b)
/// A 128-bit integer vector containing the values with bits to be extracted.
/// \returns The most significant bits from each 8-bit element in \a __a,
/// written to bits [15:0]. The other bits are assigned zeros.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_movemask_epi8(__m128i __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_epi8(__m128i __a) {
return __builtin_ia32_pmovmskb128((__v16qi)__a);
}
@@ -4431,9 +4221,12 @@ _mm_movemask_epi8(__m128i __a)
/// 00: assign values from bits [31:0] of \a a. \n
/// 01: assign values from bits [63:32] of \a a. \n
/// 10: assign values from bits [95:64] of \a a. \n
-/// 11: assign values from bits [127:96] of \a a.
+/// 11: assign values from bits [127:96] of \a a. \n
+/// Note: To generate a mask, you can use the \c _MM_SHUFFLE macro.
+/// <c>_MM_SHUFFLE(b6, b4, b2, b0)</c> can create an 8-bit mask of the form
+/// <c>[b6, b4, b2, b0]</c>.
/// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shuffle_epi32(a, imm) \
+#define _mm_shuffle_epi32(a, imm) \
((__m128i)__builtin_ia32_pshufd((__v4si)(__m128i)(a), (int)(imm)))
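
As the note above says, _MM_SHUFFLE (defined in <xmmintrin.h>, which this header includes) builds the immediate; _MM_SHUFFLE(3, 2, 1, 0) is the identity. A sketch (reverse_epi32 is our name):

    #include <emmintrin.h>

    /* Reverse the four 32-bit lanes: lane 0 of the result takes lane 3
       of v, and so on. */
    static inline __m128i reverse_epi32(__m128i v) {
      return _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 1, 2, 3));
    }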
/// Constructs a 128-bit integer vector by shuffling four lower 16-bit
@@ -4462,8 +4255,11 @@ _mm_movemask_epi8(__m128i __a)
/// 01: assign values from bits [31:16] of \a a. \n
/// 10: assign values from bits [47:32] of \a a. \n
/// 11: assign values from bits [63:48] of \a a. \n
+/// Note: To generate a mask, you can use the \c _MM_SHUFFLE macro.
+/// <c>_MM_SHUFFLE(b6, b4, b2, b0)</c> can create an 8-bit mask of the form
+/// <c>[b6, b4, b2, b0]</c>.
/// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shufflelo_epi16(a, imm) \
+#define _mm_shufflelo_epi16(a, imm) \
((__m128i)__builtin_ia32_pshuflw((__v8hi)(__m128i)(a), (int)(imm)))
/// Constructs a 128-bit integer vector by shuffling four upper 16-bit
@@ -4492,8 +4288,11 @@ _mm_movemask_epi8(__m128i __a)
/// 01: assign values from bits [95:80] of \a a. \n
/// 10: assign values from bits [111:96] of \a a. \n
/// 11: assign values from bits [127:112] of \a a. \n
+/// Note: To generate a mask, you can use the \c _MM_SHUFFLE macro.
+/// <c>_MM_SHUFFLE(b6, b4, b2, b0)</c> can create an 8-bit mask of the form
+/// <c>[b6, b4, b2, b0]</c>.
/// \returns A 128-bit integer vector containing the shuffled values.
-#define _mm_shufflehi_epi16(a, imm) \
+#define _mm_shufflehi_epi16(a, imm) \
((__m128i)__builtin_ia32_pshufhw((__v8hi)(__m128i)(a), (int)(imm)))
/// Unpacks the high-order (index 8-15) values from two 128-bit vectors
@@ -4525,10 +4324,11 @@ _mm_movemask_epi8(__m128i __a)
/// Bits [119:112] are written to bits [111:104] of the result. \n
/// Bits [127:120] are written to bits [127:120] of the result.
/// \returns A 128-bit vector of [16 x i8] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi8(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 8, 16+8, 9, 16+9, 10, 16+10, 11, 16+11, 12, 16+12, 13, 16+13, 14, 16+14, 15, 16+15);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi8(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_shufflevector(
+ (__v16qi)__a, (__v16qi)__b, 8, 16 + 8, 9, 16 + 9, 10, 16 + 10, 11,
+ 16 + 11, 12, 16 + 12, 13, 16 + 13, 14, 16 + 14, 15, 16 + 15);
}
/// Unpacks the high-order (index 4-7) values from two 128-bit vectors of
@@ -4552,10 +4352,10 @@ _mm_unpackhi_epi8(__m128i __a, __m128i __b)
/// Bits [111:96] are written to bits [95:80] of the result. \n
/// Bits [127:112] are written to bits [127:112] of the result.
/// \returns A 128-bit vector of [8 x i16] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi16(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 4, 8+4, 5, 8+5, 6, 8+6, 7, 8+7);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi16(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 4, 8 + 4, 5,
+ 8 + 5, 6, 8 + 6, 7, 8 + 7);
}
/// Unpacks the high-order (index 2,3) values from two 128-bit vectors of
@@ -4575,10 +4375,10 @@ _mm_unpackhi_epi16(__m128i __a, __m128i __b)
///    Bits [95:64] are written to bits [63:32] of the destination. \n
/// Bits [127:96] are written to bits [127:96] of the destination.
/// \returns A 128-bit vector of [4 x i32] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi32(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 2, 4+2, 3, 4+3);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi32(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 2, 4 + 2, 3,
+ 4 + 3);
}
/// Unpacks the high-order 64-bit elements from two 128-bit vectors of
@@ -4596,10 +4396,9 @@ _mm_unpackhi_epi32(__m128i __a, __m128i __b)
/// A 128-bit vector of [2 x i64]. \n
/// Bits [127:64] are written to bits [127:64] of the destination.
/// \returns A 128-bit vector of [2 x i64] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpackhi_epi64(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 1, 2+1);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpackhi_epi64(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 1, 2 + 1);
}
/// Unpacks the low-order (index 0-7) values from two 128-bit vectors of
@@ -4631,10 +4430,11 @@ _mm_unpackhi_epi64(__m128i __a, __m128i __b)
/// Bits [55:48] are written to bits [111:104] of the result. \n
/// Bits [63:56] are written to bits [127:120] of the result.
/// \returns A 128-bit vector of [16 x i8] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi8(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_shufflevector((__v16qi)__a, (__v16qi)__b, 0, 16+0, 1, 16+1, 2, 16+2, 3, 16+3, 4, 16+4, 5, 16+5, 6, 16+6, 7, 16+7);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi8(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_shufflevector(
+ (__v16qi)__a, (__v16qi)__b, 0, 16 + 0, 1, 16 + 1, 2, 16 + 2, 3, 16 + 3, 4,
+ 16 + 4, 5, 16 + 5, 6, 16 + 6, 7, 16 + 7);
}
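
Interleaving with a zero vector is the standard SSE2 way to zero-extend unsigned bytes to words (the job PMOVZXBW later took over in SSE4.1). A sketch (widen_u8_to_u16 is our name):

    #include <emmintrin.h>

    /* Zero-extend 16 unsigned bytes to two vectors of eight u16. */
    static inline void widen_u8_to_u16(__m128i v, __m128i *lo, __m128i *hi) {
      __m128i z = _mm_setzero_si128();
      *lo = _mm_unpacklo_epi8(v, z); /* bytes 0..7  */
      *hi = _mm_unpackhi_epi8(v, z); /* bytes 8..15 */
    }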
/// Unpacks the low-order (index 0-3) values from each of the two 128-bit
@@ -4659,10 +4459,10 @@ _mm_unpacklo_epi8(__m128i __a, __m128i __b)
/// Bits [47:32] are written to bits [95:80] of the result. \n
/// Bits [63:48] are written to bits [127:112] of the result.
/// \returns A 128-bit vector of [8 x i16] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi16(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 0, 8+0, 1, 8+1, 2, 8+2, 3, 8+3);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi16(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_shufflevector((__v8hi)__a, (__v8hi)__b, 0, 8 + 0, 1,
+ 8 + 1, 2, 8 + 2, 3, 8 + 3);
}
/// Unpacks the low-order (index 0,1) values from two 128-bit vectors of
@@ -4682,10 +4482,10 @@ _mm_unpacklo_epi16(__m128i __a, __m128i __b)
///    Bits [31:0] are written to bits [63:32] of the destination. \n
/// Bits [63:32] are written to bits [127:96] of the destination.
/// \returns A 128-bit vector of [4 x i32] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi32(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 0, 4+0, 1, 4+1);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi32(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_shufflevector((__v4si)__a, (__v4si)__b, 0, 4 + 0, 1,
+ 4 + 1);
}
/// Unpacks the low-order 64-bit elements from two 128-bit vectors of
@@ -4703,10 +4503,9 @@ _mm_unpacklo_epi32(__m128i __a, __m128i __b)
/// A 128-bit vector of [2 x i64]. \n
/// Bits [63:0] are written to bits [127:64] of the destination. \n
/// \returns A 128-bit vector of [2 x i64] containing the interleaved values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_unpacklo_epi64(__m128i __a, __m128i __b)
-{
- return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 0, 2+0);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_unpacklo_epi64(__m128i __a,
+ __m128i __b) {
+ return (__m128i)__builtin_shufflevector((__v2di)__a, (__v2di)__b, 0, 2 + 0);
}
/// Returns the lower 64 bits of a 128-bit integer vector as a 64-bit
@@ -4720,9 +4519,7 @@ _mm_unpacklo_epi64(__m128i __a, __m128i __b)
/// A 128-bit integer vector operand. The lower 64 bits are moved to the
/// destination.
/// \returns A 64-bit integer containing the lower 64 bits of the parameter.
-static __inline__ __m64 __DEFAULT_FN_ATTRS
-_mm_movepi64_pi64(__m128i __a)
-{
+static __inline__ __m64 __DEFAULT_FN_ATTRS _mm_movepi64_pi64(__m128i __a) {
return (__m64)__a[0];
}
@@ -4737,10 +4534,8 @@ _mm_movepi64_pi64(__m128i __a)
/// A 64-bit value.
/// \returns A 128-bit integer vector. The lower 64 bits contain the value from
/// the operand. The upper 64 bits are assigned zeros.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_movpi64_epi64(__m64 __a)
-{
- return __extension__ (__m128i)(__v2di){ (long long)__a, 0 };
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_movpi64_epi64(__m64 __a) {
+ return __extension__(__m128i)(__v2di){(long long)__a, 0};
}
/// Moves the lower 64 bits of a 128-bit integer vector to a 128-bit
@@ -4755,9 +4550,7 @@ _mm_movpi64_epi64(__m64 __a)
/// destination.
/// \returns A 128-bit integer vector. The lower 64 bits contain the value from
/// the operand. The upper 64 bits are assigned zeros.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_move_epi64(__m128i __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_move_epi64(__m128i __a) {
return __builtin_shufflevector((__v2di)__a, _mm_setzero_si128(), 0, 2);
}
@@ -4776,10 +4569,9 @@ _mm_move_epi64(__m128i __a)
/// A 128-bit vector of [2 x double]. \n
/// Bits [127:64] are written to bits [127:64] of the destination.
/// \returns A 128-bit vector of [2 x double] containing the interleaved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_unpackhi_pd(__m128d __a, __m128d __b)
-{
- return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 1, 2+1);
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_unpackhi_pd(__m128d __a,
+ __m128d __b) {
+ return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 1, 2 + 1);
}
/// Unpacks the low-order 64-bit elements from two 128-bit vectors
@@ -4797,10 +4589,9 @@ _mm_unpackhi_pd(__m128d __a, __m128d __b)
/// A 128-bit vector of [2 x double]. \n
/// Bits [63:0] are written to bits [127:64] of the destination.
/// \returns A 128-bit vector of [2 x double] containing the interleaved values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_unpacklo_pd(__m128d __a, __m128d __b)
-{
- return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 0, 2+0);
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_unpacklo_pd(__m128d __a,
+ __m128d __b) {
+ return __builtin_shufflevector((__v2df)__a, (__v2df)__b, 0, 2 + 0);
}
/// Extracts the sign bits of the double-precision values in the 128-bit
@@ -4816,13 +4607,10 @@ _mm_unpacklo_pd(__m128d __a, __m128d __b)
/// be extracted.
/// \returns The sign bits from each of the double-precision elements in \a __a,
/// written to bits [1:0]. The remaining bits are assigned values of zero.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_movemask_pd(__m128d __a)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_movemask_pd(__m128d __a) {
return __builtin_ia32_movmskpd((__v2df)__a);
}
-
/// Constructs a 128-bit floating-point vector of [2 x double] from two
/// 128-bit vector parameters of [2 x double], using the immediate-value
/// parameter as a specifier.
@@ -4846,9 +4634,12 @@ _mm_movemask_pd(__m128d __a)
/// Bit[0] = 1: upper element of \a a copied to lower element of result. \n
/// Bit[1] = 0: lower element of \a b copied to upper element of result. \n
/// Bit[1] = 1: upper element of \a b copied to upper element of result. \n
+/// Note: To generate a mask, you can use the \c _MM_SHUFFLE2 macro.
+/// <c>_MM_SHUFFLE2(b1, b0)</c> can create a 2-bit mask of the form
+/// <c>[b1, b0]</c>.
/// \returns A 128-bit vector of [2 x double] containing the shuffled values.
-#define _mm_shuffle_pd(a, b, i) \
- ((__m128d)__builtin_ia32_shufpd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
+#define _mm_shuffle_pd(a, b, i) \
+ ((__m128d)__builtin_ia32_shufpd((__v2df)(__m128d)(a), (__v2df)(__m128d)(b), \
(int)(i)))
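A minimal usage sketch, assuming SSE2; swap_lanes is an illustrative helper,
not part of this header. _MM_SHUFFLE2(0, 1) sets Bit[0] = 1 and Bit[1] = 0,
selecting the upper element of the first operand for the low lane and the
lower element of the second operand for the high lane:

    static __inline__ __m128d swap_lanes(__m128d __v) {
      /* low lane <- upper element of __v, high lane <- lower element */
      return _mm_shuffle_pd(__v, __v, _MM_SHUFFLE2(0, 1));
    }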
/// Casts a 128-bit floating-point vector of [2 x double] into a 128-bit
@@ -4862,9 +4653,7 @@ _mm_movemask_pd(__m128d __a)
/// A 128-bit floating-point vector of [2 x double].
/// \returns A 128-bit floating-point vector of [4 x float] containing the same
/// bitwise pattern as the parameter.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_castpd_ps(__m128d __a)
-{
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_castpd_ps(__m128d __a) {
return (__m128)__a;
}
@@ -4879,9 +4668,7 @@ _mm_castpd_ps(__m128d __a)
/// A 128-bit floating-point vector of [2 x double].
/// \returns A 128-bit integer vector containing the same bitwise pattern as the
/// parameter.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_castpd_si128(__m128d __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_castpd_si128(__m128d __a) {
return (__m128i)__a;
}
@@ -4896,9 +4683,7 @@ _mm_castpd_si128(__m128d __a)
/// A 128-bit floating-point vector of [4 x float].
/// \returns A 128-bit floating-point vector of [2 x double] containing the same
/// bitwise pattern as the parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_castps_pd(__m128 __a)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_castps_pd(__m128 __a) {
return (__m128d)__a;
}
@@ -4913,9 +4698,7 @@ _mm_castps_pd(__m128 __a)
/// A 128-bit floating-point vector of [4 x float].
/// \returns A 128-bit integer vector containing the same bitwise pattern as the
/// parameter.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_castps_si128(__m128 __a)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_castps_si128(__m128 __a) {
return (__m128i)__a;
}
@@ -4930,9 +4713,7 @@ _mm_castps_si128(__m128 __a)
/// A 128-bit integer vector.
/// \returns A 128-bit floating-point vector of [4 x float] containing the same
/// bitwise pattern as the parameter.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_castsi128_ps(__m128i __a)
-{
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_castsi128_ps(__m128i __a) {
return (__m128)__a;
}
@@ -4947,9 +4728,7 @@ _mm_castsi128_ps(__m128i __a)
/// A 128-bit integer vector.
/// \returns A 128-bit floating-point vector of [2 x double] containing the same
/// bitwise pattern as the parameter.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_castsi128_pd(__m128i __a)
-{
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_castsi128_pd(__m128i __a) {
return (__m128d)__a;
}
@@ -4974,12 +4753,13 @@ void _mm_pause(void);
#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
-#define _MM_DENORMALS_ZERO_ON (0x0040U)
-#define _MM_DENORMALS_ZERO_OFF (0x0000U)
+#define _MM_DENORMALS_ZERO_ON (0x0040U)
+#define _MM_DENORMALS_ZERO_OFF (0x0000U)
#define _MM_DENORMALS_ZERO_MASK (0x0040U)
#define _MM_GET_DENORMALS_ZERO_MODE() (_mm_getcsr() & _MM_DENORMALS_ZERO_MASK)
-#define _MM_SET_DENORMALS_ZERO_MODE(x) (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
+#define _MM_SET_DENORMALS_ZERO_MODE(x) \
+ (_mm_setcsr((_mm_getcsr() & ~_MM_DENORMALS_ZERO_MASK) | (x)))
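Taken together, these macros perform a read-modify-write of the MXCSR DAZ
bit; a short sketch, assuming SSE is available:

    /* Flush denormal inputs to zero for this thread's SSE unit. */
    _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
    if (_MM_GET_DENORMALS_ZERO_MODE() == _MM_DENORMALS_ZERO_ON) {
      /* MXCSR.DAZ is set: denormal inputs now read as zero. */
    }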
#endif /* __EMMINTRIN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/f16cintrin.h b/contrib/llvm-project/clang/lib/Headers/f16cintrin.h
index 13905e6fb0ec..94a662c1d93a 100644
--- a/contrib/llvm-project/clang/lib/Headers/f16cintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/f16cintrin.h
@@ -65,9 +65,9 @@ _cvtsh_ss(unsigned short __a)
/// 011: Truncate \n
/// 1XX: Use MXCSR.RC for rounding
/// \returns The converted 16-bit half-precision float value.
-#define _cvtss_sh(a, imm) \
- ((unsigned short)(((__v8hi)__builtin_ia32_vcvtps2ph((__v4sf){a, 0, 0, 0}, \
- (imm)))[0]))
+#define _cvtss_sh(a, imm) __extension__ ({ \
+ (unsigned short)(((__v8hi)__builtin_ia32_vcvtps2ph((__v4sf){a, 0, 0, 0}, \
+ (imm)))[0]); })
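A hedged usage sketch: the immediate must be a compile-time constant, and 0
selects round-to-nearest-even per the rounding table above (requires the
F16C target feature):

    /* Convert one float to its IEEE half-precision bit pattern. */
    unsigned short h = _cvtss_sh(3.14159f, 0);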
/// Converts a 128-bit vector containing 32-bit float values into a
/// 128-bit vector containing 16-bit half-precision float values.
diff --git a/contrib/llvm-project/clang/lib/Headers/hlsl.h b/contrib/llvm-project/clang/lib/Headers/hlsl.h
new file mode 100644
index 000000000000..a9dce4503ddd
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/hlsl.h
@@ -0,0 +1,15 @@
+//===----- hlsl.h - HLSL definitions --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _HLSL_H_
+#define _HLSL_H_
+
+#include "hlsl/hlsl_basic_types.h"
+#include "hlsl/hlsl_intrinsics.h"
+
+#endif //_HLSL_H_
diff --git a/contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_basic_types.h b/contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_basic_types.h
new file mode 100644
index 000000000000..2069990f5c06
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_basic_types.h
@@ -0,0 +1,64 @@
+//===----- hlsl_basic_types.h - HLSL definitions for basic types ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _HLSL_HLSL_BASIC_TYPES_H_
+#define _HLSL_HLSL_BASIC_TYPES_H_
+
+// built-in scalar data types:
+
+#ifdef __HLSL_ENABLE_16_BIT
+// 16-bit integer.
+typedef unsigned short uint16_t;
+typedef short int16_t;
+#endif
+
+// unsigned 32-bit integer.
+typedef unsigned int uint;
+
+// 64-bit integer.
+typedef unsigned long uint64_t;
+typedef long int64_t;
+
+// built-in vector data types:
+
+#ifdef __HLSL_ENABLE_16_BIT
+typedef int16_t int16_t2 __attribute__((ext_vector_type(2)));
+typedef int16_t int16_t3 __attribute__((ext_vector_type(3)));
+typedef int16_t int16_t4 __attribute__((ext_vector_type(4)));
+typedef uint16_t uint16_t2 __attribute__((ext_vector_type(2)));
+typedef uint16_t uint16_t3 __attribute__((ext_vector_type(3)));
+typedef uint16_t uint16_t4 __attribute__((ext_vector_type(4)));
+#endif
+
+typedef int int2 __attribute__((ext_vector_type(2)));
+typedef int int3 __attribute__((ext_vector_type(3)));
+typedef int int4 __attribute__((ext_vector_type(4)));
+typedef uint uint2 __attribute__((ext_vector_type(2)));
+typedef uint uint3 __attribute__((ext_vector_type(3)));
+typedef uint uint4 __attribute__((ext_vector_type(4)));
+typedef int64_t int64_t2 __attribute__((ext_vector_type(2)));
+typedef int64_t int64_t3 __attribute__((ext_vector_type(3)));
+typedef int64_t int64_t4 __attribute__((ext_vector_type(4)));
+typedef uint64_t uint64_t2 __attribute__((ext_vector_type(2)));
+typedef uint64_t uint64_t3 __attribute__((ext_vector_type(3)));
+typedef uint64_t uint64_t4 __attribute__((ext_vector_type(4)));
+
+#ifdef __HLSL_ENABLE_16_BIT
+typedef half half2 __attribute__((ext_vector_type(2)));
+typedef half half3 __attribute__((ext_vector_type(3)));
+typedef half half4 __attribute__((ext_vector_type(4)));
+#endif
+
+typedef float float2 __attribute__((ext_vector_type(2)));
+typedef float float3 __attribute__((ext_vector_type(3)));
+typedef float float4 __attribute__((ext_vector_type(4)));
+typedef double double2 __attribute__((ext_vector_type(2)));
+typedef double double3 __attribute__((ext_vector_type(3)));
+typedef double double4 __attribute__((ext_vector_type(4)));
+
+#endif //_HLSL_HLSL_BASIC_TYPES_H_
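Because these are Clang ext_vector_type typedefs, they carry OpenCL-style
vector semantics; a small sketch with illustrative values, not part of the
header:

    float4 a = {1.0f, 2.0f, 3.0f, 4.0f};
    float4 b = a * 2.0f; /* elementwise splat-multiply: {2, 4, 6, 8} */
    float2 hi = b.zw;    /* swizzle access: {6, 8} */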
diff --git a/contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_intrinsics.h b/contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_intrinsics.h
new file mode 100644
index 000000000000..b5cdb8b44970
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/hlsl/hlsl_intrinsics.h
@@ -0,0 +1,15 @@
+//===----- hlsl_intrinsics.h - HLSL definitions for intrinsics ----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _HLSL_HLSL_INTRINSICS_H_
+#define _HLSL_HLSL_INTRINSICS_H_
+
+__attribute__((clang_builtin_alias(__builtin_hlsl_wave_active_count_bits))) uint
+WaveActiveCountBits(bool bBit);
+
+#endif //_HLSL_HLSL_INTRINSICS_H_
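In HLSL source the alias surfaces as the standard wave intrinsic; a one-line
usage sketch, with brightness and threshold standing for hypothetical
per-lane values:

    // Count the lanes in the current wave where the predicate holds.
    uint litCount = WaveActiveCountBits(brightness > threshold);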
diff --git a/contrib/llvm-project/clang/lib/Headers/hresetintrin.h b/contrib/llvm-project/clang/lib/Headers/hresetintrin.h
index 13e31a2e03ad..646f6c130961 100644
--- a/contrib/llvm-project/clang/lib/Headers/hresetintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/hresetintrin.h
@@ -25,7 +25,7 @@
///
/// This intrinsic corresponds to the <c> HRESET </c> instruction.
///
-/// \operation
+/// \code{.operation}
/// IF __eax == 0
/// // nop
/// ELSE
@@ -35,7 +35,7 @@
/// FI
/// ENDFOR
/// FI
-/// \endoperation
+/// \endcode
static __inline void __DEFAULT_FN_ATTRS
_hreset(int __eax)
{
diff --git a/contrib/llvm-project/clang/lib/Headers/ia32intrin.h b/contrib/llvm-project/clang/lib/Headers/ia32intrin.h
index ec8142b9c640..f1904efd71c4 100644
--- a/contrib/llvm-project/clang/lib/Headers/ia32intrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/ia32intrin.h
@@ -40,7 +40,7 @@
*/
static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
__bsfd(int __A) {
- return __builtin_ctz(__A);
+ return __builtin_ctz((unsigned int)__A);
}
/** Find the first set bit starting from the msb. Result is undefined if
@@ -57,7 +57,7 @@ __bsfd(int __A) {
*/
static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
__bsrd(int __A) {
- return 31 - __builtin_clz(__A);
+ return 31 - __builtin_clz((unsigned int)__A);
}
/** Swaps the bytes in the input. Converting little endian to big endian or
@@ -73,12 +73,12 @@ __bsrd(int __A) {
*/
static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
__bswapd(int __A) {
- return __builtin_bswap32(__A);
+ return (int)__builtin_bswap32((unsigned int)__A);
}
static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
_bswap(int __A) {
- return __builtin_bswap32(__A);
+ return (int)__builtin_bswap32((unsigned int)__A);
}
#define _bit_scan_forward(A) __bsfd((A))
@@ -99,7 +99,7 @@ _bswap(int __A) {
*/
static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
__bsfq(long long __A) {
- return __builtin_ctzll(__A);
+ return (int)__builtin_ctzll((unsigned long long)__A);
}
/** Find the first set bit starting from the msb. Result is undefined if
@@ -116,7 +116,7 @@ __bsfq(long long __A) {
*/
static __inline__ int __DEFAULT_FN_ATTRS_CONSTEXPR
__bsrq(long long __A) {
- return 63 - __builtin_clzll(__A);
+ return 63 - __builtin_clzll((unsigned long long)__A);
}
/** Swaps the bytes in the input. Converting little endian to big endian or
@@ -132,7 +132,7 @@ __bsrq(long long __A) {
*/
static __inline__ long long __DEFAULT_FN_ATTRS_CONSTEXPR
__bswapq(long long __A) {
- return __builtin_bswap64(__A);
+ return (long long)__builtin_bswap64((unsigned long long)__A);
}
#define _bswap64(A) __bswapq((A))
@@ -395,23 +395,23 @@ __rorw(unsigned short __X, int __C) {
static __inline__ unsigned int __DEFAULT_FN_ATTRS_CONSTEXPR
__rold(unsigned int __X, int __C) {
- return __builtin_rotateleft32(__X, __C);
+ return __builtin_rotateleft32(__X, (unsigned int)__C);
}
static __inline__ unsigned int __DEFAULT_FN_ATTRS_CONSTEXPR
__rord(unsigned int __X, int __C) {
- return __builtin_rotateright32(__X, __C);
+ return __builtin_rotateright32(__X, (unsigned int)__C);
}
#ifdef __x86_64__
static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CONSTEXPR
__rolq(unsigned long long __X, int __C) {
- return __builtin_rotateleft64(__X, __C);
+ return __builtin_rotateleft64(__X, (unsigned long long)__C);
}
static __inline__ unsigned long long __DEFAULT_FN_ATTRS_CONSTEXPR
__rorq(unsigned long long __X, int __C) {
- return __builtin_rotateright64(__X, __C);
+ return __builtin_rotateright64(__X, (unsigned long long)__C);
}
#endif /* __x86_64__ */
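A typical call site, assuming only that __rold lowers to the rotate builtin
as shown; mix13 and the shift count are illustrative:

    static __inline__ unsigned int mix13(unsigned int h) {
      return __rold(h, 13); /* rotate left by 13; shifted-out bits wrap */
    }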
diff --git a/contrib/llvm-project/clang/lib/Headers/immintrin.h b/contrib/llvm-project/clang/lib/Headers/immintrin.h
index e5174f8785e5..e4d7a799b1ca 100644
--- a/contrib/llvm-project/clang/lib/Headers/immintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/immintrin.h
@@ -276,20 +276,20 @@ _rdpid_u32(void) {
static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
_rdrand16_step(unsigned short *__p)
{
- return __builtin_ia32_rdrand16_step(__p);
+ return (int)__builtin_ia32_rdrand16_step(__p);
}
static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
_rdrand32_step(unsigned int *__p)
{
- return __builtin_ia32_rdrand32_step(__p);
+ return (int)__builtin_ia32_rdrand32_step(__p);
}
#ifdef __x86_64__
static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("rdrnd")))
_rdrand64_step(unsigned long long *__p)
{
- return __builtin_ia32_rdrand64_step(__p);
+ return (int)__builtin_ia32_rdrand64_step(__p);
}
#endif
#endif /* __RDRND__ */
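Each *_step intrinsic returns 1 on success and 0 when no random value was
available, so callers normally retry; a minimal sketch (production code
should bound the loop):

    static __inline__ unsigned int rdrand32_retry(void) {
      unsigned int r;
      while (!_rdrand32_step(&r))
        ; /* spin until the hardware RNG delivers a value */
      return r;
    }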
@@ -360,50 +360,50 @@ _writegsbase_u64(unsigned long long __V)
static __inline__ short __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
_loadbe_i16(void const * __P) {
struct __loadu_i16 {
- short __v;
+ unsigned short __v;
} __attribute__((__packed__, __may_alias__));
- return __builtin_bswap16(((const struct __loadu_i16*)__P)->__v);
+ return (short)__builtin_bswap16(((const struct __loadu_i16*)__P)->__v);
}
static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
_storebe_i16(void * __P, short __D) {
struct __storeu_i16 {
- short __v;
+ unsigned short __v;
} __attribute__((__packed__, __may_alias__));
- ((struct __storeu_i16*)__P)->__v = __builtin_bswap16(__D);
+ ((struct __storeu_i16*)__P)->__v = __builtin_bswap16((unsigned short)__D);
}
static __inline__ int __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
_loadbe_i32(void const * __P) {
struct __loadu_i32 {
- int __v;
+ unsigned int __v;
} __attribute__((__packed__, __may_alias__));
- return __builtin_bswap32(((const struct __loadu_i32*)__P)->__v);
+ return (int)__builtin_bswap32(((const struct __loadu_i32*)__P)->__v);
}
static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
_storebe_i32(void * __P, int __D) {
struct __storeu_i32 {
- int __v;
+ unsigned int __v;
} __attribute__((__packed__, __may_alias__));
- ((struct __storeu_i32*)__P)->__v = __builtin_bswap32(__D);
+ ((struct __storeu_i32*)__P)->__v = __builtin_bswap32((unsigned int)__D);
}
#ifdef __x86_64__
static __inline__ long long __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
_loadbe_i64(void const * __P) {
struct __loadu_i64 {
- long long __v;
+ unsigned long long __v;
} __attribute__((__packed__, __may_alias__));
- return __builtin_bswap64(((const struct __loadu_i64*)__P)->__v);
+ return (long long)__builtin_bswap64(((const struct __loadu_i64*)__P)->__v);
}
static __inline__ void __attribute__((__always_inline__, __nodebug__, __target__("movbe")))
_storebe_i64(void * __P, long long __D) {
struct __storeu_i64 {
- long long __v;
+ unsigned long long __v;
} __attribute__((__packed__, __may_alias__));
- ((struct __storeu_i64*)__P)->__v = __builtin_bswap64(__D);
+ ((struct __storeu_i64*)__P)->__v = __builtin_bswap64((unsigned long long)__D);
}
#endif
#endif /* __MOVBE */
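The struct members change to unsigned because the __builtin_bswap* builtins
take and return unsigned types; the outer casts restore the signed
interface. A usage sketch with a hypothetical packet layout:

    /* Read a big-endian 32-bit length field at the start of a packet. */
    static __inline__ int packet_len(const void *pkt) {
      return _loadbe_i32(pkt); /* unaligned, byte-swapping load */
    }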
diff --git a/contrib/llvm-project/clang/lib/Headers/intrin.h b/contrib/llvm-project/clang/lib/Headers/intrin.h
index 02e66d02067c..de68b07491c6 100644
--- a/contrib/llvm-project/clang/lib/Headers/intrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/intrin.h
@@ -534,27 +534,6 @@ static __inline__ void __DEFAULT_FN_ATTRS __stosq(unsigned __int64 *__dst,
|* Misc
\*----------------------------------------------------------------------------*/
#if defined(__i386__) || defined(__x86_64__)
-#if defined(__i386__)
-#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx) \
- __asm("cpuid" \
- : "=a"(__eax), "=b"(__ebx), "=c"(__ecx), "=d"(__edx) \
- : "0"(__leaf), "2"(__count))
-#else
-/* x86-64 uses %rbx as the base register, so preserve it. */
-#define __cpuid_count(__leaf, __count, __eax, __ebx, __ecx, __edx) \
- __asm("xchg{q} {%%rbx, %q1|%q1, rbx}\n" \
- "cpuid\n" \
- "xchg{q} {%%rbx, %q1|%q1, rbx}" \
- : "=a"(__eax), "=r"(__ebx), "=c"(__ecx), "=d"(__edx) \
- : "0"(__leaf), "2"(__count))
-#endif
-static __inline__ void __DEFAULT_FN_ATTRS __cpuid(int __info[4], int __level) {
- __cpuid_count(__level, 0, __info[0], __info[1], __info[2], __info[3]);
-}
-static __inline__ void __DEFAULT_FN_ATTRS __cpuidex(int __info[4], int __level,
- int __ecx) {
- __cpuid_count(__level, __ecx, __info[0], __info[1], __info[2], __info[3]);
-}
static __inline__ void __DEFAULT_FN_ATTRS __halt(void) {
__asm__ volatile("hlt");
}
@@ -581,6 +560,18 @@ unsigned __int64 __cdecl _byteswap_uint64(unsigned __int64 val);
__int64 __mulh(__int64 __a, __int64 __b);
unsigned __int64 __umulh(unsigned __int64 __a, unsigned __int64 __b);
+
+void __break(int);
+
+void __writex18byte(unsigned long offset, unsigned char data);
+void __writex18word(unsigned long offset, unsigned short data);
+void __writex18dword(unsigned long offset, unsigned long data);
+void __writex18qword(unsigned long offset, unsigned __int64 data);
+
+unsigned char __readx18byte(unsigned long offset);
+unsigned short __readx18word(unsigned long offset);
+unsigned long __readx18dword(unsigned long offset);
+unsigned __int64 __readx18qword(unsigned long offset);
#endif
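These declarations cover byte through quadword accesses relative to the
AArch64 platform register x18; a hedged sketch in which the 0x30 offset is
purely illustrative:

    unsigned char tag = __readx18byte(0x30); /* load from [x18 + 0x30] */
    __writex18byte(0x30, tag | 1u);          /* store back with bit 0 set */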
/*----------------------------------------------------------------------------*\
diff --git a/contrib/llvm-project/clang/lib/Headers/keylockerintrin.h b/contrib/llvm-project/clang/lib/Headers/keylockerintrin.h
index ad9428e6c8b5..1994ac42070a 100644
--- a/contrib/llvm-project/clang/lib/Headers/keylockerintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/keylockerintrin.h
@@ -46,7 +46,7 @@
///
/// This intrinsic corresponds to the <c> LOADIWKEY </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// IF CPL > 0 // LOADIWKEY only allowed at ring 0 (supervisor mode)
/// GP (0)
/// FI
@@ -91,7 +91,7 @@
/// AF := 0
/// PF := 0
/// CF := 0
-/// \endoperation
+/// \endcode
static __inline__ void __DEFAULT_FN_ATTRS
_mm_loadiwkey (unsigned int __ctl, __m128i __intkey,
__m128i __enkey_lo, __m128i __enkey_hi) {
@@ -106,7 +106,7 @@ _mm_loadiwkey (unsigned int __ctl, __m128i __intkey,
///
/// This intrinsic corresponds to the <c> ENCODEKEY128 </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// InputKey[127:0] := __key[127:0]
/// KeyMetadata[2:0] := __htype[2:0]
/// KeyMetadata[23:3] := 0 // Reserved for future usage
@@ -126,7 +126,7 @@ _mm_loadiwkey (unsigned int __ctl, __m128i __intkey,
/// AF := 0
/// PF := 0
/// CF := 0
-/// \endoperation
+/// \endcode
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_mm_encodekey128_u32(unsigned int __htype, __m128i __key, void *__h) {
return __builtin_ia32_encodekey128_u32(__htype, (__v2di)__key, __h);
@@ -141,7 +141,7 @@ _mm_encodekey128_u32(unsigned int __htype, __m128i __key, void *__h) {
///
/// This intrinsic corresponds to the <c> ENCODEKEY256 </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// InputKey[127:0] := __key_lo[127:0]
/// InputKey[255:128] := __key_hi[255:128]
/// KeyMetadata[2:0] := __htype[2:0]
@@ -163,7 +163,7 @@ _mm_encodekey128_u32(unsigned int __htype, __m128i __key, void *__h) {
/// AF := 0
/// PF := 0
/// CF := 0
-/// \endoperation
+/// \endcode
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_mm_encodekey256_u32(unsigned int __htype, __m128i __key_lo, __m128i __key_hi,
void *__h) {
@@ -179,7 +179,7 @@ _mm_encodekey256_u32(unsigned int __htype, __m128i __key_lo, __m128i __key_hi,
///
/// This intrinsic corresponds to the <c> AESENC128KL </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// Handle[383:0] := MEM[__h+383:__h] // Load is not guaranteed to be atomic.
/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
/// (Handle[127:0] AND (CPL > 0)) ||
@@ -202,7 +202,7 @@ _mm_encodekey256_u32(unsigned int __htype, __m128i __key_lo, __m128i __key_hi,
/// AF := 0
/// PF := 0
/// CF := 0
-/// \endoperation
+/// \endcode
static __inline__ unsigned char __DEFAULT_FN_ATTRS
_mm_aesenc128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
return __builtin_ia32_aesenc128kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
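Combined with _mm_encodekey128_u32 above, a hedged end-to-end sketch: rawkey
and pt are hypothetical inputs, the 48-byte buffer matches the 384-bit
handle in the operation text, and Key Locker must be enabled with an
internal wrapping key already loaded via _mm_loadiwkey:

    unsigned char handle[48]; /* storage for the 384-bit key handle */
    (void)_mm_encodekey128_u32(0, rawkey, handle); /* htype 0: unrestricted */
    __m128i ct;
    unsigned char status = _mm_aesenc128kl_u8(&ct, pt, handle);
    /* status derives from ZF, which is set when the handle is illegal */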
@@ -216,7 +216,7 @@ _mm_aesenc128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
///
/// This intrinsic corresponds to the <c> AESENC256KL </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// Handle[511:0] := MEM[__h+511:__h] // Load is not guaranteed to be atomic.
/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
/// (Handle[127:0] AND (CPL > 0)) ||
@@ -241,7 +241,7 @@ _mm_aesenc128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
/// AF := 0
/// PF := 0
/// CF := 0
-/// \endoperation
+/// \endcode
static __inline__ unsigned char __DEFAULT_FN_ATTRS
_mm_aesenc256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
return __builtin_ia32_aesenc256kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
@@ -255,7 +255,7 @@ _mm_aesenc256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
///
/// This intrinsic corresponds to the <c> AESDEC128KL </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// Handle[383:0] := MEM[__h+383:__h] // Load is not guaranteed to be atomic.
/// IllegalHandle := (HandleReservedBitSet (Handle[383:0]) ||
/// (Handle[127:0] AND (CPL > 0)) ||
@@ -280,7 +280,7 @@ _mm_aesenc256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
/// AF := 0
/// PF := 0
/// CF := 0
-/// \endoperation
+/// \endcode
static __inline__ unsigned char __DEFAULT_FN_ATTRS
_mm_aesdec128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
return __builtin_ia32_aesdec128kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
@@ -294,7 +294,7 @@ _mm_aesdec128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
///
/// This intrinsic corresponds to the <c> AESDEC256KL </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// Handle[511:0] := MEM[__h+511:__h]
/// IllegalHandle := (HandleReservedBitSet (Handle[511:0]) ||
/// (Handle[127:0] AND (CPL > 0)) ||
@@ -319,7 +319,7 @@ _mm_aesdec128kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
/// AF := 0
/// PF := 0
/// CF := 0
-/// \endoperation
+/// \endcode
static __inline__ unsigned char __DEFAULT_FN_ATTRS
_mm_aesdec256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
return __builtin_ia32_aesdec256kl_u8((__v2di *)__odata, (__v2di)__idata, __h);
@@ -346,7 +346,7 @@ _mm_aesdec256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
///
/// This intrinsic corresponds to the <c> AESENCWIDE128KL </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// Handle := MEM[__h+383:__h]
/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
/// (Handle[127:0] AND (CPL > 0)) ||
@@ -377,7 +377,7 @@ _mm_aesdec256kl_u8(__m128i* __odata, __m128i __idata, const void *__h) {
/// AF := 0
/// PF := 0
/// CF := 0
-/// \endoperation
+/// \endcode
static __inline__ unsigned char __DEFAULT_FN_ATTRS
_mm_aesencwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
return __builtin_ia32_aesencwide128kl_u8((__v2di *)__odata,
@@ -392,7 +392,7 @@ _mm_aesencwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void*
///
/// This intrinsic corresponds to the <c> AESENCWIDE256KL </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// Handle[511:0] := MEM[__h+511:__h]
/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
/// (Handle[127:0] AND (CPL > 0)) ||
@@ -423,7 +423,7 @@ _mm_aesencwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void*
/// AF := 0
/// PF := 0
/// CF := 0
-/// \endoperation
+/// \endcode
static __inline__ unsigned char __DEFAULT_FN_ATTRS
_mm_aesencwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
return __builtin_ia32_aesencwide256kl_u8((__v2di *)__odata,
@@ -438,7 +438,7 @@ _mm_aesencwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void*
///
/// This intrinsic corresponds to the <c> AESDECWIDE128KL </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// Handle[383:0] := MEM[__h+383:__h]
/// IllegalHandle := ( HandleReservedBitSet (Handle[383:0]) ||
/// (Handle[127:0] AND (CPL > 0)) ||
@@ -469,7 +469,7 @@ _mm_aesencwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void*
/// AF := 0
/// PF := 0
/// CF := 0
-/// \endoperation
+/// \endcode
static __inline__ unsigned char __DEFAULT_FN_ATTRS
_mm_aesdecwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
return __builtin_ia32_aesdecwide128kl_u8((__v2di *)__odata,
@@ -484,7 +484,7 @@ _mm_aesdecwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void*
///
/// This intrinsic corresponds to the <c> AESDECWIDE256KL </c> instructions.
///
-/// \operation
+/// \code{.operation}
/// Handle[511:0] := MEM[__h+511:__h]
/// IllegalHandle := ( HandleReservedBitSet (Handle[511:0]) ||
/// (Handle[127:0] AND (CPL > 0)) ||
@@ -515,7 +515,7 @@ _mm_aesdecwide128kl_u8(__m128i __odata[8], const __m128i __idata[8], const void*
/// AF := 0
/// PF := 0
/// CF := 0
-/// \endoperation
+/// \endcode
static __inline__ unsigned char __DEFAULT_FN_ATTRS
_mm_aesdecwide256kl_u8(__m128i __odata[8], const __m128i __idata[8], const void* __h) {
return __builtin_ia32_aesdecwide256kl_u8((__v2di *)__odata,
diff --git a/contrib/llvm-project/clang/lib/Headers/mm_malloc.h b/contrib/llvm-project/clang/lib/Headers/mm_malloc.h
index 933dbaacade5..d32fe5941627 100644
--- a/contrib/llvm-project/clang/lib/Headers/mm_malloc.h
+++ b/contrib/llvm-project/clang/lib/Headers/mm_malloc.h
@@ -28,9 +28,9 @@ extern "C" int posix_memalign(void **__memptr, size_t __alignment, size_t __size
#if !(defined(_WIN32) && defined(_mm_malloc))
static __inline__ void *__attribute__((__always_inline__, __nodebug__,
- __malloc__))
-_mm_malloc(size_t __size, size_t __align)
-{
+ __malloc__, __alloc_size__(1),
+ __alloc_align__(2)))
+_mm_malloc(size_t __size, size_t __align) {
if (__align == 1) {
return malloc(__size);
}
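With __alloc_size__ and __alloc_align__ attached, the compiler can see both
the allocation size and the alignment of the returned pointer; a small
sketch:

    float *buf = _mm_malloc(64 * sizeof(float), 32); /* 256 B, 32-aligned */
    if (buf) {
      /* __builtin_object_size(buf, 0) can now fold to 256 */
      _mm_free(buf);
    }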
diff --git a/contrib/llvm-project/clang/lib/Headers/opencl-c-base.h b/contrib/llvm-project/clang/lib/Headers/opencl-c-base.h
index 5191c41bcd05..c433b4f7eb1a 100644
--- a/contrib/llvm-project/clang/lib/Headers/opencl-c-base.h
+++ b/contrib/llvm-project/clang/lib/Headers/opencl-c-base.h
@@ -21,6 +21,7 @@
#define cl_khr_subgroup_shuffle 1
#define cl_khr_subgroup_shuffle_relative 1
#define cl_khr_subgroup_clustered_reduce 1
+#define cl_khr_subgroup_rotate 1
#define cl_khr_extended_bit_ops 1
#define cl_khr_integer_dot_product 1
#define __opencl_c_integer_dot_product_input_4x8bit 1
@@ -67,6 +68,7 @@
#if (__OPENCL_CPP_VERSION__ == 202100 || __OPENCL_C_VERSION__ == 300)
// For the SPIR and SPIR-V target all features are supported.
#if defined(__SPIR__) || defined(__SPIRV__)
+#define __opencl_c_work_group_collective_functions 1
#define __opencl_c_atomic_order_seq_cst 1
#define __opencl_c_atomic_scope_device 1
#define __opencl_c_atomic_scope_all_devices 1
@@ -80,6 +82,11 @@
#define __opencl_c_named_address_space_builtins 1
#endif // !defined(__opencl_c_generic_address_space)
+#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups) || defined(__opencl_c_subgroups)
+// Internal feature macro to provide subgroup builtins.
+#define __opencl_subgroup_builtins 1
+#endif
+
// built-in scalar data types:
/**
@@ -197,6 +204,9 @@ typedef double double8 __attribute__((ext_vector_type(8)));
typedef double double16 __attribute__((ext_vector_type(16)));
#endif
+// An internal alias for half, for use by OpenCLBuiltins.td.
+#define __half half
+
#if defined(__OPENCL_CPP_VERSION__)
#define NULL nullptr
#elif defined(__OPENCL_C_VERSION__)
diff --git a/contrib/llvm-project/clang/lib/Headers/opencl-c.h b/contrib/llvm-project/clang/lib/Headers/opencl-c.h
index c7bb77716ac4..ed647d9e9c06 100644
--- a/contrib/llvm-project/clang/lib/Headers/opencl-c.h
+++ b/contrib/llvm-project/clang/lib/Headers/opencl-c.h
@@ -6362,7 +6362,7 @@ uint __ovld __cnfn get_work_dim(void);
* dimindx, get_global_size() returns 1.
* For clEnqueueTask, this always returns 1.
*/
-size_t __ovld __cnfn get_global_size(uint dimindx);
+size_t __ovld __cnfn get_global_size(uint);
/**
* Returns the unique global work-item ID value for
@@ -6373,7 +6373,7 @@ size_t __ovld __cnfn get_global_size(uint dimindx);
* other values of dimindx, get_global_id() returns 0.
* For clEnqueueTask, this returns 0.
*/
-size_t __ovld __cnfn get_global_id(uint dimindx);
+size_t __ovld __cnfn get_global_id(uint);
/**
* Returns the number of local work-items specified in
@@ -6387,7 +6387,7 @@ size_t __ovld __cnfn get_global_id(uint dimindx);
* get_local_size() returns 1.
* For clEnqueueTask, this always returns 1.
*/
-size_t __ovld __cnfn get_local_size(uint dimindx);
+size_t __ovld __cnfn get_local_size(uint);
/**
* Returns the unique local work-item ID i.e. a work-item
@@ -6397,7 +6397,7 @@ size_t __ovld __cnfn get_local_size(uint dimindx);
* get_local_id() returns 0.
* For clEnqueueTask, this returns 0.
*/
-size_t __ovld __cnfn get_local_id(uint dimindx);
+size_t __ovld __cnfn get_local_id(uint);
/**
* Returns the number of work-groups that will execute a
@@ -6406,7 +6406,7 @@ size_t __ovld __cnfn get_local_id(uint dimindx);
* For other values of dimindx, get_num_groups() returns 1.
* For clEnqueueTask, this always returns 1.
*/
-size_t __ovld __cnfn get_num_groups(uint dimindx);
+size_t __ovld __cnfn get_num_groups(uint);
/**
* get_group_id returns the work-group ID which is a
@@ -6415,7 +6415,7 @@ size_t __ovld __cnfn get_num_groups(uint dimindx);
* For other values, get_group_id() returns 0.
* For clEnqueueTask, this returns 0.
*/
-size_t __ovld __cnfn get_group_id(uint dimindx);
+size_t __ovld __cnfn get_group_id(uint);
/**
* get_global_offset returns the offset values specified in
@@ -6425,10 +6425,10 @@ size_t __ovld __cnfn get_group_id(uint dimindx);
* For other values, get_global_offset() returns 0.
* For clEnqueueTask, this returns 0.
*/
-size_t __ovld __cnfn get_global_offset(uint dimindx);
+size_t __ovld __cnfn get_global_offset(uint);
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-size_t __ovld get_enqueued_local_size(uint dimindx);
+size_t __ovld get_enqueued_local_size(uint);
size_t __ovld get_global_linear_id(void);
size_t __ovld get_local_linear_id(void);
#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
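A short kernel exercising the work-item queries declared above; scale is an
illustrative kernel, not part of the header:

    __kernel void scale(__global float *out, __global const float *in,
                        const float k) {
      size_t i = get_global_id(0); /* this work-item's index in dim 0 */
      out[i] = k * in[i];
    }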
@@ -6490,27 +6490,27 @@ half16 __ovld __cnfn acosh(half16);
/**
* Compute acos (x) / PI.
*/
-float __ovld __cnfn acospi(float x);
-float2 __ovld __cnfn acospi(float2 x);
-float3 __ovld __cnfn acospi(float3 x);
-float4 __ovld __cnfn acospi(float4 x);
-float8 __ovld __cnfn acospi(float8 x);
-float16 __ovld __cnfn acospi(float16 x);
+float __ovld __cnfn acospi(float);
+float2 __ovld __cnfn acospi(float2);
+float3 __ovld __cnfn acospi(float3);
+float4 __ovld __cnfn acospi(float4);
+float8 __ovld __cnfn acospi(float8);
+float16 __ovld __cnfn acospi(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn acospi(double x);
-double2 __ovld __cnfn acospi(double2 x);
-double3 __ovld __cnfn acospi(double3 x);
-double4 __ovld __cnfn acospi(double4 x);
-double8 __ovld __cnfn acospi(double8 x);
-double16 __ovld __cnfn acospi(double16 x);
+double __ovld __cnfn acospi(double);
+double2 __ovld __cnfn acospi(double2);
+double3 __ovld __cnfn acospi(double3);
+double4 __ovld __cnfn acospi(double4);
+double8 __ovld __cnfn acospi(double8);
+double16 __ovld __cnfn acospi(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn acospi(half x);
-half2 __ovld __cnfn acospi(half2 x);
-half3 __ovld __cnfn acospi(half3 x);
-half4 __ovld __cnfn acospi(half4 x);
-half8 __ovld __cnfn acospi(half8 x);
-half16 __ovld __cnfn acospi(half16 x);
+half __ovld __cnfn acospi(half);
+half2 __ovld __cnfn acospi(half2);
+half3 __ovld __cnfn acospi(half3);
+half4 __ovld __cnfn acospi(half4);
+half8 __ovld __cnfn acospi(half8);
+half16 __ovld __cnfn acospi(half16);
#endif //cl_khr_fp16
/**
@@ -6568,79 +6568,79 @@ half16 __ovld __cnfn asinh(half16);
/**
* Compute asin (x) / PI.
*/
-float __ovld __cnfn asinpi(float x);
-float2 __ovld __cnfn asinpi(float2 x);
-float3 __ovld __cnfn asinpi(float3 x);
-float4 __ovld __cnfn asinpi(float4 x);
-float8 __ovld __cnfn asinpi(float8 x);
-float16 __ovld __cnfn asinpi(float16 x);
+float __ovld __cnfn asinpi(float);
+float2 __ovld __cnfn asinpi(float2);
+float3 __ovld __cnfn asinpi(float3);
+float4 __ovld __cnfn asinpi(float4);
+float8 __ovld __cnfn asinpi(float8);
+float16 __ovld __cnfn asinpi(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn asinpi(double x);
-double2 __ovld __cnfn asinpi(double2 x);
-double3 __ovld __cnfn asinpi(double3 x);
-double4 __ovld __cnfn asinpi(double4 x);
-double8 __ovld __cnfn asinpi(double8 x);
-double16 __ovld __cnfn asinpi(double16 x);
+double __ovld __cnfn asinpi(double);
+double2 __ovld __cnfn asinpi(double2);
+double3 __ovld __cnfn asinpi(double3);
+double4 __ovld __cnfn asinpi(double4);
+double8 __ovld __cnfn asinpi(double8);
+double16 __ovld __cnfn asinpi(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn asinpi(half x);
-half2 __ovld __cnfn asinpi(half2 x);
-half3 __ovld __cnfn asinpi(half3 x);
-half4 __ovld __cnfn asinpi(half4 x);
-half8 __ovld __cnfn asinpi(half8 x);
-half16 __ovld __cnfn asinpi(half16 x);
+half __ovld __cnfn asinpi(half);
+half2 __ovld __cnfn asinpi(half2);
+half3 __ovld __cnfn asinpi(half3);
+half4 __ovld __cnfn asinpi(half4);
+half8 __ovld __cnfn asinpi(half8);
+half16 __ovld __cnfn asinpi(half16);
#endif //cl_khr_fp16
/**
* Arc tangent function.
*/
-float __ovld __cnfn atan(float y_over_x);
-float2 __ovld __cnfn atan(float2 y_over_x);
-float3 __ovld __cnfn atan(float3 y_over_x);
-float4 __ovld __cnfn atan(float4 y_over_x);
-float8 __ovld __cnfn atan(float8 y_over_x);
-float16 __ovld __cnfn atan(float16 y_over_x);
+float __ovld __cnfn atan(float);
+float2 __ovld __cnfn atan(float2);
+float3 __ovld __cnfn atan(float3);
+float4 __ovld __cnfn atan(float4);
+float8 __ovld __cnfn atan(float8);
+float16 __ovld __cnfn atan(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn atan(double y_over_x);
-double2 __ovld __cnfn atan(double2 y_over_x);
-double3 __ovld __cnfn atan(double3 y_over_x);
-double4 __ovld __cnfn atan(double4 y_over_x);
-double8 __ovld __cnfn atan(double8 y_over_x);
-double16 __ovld __cnfn atan(double16 y_over_x);
+double __ovld __cnfn atan(double);
+double2 __ovld __cnfn atan(double2);
+double3 __ovld __cnfn atan(double3);
+double4 __ovld __cnfn atan(double4);
+double8 __ovld __cnfn atan(double8);
+double16 __ovld __cnfn atan(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn atan(half y_over_x);
-half2 __ovld __cnfn atan(half2 y_over_x);
-half3 __ovld __cnfn atan(half3 y_over_x);
-half4 __ovld __cnfn atan(half4 y_over_x);
-half8 __ovld __cnfn atan(half8 y_over_x);
-half16 __ovld __cnfn atan(half16 y_over_x);
+half __ovld __cnfn atan(half);
+half2 __ovld __cnfn atan(half2);
+half3 __ovld __cnfn atan(half3);
+half4 __ovld __cnfn atan(half4);
+half8 __ovld __cnfn atan(half8);
+half16 __ovld __cnfn atan(half16);
#endif //cl_khr_fp16
/**
* Arc tangent of y / x.
*/
-float __ovld __cnfn atan2(float y, float x);
-float2 __ovld __cnfn atan2(float2 y, float2 x);
-float3 __ovld __cnfn atan2(float3 y, float3 x);
-float4 __ovld __cnfn atan2(float4 y, float4 x);
-float8 __ovld __cnfn atan2(float8 y, float8 x);
-float16 __ovld __cnfn atan2(float16 y, float16 x);
+float __ovld __cnfn atan2(float, float);
+float2 __ovld __cnfn atan2(float2, float2);
+float3 __ovld __cnfn atan2(float3, float3);
+float4 __ovld __cnfn atan2(float4, float4);
+float8 __ovld __cnfn atan2(float8, float8);
+float16 __ovld __cnfn atan2(float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn atan2(double y, double x);
-double2 __ovld __cnfn atan2(double2 y, double2 x);
-double3 __ovld __cnfn atan2(double3 y, double3 x);
-double4 __ovld __cnfn atan2(double4 y, double4 x);
-double8 __ovld __cnfn atan2(double8 y, double8 x);
-double16 __ovld __cnfn atan2(double16 y, double16 x);
+double __ovld __cnfn atan2(double, double);
+double2 __ovld __cnfn atan2(double2, double2);
+double3 __ovld __cnfn atan2(double3, double3);
+double4 __ovld __cnfn atan2(double4, double4);
+double8 __ovld __cnfn atan2(double8, double8);
+double16 __ovld __cnfn atan2(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn atan2(half y, half x);
-half2 __ovld __cnfn atan2(half2 y, half2 x);
-half3 __ovld __cnfn atan2(half3 y, half3 x);
-half4 __ovld __cnfn atan2(half4 y, half4 x);
-half8 __ovld __cnfn atan2(half8 y, half8 x);
-half16 __ovld __cnfn atan2(half16 y, half16 x);
+half __ovld __cnfn atan2(half, half);
+half2 __ovld __cnfn atan2(half2, half2);
+half3 __ovld __cnfn atan2(half3, half3);
+half4 __ovld __cnfn atan2(half4, half4);
+half8 __ovld __cnfn atan2(half8, half8);
+half16 __ovld __cnfn atan2(half16, half16);
#endif //cl_khr_fp16
/**
@@ -6672,53 +6672,53 @@ half16 __ovld __cnfn atanh(half16);
/**
* Compute atan (x) / PI.
*/
-float __ovld __cnfn atanpi(float x);
-float2 __ovld __cnfn atanpi(float2 x);
-float3 __ovld __cnfn atanpi(float3 x);
-float4 __ovld __cnfn atanpi(float4 x);
-float8 __ovld __cnfn atanpi(float8 x);
-float16 __ovld __cnfn atanpi(float16 x);
+float __ovld __cnfn atanpi(float);
+float2 __ovld __cnfn atanpi(float2);
+float3 __ovld __cnfn atanpi(float3);
+float4 __ovld __cnfn atanpi(float4);
+float8 __ovld __cnfn atanpi(float8);
+float16 __ovld __cnfn atanpi(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn atanpi(double x);
-double2 __ovld __cnfn atanpi(double2 x);
-double3 __ovld __cnfn atanpi(double3 x);
-double4 __ovld __cnfn atanpi(double4 x);
-double8 __ovld __cnfn atanpi(double8 x);
-double16 __ovld __cnfn atanpi(double16 x);
+double __ovld __cnfn atanpi(double);
+double2 __ovld __cnfn atanpi(double2);
+double3 __ovld __cnfn atanpi(double3);
+double4 __ovld __cnfn atanpi(double4);
+double8 __ovld __cnfn atanpi(double8);
+double16 __ovld __cnfn atanpi(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn atanpi(half x);
-half2 __ovld __cnfn atanpi(half2 x);
-half3 __ovld __cnfn atanpi(half3 x);
-half4 __ovld __cnfn atanpi(half4 x);
-half8 __ovld __cnfn atanpi(half8 x);
-half16 __ovld __cnfn atanpi(half16 x);
+half __ovld __cnfn atanpi(half);
+half2 __ovld __cnfn atanpi(half2);
+half3 __ovld __cnfn atanpi(half3);
+half4 __ovld __cnfn atanpi(half4);
+half8 __ovld __cnfn atanpi(half8);
+half16 __ovld __cnfn atanpi(half16);
#endif //cl_khr_fp16
/**
* Compute atan2 (y, x) / PI.
*/
-float __ovld __cnfn atan2pi(float y, float x);
-float2 __ovld __cnfn atan2pi(float2 y, float2 x);
-float3 __ovld __cnfn atan2pi(float3 y, float3 x);
-float4 __ovld __cnfn atan2pi(float4 y, float4 x);
-float8 __ovld __cnfn atan2pi(float8 y, float8 x);
-float16 __ovld __cnfn atan2pi(float16 y, float16 x);
+float __ovld __cnfn atan2pi(float, float);
+float2 __ovld __cnfn atan2pi(float2, float2);
+float3 __ovld __cnfn atan2pi(float3, float3);
+float4 __ovld __cnfn atan2pi(float4, float4);
+float8 __ovld __cnfn atan2pi(float8, float8);
+float16 __ovld __cnfn atan2pi(float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn atan2pi(double y, double x);
-double2 __ovld __cnfn atan2pi(double2 y, double2 x);
-double3 __ovld __cnfn atan2pi(double3 y, double3 x);
-double4 __ovld __cnfn atan2pi(double4 y, double4 x);
-double8 __ovld __cnfn atan2pi(double8 y, double8 x);
-double16 __ovld __cnfn atan2pi(double16 y, double16 x);
+double __ovld __cnfn atan2pi(double, double);
+double2 __ovld __cnfn atan2pi(double2, double2);
+double3 __ovld __cnfn atan2pi(double3, double3);
+double4 __ovld __cnfn atan2pi(double4, double4);
+double8 __ovld __cnfn atan2pi(double8, double8);
+double16 __ovld __cnfn atan2pi(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn atan2pi(half y, half x);
-half2 __ovld __cnfn atan2pi(half2 y, half2 x);
-half3 __ovld __cnfn atan2pi(half3 y, half3 x);
-half4 __ovld __cnfn atan2pi(half4 y, half4 x);
-half8 __ovld __cnfn atan2pi(half8 y, half8 x);
-half16 __ovld __cnfn atan2pi(half16 y, half16 x);
+half __ovld __cnfn atan2pi(half, half);
+half2 __ovld __cnfn atan2pi(half2, half2);
+half3 __ovld __cnfn atan2pi(half3, half3);
+half4 __ovld __cnfn atan2pi(half4, half4);
+half8 __ovld __cnfn atan2pi(half8, half8);
+half16 __ovld __cnfn atan2pi(half16, half16);
#endif //cl_khr_fp16
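Equivalently, atan2pi folds the 1/pi scaling into the builtin; y and x below
stand for arbitrary floats:

    float a = atan2pi(y, x); /* same value as atan2(y, x) / M_PI_F */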
/**
@@ -6777,27 +6777,27 @@ half16 __ovld __cnfn ceil(half16);
/**
* Returns x with its sign changed to match the sign of y.
*/
-float __ovld __cnfn copysign(float x, float y);
-float2 __ovld __cnfn copysign(float2 x, float2 y);
-float3 __ovld __cnfn copysign(float3 x, float3 y);
-float4 __ovld __cnfn copysign(float4 x, float4 y);
-float8 __ovld __cnfn copysign(float8 x, float8 y);
-float16 __ovld __cnfn copysign(float16 x, float16 y);
+float __ovld __cnfn copysign(float, float);
+float2 __ovld __cnfn copysign(float2, float2);
+float3 __ovld __cnfn copysign(float3, float3);
+float4 __ovld __cnfn copysign(float4, float4);
+float8 __ovld __cnfn copysign(float8, float8);
+float16 __ovld __cnfn copysign(float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn copysign(double x, double y);
-double2 __ovld __cnfn copysign(double2 x, double2 y);
-double3 __ovld __cnfn copysign(double3 x, double3 y);
-double4 __ovld __cnfn copysign(double4 x, double4 y);
-double8 __ovld __cnfn copysign(double8 x, double8 y);
-double16 __ovld __cnfn copysign(double16 x, double16 y);
+double __ovld __cnfn copysign(double, double);
+double2 __ovld __cnfn copysign(double2, double2);
+double3 __ovld __cnfn copysign(double3, double3);
+double4 __ovld __cnfn copysign(double4, double4);
+double8 __ovld __cnfn copysign(double8, double8);
+double16 __ovld __cnfn copysign(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn copysign(half x, half y);
-half2 __ovld __cnfn copysign(half2 x, half2 y);
-half3 __ovld __cnfn copysign(half3 x, half3 y);
-half4 __ovld __cnfn copysign(half4 x, half4 y);
-half8 __ovld __cnfn copysign(half8 x, half8 y);
-half16 __ovld __cnfn copysign(half16 x, half16 y);
+half __ovld __cnfn copysign(half, half);
+half2 __ovld __cnfn copysign(half2, half2);
+half3 __ovld __cnfn copysign(half3, half3);
+half4 __ovld __cnfn copysign(half4, half4);
+half8 __ovld __cnfn copysign(half8, half8);
+half16 __ovld __cnfn copysign(half16, half16);
#endif //cl_khr_fp16
/**
@@ -6855,27 +6855,27 @@ half16 __ovld __cnfn cosh(half16);
/**
* Compute cos (PI * x).
*/
-float __ovld __cnfn cospi(float x);
-float2 __ovld __cnfn cospi(float2 x);
-float3 __ovld __cnfn cospi(float3 x);
-float4 __ovld __cnfn cospi(float4 x);
-float8 __ovld __cnfn cospi(float8 x);
-float16 __ovld __cnfn cospi(float16 x);
+float __ovld __cnfn cospi(float);
+float2 __ovld __cnfn cospi(float2);
+float3 __ovld __cnfn cospi(float3);
+float4 __ovld __cnfn cospi(float4);
+float8 __ovld __cnfn cospi(float8);
+float16 __ovld __cnfn cospi(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn cospi(double x);
-double2 __ovld __cnfn cospi(double2 x);
-double3 __ovld __cnfn cospi(double3 x);
-double4 __ovld __cnfn cospi(double4 x);
-double8 __ovld __cnfn cospi(double8 x);
-double16 __ovld __cnfn cospi(double16 x);
+double __ovld __cnfn cospi(double);
+double2 __ovld __cnfn cospi(double2);
+double3 __ovld __cnfn cospi(double3);
+double4 __ovld __cnfn cospi(double4);
+double8 __ovld __cnfn cospi(double8);
+double16 __ovld __cnfn cospi(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn cospi(half x);
-half2 __ovld __cnfn cospi(half2 x);
-half3 __ovld __cnfn cospi(half3 x);
-half4 __ovld __cnfn cospi(half4 x);
-half8 __ovld __cnfn cospi(half8 x);
-half16 __ovld __cnfn cospi(half16 x);
+half __ovld __cnfn cospi(half);
+half2 __ovld __cnfn cospi(half2);
+half3 __ovld __cnfn cospi(half3);
+half4 __ovld __cnfn cospi(half4);
+half8 __ovld __cnfn cospi(half8);
+half16 __ovld __cnfn cospi(half16);
#endif //cl_khr_fp16
/**
@@ -6934,27 +6934,27 @@ half16 __ovld __cnfn erf(half16);
/**
* Compute the base e exponential function of x.
*/
-float __ovld __cnfn exp(float x);
-float2 __ovld __cnfn exp(float2 x);
-float3 __ovld __cnfn exp(float3 x);
-float4 __ovld __cnfn exp(float4 x);
-float8 __ovld __cnfn exp(float8 x);
-float16 __ovld __cnfn exp(float16 x);
+float __ovld __cnfn exp(float);
+float2 __ovld __cnfn exp(float2);
+float3 __ovld __cnfn exp(float3);
+float4 __ovld __cnfn exp(float4);
+float8 __ovld __cnfn exp(float8);
+float16 __ovld __cnfn exp(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn exp(double x);
-double2 __ovld __cnfn exp(double2 x);
-double3 __ovld __cnfn exp(double3 x);
-double4 __ovld __cnfn exp(double4 x);
-double8 __ovld __cnfn exp(double8 x);
-double16 __ovld __cnfn exp(double16 x);
+double __ovld __cnfn exp(double);
+double2 __ovld __cnfn exp(double2);
+double3 __ovld __cnfn exp(double3);
+double4 __ovld __cnfn exp(double4);
+double8 __ovld __cnfn exp(double8);
+double16 __ovld __cnfn exp(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn exp(half x);
-half2 __ovld __cnfn exp(half2 x);
-half3 __ovld __cnfn exp(half3 x);
-half4 __ovld __cnfn exp(half4 x);
-half8 __ovld __cnfn exp(half8 x);
-half16 __ovld __cnfn exp(half16 x);
+half __ovld __cnfn exp(half);
+half2 __ovld __cnfn exp(half2);
+half3 __ovld __cnfn exp(half3);
+half4 __ovld __cnfn exp(half4);
+half8 __ovld __cnfn exp(half8);
+half16 __ovld __cnfn exp(half16);
#endif //cl_khr_fp16
/**
@@ -7012,27 +7012,27 @@ half16 __ovld __cnfn exp10(half16);
/**
* Compute e^x - 1.0.
*/
-float __ovld __cnfn expm1(float x);
-float2 __ovld __cnfn expm1(float2 x);
-float3 __ovld __cnfn expm1(float3 x);
-float4 __ovld __cnfn expm1(float4 x);
-float8 __ovld __cnfn expm1(float8 x);
-float16 __ovld __cnfn expm1(float16 x);
+float __ovld __cnfn expm1(float);
+float2 __ovld __cnfn expm1(float2);
+float3 __ovld __cnfn expm1(float3);
+float4 __ovld __cnfn expm1(float4);
+float8 __ovld __cnfn expm1(float8);
+float16 __ovld __cnfn expm1(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn expm1(double x);
-double2 __ovld __cnfn expm1(double2 x);
-double3 __ovld __cnfn expm1(double3 x);
-double4 __ovld __cnfn expm1(double4 x);
-double8 __ovld __cnfn expm1(double8 x);
-double16 __ovld __cnfn expm1(double16 x);
+double __ovld __cnfn expm1(double);
+double2 __ovld __cnfn expm1(double2);
+double3 __ovld __cnfn expm1(double3);
+double4 __ovld __cnfn expm1(double4);
+double8 __ovld __cnfn expm1(double8);
+double16 __ovld __cnfn expm1(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn expm1(half x);
-half2 __ovld __cnfn expm1(half2 x);
-half3 __ovld __cnfn expm1(half3 x);
-half4 __ovld __cnfn expm1(half4 x);
-half8 __ovld __cnfn expm1(half8 x);
-half16 __ovld __cnfn expm1(half16 x);
+half __ovld __cnfn expm1(half);
+half2 __ovld __cnfn expm1(half2);
+half3 __ovld __cnfn expm1(half3);
+half4 __ovld __cnfn expm1(half4);
+half8 __ovld __cnfn expm1(half8);
+half16 __ovld __cnfn expm1(half16);
#endif //cl_khr_fp16
/**
@@ -7064,27 +7064,27 @@ half16 __ovld __cnfn fabs(half16);
/**
* x - y if x > y, +0 if x is less than or equal to y.
*/
-float __ovld __cnfn fdim(float x, float y);
-float2 __ovld __cnfn fdim(float2 x, float2 y);
-float3 __ovld __cnfn fdim(float3 x, float3 y);
-float4 __ovld __cnfn fdim(float4 x, float4 y);
-float8 __ovld __cnfn fdim(float8 x, float8 y);
-float16 __ovld __cnfn fdim(float16 x, float16 y);
+float __ovld __cnfn fdim(float, float);
+float2 __ovld __cnfn fdim(float2, float2);
+float3 __ovld __cnfn fdim(float3, float3);
+float4 __ovld __cnfn fdim(float4, float4);
+float8 __ovld __cnfn fdim(float8, float8);
+float16 __ovld __cnfn fdim(float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn fdim(double x, double y);
-double2 __ovld __cnfn fdim(double2 x, double2 y);
-double3 __ovld __cnfn fdim(double3 x, double3 y);
-double4 __ovld __cnfn fdim(double4 x, double4 y);
-double8 __ovld __cnfn fdim(double8 x, double8 y);
-double16 __ovld __cnfn fdim(double16 x, double16 y);
+double __ovld __cnfn fdim(double, double);
+double2 __ovld __cnfn fdim(double2, double2);
+double3 __ovld __cnfn fdim(double3, double3);
+double4 __ovld __cnfn fdim(double4, double4);
+double8 __ovld __cnfn fdim(double8, double8);
+double16 __ovld __cnfn fdim(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn fdim(half x, half y);
-half2 __ovld __cnfn fdim(half2 x, half2 y);
-half3 __ovld __cnfn fdim(half3 x, half3 y);
-half4 __ovld __cnfn fdim(half4 x, half4 y);
-half8 __ovld __cnfn fdim(half8 x, half8 y);
-half16 __ovld __cnfn fdim(half16 x, half16 y);
+half __ovld __cnfn fdim(half, half);
+half2 __ovld __cnfn fdim(half2, half2);
+half3 __ovld __cnfn fdim(half3, half3);
+half4 __ovld __cnfn fdim(half4, half4);
+half8 __ovld __cnfn fdim(half8, half8);
+half16 __ovld __cnfn fdim(half16, half16);
#endif //cl_khr_fp16
/**
@@ -7121,27 +7121,27 @@ half16 __ovld __cnfn floor(half16);
* intermediate products shall not occur. Edge case
* behavior is per the IEEE 754-2008 standard.
*/
-float __ovld __cnfn fma(float a, float b, float c);
-float2 __ovld __cnfn fma(float2 a, float2 b, float2 c);
-float3 __ovld __cnfn fma(float3 a, float3 b, float3 c);
-float4 __ovld __cnfn fma(float4 a, float4 b, float4 c);
-float8 __ovld __cnfn fma(float8 a, float8 b, float8 c);
-float16 __ovld __cnfn fma(float16 a, float16 b, float16 c);
+float __ovld __cnfn fma(float, float, float);
+float2 __ovld __cnfn fma(float2, float2, float2);
+float3 __ovld __cnfn fma(float3, float3, float3);
+float4 __ovld __cnfn fma(float4, float4, float4);
+float8 __ovld __cnfn fma(float8, float8, float8);
+float16 __ovld __cnfn fma(float16, float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn fma(double a, double b, double c);
-double2 __ovld __cnfn fma(double2 a, double2 b, double2 c);
-double3 __ovld __cnfn fma(double3 a, double3 b, double3 c);
-double4 __ovld __cnfn fma(double4 a, double4 b, double4 c);
-double8 __ovld __cnfn fma(double8 a, double8 b, double8 c);
-double16 __ovld __cnfn fma(double16 a, double16 b, double16 c);
+double __ovld __cnfn fma(double, double, double);
+double2 __ovld __cnfn fma(double2, double2, double2);
+double3 __ovld __cnfn fma(double3, double3, double3);
+double4 __ovld __cnfn fma(double4, double4, double4);
+double8 __ovld __cnfn fma(double8, double8, double8);
+double16 __ovld __cnfn fma(double16, double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn fma(half a, half b, half c);
-half2 __ovld __cnfn fma(half2 a, half2 b, half2 c);
-half3 __ovld __cnfn fma(half3 a, half3 b, half3 c);
-half4 __ovld __cnfn fma(half4 a, half4 b, half4 c);
-half8 __ovld __cnfn fma(half8 a, half8 b, half8 c);
-half16 __ovld __cnfn fma(half16 a, half16 b, half16 c);
+half __ovld __cnfn fma(half, half, half);
+half2 __ovld __cnfn fma(half2, half2, half2);
+half3 __ovld __cnfn fma(half3, half3, half3);
+half4 __ovld __cnfn fma(half4, half4, half4);
+half8 __ovld __cnfn fma(half8, half8, half8);
+half16 __ovld __cnfn fma(half16, half16, half16);
#endif //cl_khr_fp16
/**
@@ -7150,42 +7150,42 @@ half16 __ovld __cnfn fma(half16 a, half16 b, half16 c);
* argument. If both arguments are NaNs, fmax()
* returns a NaN.
*/
-float __ovld __cnfn fmax(float x, float y);
-float2 __ovld __cnfn fmax(float2 x, float2 y);
-float3 __ovld __cnfn fmax(float3 x, float3 y);
-float4 __ovld __cnfn fmax(float4 x, float4 y);
-float8 __ovld __cnfn fmax(float8 x, float8 y);
-float16 __ovld __cnfn fmax(float16 x, float16 y);
-float2 __ovld __cnfn fmax(float2 x, float y);
-float3 __ovld __cnfn fmax(float3 x, float y);
-float4 __ovld __cnfn fmax(float4 x, float y);
-float8 __ovld __cnfn fmax(float8 x, float y);
-float16 __ovld __cnfn fmax(float16 x, float y);
+float __ovld __cnfn fmax(float, float);
+float2 __ovld __cnfn fmax(float2, float2);
+float3 __ovld __cnfn fmax(float3, float3);
+float4 __ovld __cnfn fmax(float4, float4);
+float8 __ovld __cnfn fmax(float8, float8);
+float16 __ovld __cnfn fmax(float16, float16);
+float2 __ovld __cnfn fmax(float2, float);
+float3 __ovld __cnfn fmax(float3, float);
+float4 __ovld __cnfn fmax(float4, float);
+float8 __ovld __cnfn fmax(float8, float);
+float16 __ovld __cnfn fmax(float16, float);
#ifdef cl_khr_fp64
-double __ovld __cnfn fmax(double x, double y);
-double2 __ovld __cnfn fmax(double2 x, double2 y);
-double3 __ovld __cnfn fmax(double3 x, double3 y);
-double4 __ovld __cnfn fmax(double4 x, double4 y);
-double8 __ovld __cnfn fmax(double8 x, double8 y);
-double16 __ovld __cnfn fmax(double16 x, double16 y);
-double2 __ovld __cnfn fmax(double2 x, double y);
-double3 __ovld __cnfn fmax(double3 x, double y);
-double4 __ovld __cnfn fmax(double4 x, double y);
-double8 __ovld __cnfn fmax(double8 x, double y);
-double16 __ovld __cnfn fmax(double16 x, double y);
+double __ovld __cnfn fmax(double, double);
+double2 __ovld __cnfn fmax(double2, double2);
+double3 __ovld __cnfn fmax(double3, double3);
+double4 __ovld __cnfn fmax(double4, double4);
+double8 __ovld __cnfn fmax(double8, double8);
+double16 __ovld __cnfn fmax(double16, double16);
+double2 __ovld __cnfn fmax(double2, double);
+double3 __ovld __cnfn fmax(double3, double);
+double4 __ovld __cnfn fmax(double4, double);
+double8 __ovld __cnfn fmax(double8, double);
+double16 __ovld __cnfn fmax(double16, double);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn fmax(half x, half y);
-half2 __ovld __cnfn fmax(half2 x, half2 y);
-half3 __ovld __cnfn fmax(half3 x, half3 y);
-half4 __ovld __cnfn fmax(half4 x, half4 y);
-half8 __ovld __cnfn fmax(half8 x, half8 y);
-half16 __ovld __cnfn fmax(half16 x, half16 y);
-half2 __ovld __cnfn fmax(half2 x, half y);
-half3 __ovld __cnfn fmax(half3 x, half y);
-half4 __ovld __cnfn fmax(half4 x, half y);
-half8 __ovld __cnfn fmax(half8 x, half y);
-half16 __ovld __cnfn fmax(half16 x, half y);
+half __ovld __cnfn fmax(half, half);
+half2 __ovld __cnfn fmax(half2, half2);
+half3 __ovld __cnfn fmax(half3, half3);
+half4 __ovld __cnfn fmax(half4, half4);
+half8 __ovld __cnfn fmax(half8, half8);
+half16 __ovld __cnfn fmax(half16, half16);
+half2 __ovld __cnfn fmax(half2, half);
+half3 __ovld __cnfn fmax(half3, half);
+half4 __ovld __cnfn fmax(half4, half);
+half8 __ovld __cnfn fmax(half8, half);
+half16 __ovld __cnfn fmax(half16, half);
#endif //cl_khr_fp16
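/* Editorial sketch, not part of the upstream diff: the vector-scalar
 * overloads above apply one scalar bound to every lane; kernel and
 * argument names are illustrative. */
__kernel void clamp_floor(__global float4 *v, float lo) {
    size_t i = get_global_id(0);
    v[i] = fmax(v[i], lo); // uses the fmax(float4, float) overload
}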
/**
@@ -7194,68 +7194,68 @@ half16 __ovld __cnfn fmax(half16 x, half y);
* argument. If both arguments are NaNs, fmin()
* returns a NaN.
*/
-float __ovld __cnfn fmin(float x, float y);
-float2 __ovld __cnfn fmin(float2 x, float2 y);
-float3 __ovld __cnfn fmin(float3 x, float3 y);
-float4 __ovld __cnfn fmin(float4 x, float4 y);
-float8 __ovld __cnfn fmin(float8 x, float8 y);
-float16 __ovld __cnfn fmin(float16 x, float16 y);
-float2 __ovld __cnfn fmin(float2 x, float y);
-float3 __ovld __cnfn fmin(float3 x, float y);
-float4 __ovld __cnfn fmin(float4 x, float y);
-float8 __ovld __cnfn fmin(float8 x, float y);
-float16 __ovld __cnfn fmin(float16 x, float y);
+float __ovld __cnfn fmin(float, float);
+float2 __ovld __cnfn fmin(float2, float2);
+float3 __ovld __cnfn fmin(float3, float3);
+float4 __ovld __cnfn fmin(float4, float4);
+float8 __ovld __cnfn fmin(float8, float8);
+float16 __ovld __cnfn fmin(float16, float16);
+float2 __ovld __cnfn fmin(float2, float);
+float3 __ovld __cnfn fmin(float3, float);
+float4 __ovld __cnfn fmin(float4, float);
+float8 __ovld __cnfn fmin(float8, float);
+float16 __ovld __cnfn fmin(float16, float);
#ifdef cl_khr_fp64
-double __ovld __cnfn fmin(double x, double y);
-double2 __ovld __cnfn fmin(double2 x, double2 y);
-double3 __ovld __cnfn fmin(double3 x, double3 y);
-double4 __ovld __cnfn fmin(double4 x, double4 y);
-double8 __ovld __cnfn fmin(double8 x, double8 y);
-double16 __ovld __cnfn fmin(double16 x, double16 y);
-double2 __ovld __cnfn fmin(double2 x, double y);
-double3 __ovld __cnfn fmin(double3 x, double y);
-double4 __ovld __cnfn fmin(double4 x, double y);
-double8 __ovld __cnfn fmin(double8 x, double y);
-double16 __ovld __cnfn fmin(double16 x, double y);
+double __ovld __cnfn fmin(double, double);
+double2 __ovld __cnfn fmin(double2, double2);
+double3 __ovld __cnfn fmin(double3, double3);
+double4 __ovld __cnfn fmin(double4, double4);
+double8 __ovld __cnfn fmin(double8, double8);
+double16 __ovld __cnfn fmin(double16, double16);
+double2 __ovld __cnfn fmin(double2, double);
+double3 __ovld __cnfn fmin(double3, double);
+double4 __ovld __cnfn fmin(double4, double);
+double8 __ovld __cnfn fmin(double8, double);
+double16 __ovld __cnfn fmin(double16, double);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn fmin(half x, half y);
-half2 __ovld __cnfn fmin(half2 x, half2 y);
-half3 __ovld __cnfn fmin(half3 x, half3 y);
-half4 __ovld __cnfn fmin(half4 x, half4 y);
-half8 __ovld __cnfn fmin(half8 x, half8 y);
-half16 __ovld __cnfn fmin(half16 x, half16 y);
-half2 __ovld __cnfn fmin(half2 x, half y);
-half3 __ovld __cnfn fmin(half3 x, half y);
-half4 __ovld __cnfn fmin(half4 x, half y);
-half8 __ovld __cnfn fmin(half8 x, half y);
-half16 __ovld __cnfn fmin(half16 x, half y);
+half __ovld __cnfn fmin(half, half);
+half2 __ovld __cnfn fmin(half2, half2);
+half3 __ovld __cnfn fmin(half3, half3);
+half4 __ovld __cnfn fmin(half4, half4);
+half8 __ovld __cnfn fmin(half8, half8);
+half16 __ovld __cnfn fmin(half16, half16);
+half2 __ovld __cnfn fmin(half2, half);
+half3 __ovld __cnfn fmin(half3, half);
+half4 __ovld __cnfn fmin(half4, half);
+half8 __ovld __cnfn fmin(half8, half);
+half16 __ovld __cnfn fmin(half16, half);
#endif //cl_khr_fp16
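/* Editorial sketch, not part of the upstream diff: composing the fmin and
 * fmax overloads declared above yields a per-lane range clamp; names are
 * illustrative. */
__kernel void clamp_range(__global float4 *v, float lo, float hi) {
    size_t i = get_global_id(0);
    v[i] = fmin(fmax(v[i], lo), hi); // each lane ends up in [lo, hi]
}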
/**
* Modulus. Returns x - y * trunc (x/y).
*/
-float __ovld __cnfn fmod(float x, float y);
-float2 __ovld __cnfn fmod(float2 x, float2 y);
-float3 __ovld __cnfn fmod(float3 x, float3 y);
-float4 __ovld __cnfn fmod(float4 x, float4 y);
-float8 __ovld __cnfn fmod(float8 x, float8 y);
-float16 __ovld __cnfn fmod(float16 x, float16 y);
+float __ovld __cnfn fmod(float, float);
+float2 __ovld __cnfn fmod(float2, float2);
+float3 __ovld __cnfn fmod(float3, float3);
+float4 __ovld __cnfn fmod(float4, float4);
+float8 __ovld __cnfn fmod(float8, float8);
+float16 __ovld __cnfn fmod(float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn fmod(double x, double y);
-double2 __ovld __cnfn fmod(double2 x, double2 y);
-double3 __ovld __cnfn fmod(double3 x, double3 y);
-double4 __ovld __cnfn fmod(double4 x, double4 y);
-double8 __ovld __cnfn fmod(double8 x, double8 y);
-double16 __ovld __cnfn fmod(double16 x, double16 y);
+double __ovld __cnfn fmod(double, double);
+double2 __ovld __cnfn fmod(double2, double2);
+double3 __ovld __cnfn fmod(double3, double3);
+double4 __ovld __cnfn fmod(double4, double4);
+double8 __ovld __cnfn fmod(double8, double8);
+double16 __ovld __cnfn fmod(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn fmod(half x, half y);
-half2 __ovld __cnfn fmod(half2 x, half2 y);
-half3 __ovld __cnfn fmod(half3 x, half3 y);
-half4 __ovld __cnfn fmod(half4 x, half4 y);
-half8 __ovld __cnfn fmod(half8 x, half8 y);
-half16 __ovld __cnfn fmod(half16 x, half16 y);
+half __ovld __cnfn fmod(half, half);
+half2 __ovld __cnfn fmod(half2, half2);
+half3 __ovld __cnfn fmod(half3, half3);
+half4 __ovld __cnfn fmod(half4, half4);
+half8 __ovld __cnfn fmod(half8, half8);
+half16 __ovld __cnfn fmod(half16, half16);
#endif //cl_khr_fp16
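/* Editorial sketch, not part of the upstream diff: fmod follows the
 * formula in the comment above, x - y * trunc(x/y), so the result keeps
 * the sign of x; kernel name is illustrative. */
__kernel void wrap_angle(__global float *a) {
    size_t i = get_global_id(0);
    a[i] = fmod(a[i], 2.0f * M_PI_F); // magnitude < 2*pi, sign follows a[i]
}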
/**
@@ -7263,88 +7263,88 @@ half16 __ovld __cnfn fmod(half16 x, half16 y);
* floor(x) is returned in iptr.
*/
#if defined(__opencl_c_generic_address_space)
-float __ovld fract(float x, float *iptr);
-float2 __ovld fract(float2 x, float2 *iptr);
-float3 __ovld fract(float3 x, float3 *iptr);
-float4 __ovld fract(float4 x, float4 *iptr);
-float8 __ovld fract(float8 x, float8 *iptr);
-float16 __ovld fract(float16 x, float16 *iptr);
+float __ovld fract(float, float *);
+float2 __ovld fract(float2, float2 *);
+float3 __ovld fract(float3, float3 *);
+float4 __ovld fract(float4, float4 *);
+float8 __ovld fract(float8, float8 *);
+float16 __ovld fract(float16, float16 *);
#ifdef cl_khr_fp64
-double __ovld fract(double x, double *iptr);
-double2 __ovld fract(double2 x, double2 *iptr);
-double3 __ovld fract(double3 x, double3 *iptr);
-double4 __ovld fract(double4 x, double4 *iptr);
-double8 __ovld fract(double8 x, double8 *iptr);
-double16 __ovld fract(double16 x, double16 *iptr);
+double __ovld fract(double, double *);
+double2 __ovld fract(double2, double2 *);
+double3 __ovld fract(double3, double3 *);
+double4 __ovld fract(double4, double4 *);
+double8 __ovld fract(double8, double8 *);
+double16 __ovld fract(double16, double16 *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld fract(half x, half *iptr);
-half2 __ovld fract(half2 x, half2 *iptr);
-half3 __ovld fract(half3 x, half3 *iptr);
-half4 __ovld fract(half4 x, half4 *iptr);
-half8 __ovld fract(half8 x, half8 *iptr);
-half16 __ovld fract(half16 x, half16 *iptr);
+half __ovld fract(half, half *);
+half2 __ovld fract(half2, half2 *);
+half3 __ovld fract(half3, half3 *);
+half4 __ovld fract(half4, half4 *);
+half8 __ovld fract(half8, half8 *);
+half16 __ovld fract(half16, half16 *);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
-float __ovld fract(float x, __global float *iptr);
-float2 __ovld fract(float2 x, __global float2 *iptr);
-float3 __ovld fract(float3 x, __global float3 *iptr);
-float4 __ovld fract(float4 x, __global float4 *iptr);
-float8 __ovld fract(float8 x, __global float8 *iptr);
-float16 __ovld fract(float16 x, __global float16 *iptr);
-float __ovld fract(float x, __local float *iptr);
-float2 __ovld fract(float2 x, __local float2 *iptr);
-float3 __ovld fract(float3 x, __local float3 *iptr);
-float4 __ovld fract(float4 x, __local float4 *iptr);
-float8 __ovld fract(float8 x, __local float8 *iptr);
-float16 __ovld fract(float16 x, __local float16 *iptr);
-float __ovld fract(float x, __private float *iptr);
-float2 __ovld fract(float2 x, __private float2 *iptr);
-float3 __ovld fract(float3 x, __private float3 *iptr);
-float4 __ovld fract(float4 x, __private float4 *iptr);
-float8 __ovld fract(float8 x, __private float8 *iptr);
-float16 __ovld fract(float16 x, __private float16 *iptr);
+float __ovld fract(float, __global float *);
+float2 __ovld fract(float2, __global float2 *);
+float3 __ovld fract(float3, __global float3 *);
+float4 __ovld fract(float4, __global float4 *);
+float8 __ovld fract(float8, __global float8 *);
+float16 __ovld fract(float16, __global float16 *);
+float __ovld fract(float, __local float *);
+float2 __ovld fract(float2, __local float2 *);
+float3 __ovld fract(float3, __local float3 *);
+float4 __ovld fract(float4, __local float4 *);
+float8 __ovld fract(float8, __local float8 *);
+float16 __ovld fract(float16, __local float16 *);
+float __ovld fract(float, __private float *);
+float2 __ovld fract(float2, __private float2 *);
+float3 __ovld fract(float3, __private float3 *);
+float4 __ovld fract(float4, __private float4 *);
+float8 __ovld fract(float8, __private float8 *);
+float16 __ovld fract(float16, __private float16 *);
#ifdef cl_khr_fp64
-double __ovld fract(double x, __global double *iptr);
-double2 __ovld fract(double2 x, __global double2 *iptr);
-double3 __ovld fract(double3 x, __global double3 *iptr);
-double4 __ovld fract(double4 x, __global double4 *iptr);
-double8 __ovld fract(double8 x, __global double8 *iptr);
-double16 __ovld fract(double16 x, __global double16 *iptr);
-double __ovld fract(double x, __local double *iptr);
-double2 __ovld fract(double2 x, __local double2 *iptr);
-double3 __ovld fract(double3 x, __local double3 *iptr);
-double4 __ovld fract(double4 x, __local double4 *iptr);
-double8 __ovld fract(double8 x, __local double8 *iptr);
-double16 __ovld fract(double16 x, __local double16 *iptr);
-double __ovld fract(double x, __private double *iptr);
-double2 __ovld fract(double2 x, __private double2 *iptr);
-double3 __ovld fract(double3 x, __private double3 *iptr);
-double4 __ovld fract(double4 x, __private double4 *iptr);
-double8 __ovld fract(double8 x, __private double8 *iptr);
-double16 __ovld fract(double16 x, __private double16 *iptr);
+double __ovld fract(double, __global double *);
+double2 __ovld fract(double2, __global double2 *);
+double3 __ovld fract(double3, __global double3 *);
+double4 __ovld fract(double4, __global double4 *);
+double8 __ovld fract(double8, __global double8 *);
+double16 __ovld fract(double16, __global double16 *);
+double __ovld fract(double, __local double *);
+double2 __ovld fract(double2, __local double2 *);
+double3 __ovld fract(double3, __local double3 *);
+double4 __ovld fract(double4, __local double4 *);
+double8 __ovld fract(double8, __local double8 *);
+double16 __ovld fract(double16, __local double16 *);
+double __ovld fract(double, __private double *);
+double2 __ovld fract(double2, __private double2 *);
+double3 __ovld fract(double3, __private double3 *);
+double4 __ovld fract(double4, __private double4 *);
+double8 __ovld fract(double8, __private double8 *);
+double16 __ovld fract(double16, __private double16 *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld fract(half x, __global half *iptr);
-half2 __ovld fract(half2 x, __global half2 *iptr);
-half3 __ovld fract(half3 x, __global half3 *iptr);
-half4 __ovld fract(half4 x, __global half4 *iptr);
-half8 __ovld fract(half8 x, __global half8 *iptr);
-half16 __ovld fract(half16 x, __global half16 *iptr);
-half __ovld fract(half x, __local half *iptr);
-half2 __ovld fract(half2 x, __local half2 *iptr);
-half3 __ovld fract(half3 x, __local half3 *iptr);
-half4 __ovld fract(half4 x, __local half4 *iptr);
-half8 __ovld fract(half8 x, __local half8 *iptr);
-half16 __ovld fract(half16 x, __local half16 *iptr);
-half __ovld fract(half x, __private half *iptr);
-half2 __ovld fract(half2 x, __private half2 *iptr);
-half3 __ovld fract(half3 x, __private half3 *iptr);
-half4 __ovld fract(half4 x, __private half4 *iptr);
-half8 __ovld fract(half8 x, __private half8 *iptr);
-half16 __ovld fract(half16 x, __private half16 *iptr);
+half __ovld fract(half, __global half *);
+half2 __ovld fract(half2, __global half2 *);
+half3 __ovld fract(half3, __global half3 *);
+half4 __ovld fract(half4, __global half4 *);
+half8 __ovld fract(half8, __global half8 *);
+half16 __ovld fract(half16, __global half16 *);
+half __ovld fract(half, __local half *);
+half2 __ovld fract(half2, __local half2 *);
+half3 __ovld fract(half3, __local half3 *);
+half4 __ovld fract(half4, __local half4 *);
+half8 __ovld fract(half8, __local half8 *);
+half16 __ovld fract(half16, __local half16 *);
+half __ovld fract(half, __private half *);
+half2 __ovld fract(half2, __private half2 *);
+half3 __ovld fract(half3, __private half3 *);
+half4 __ovld fract(half4, __private half4 *);
+half8 __ovld fract(half8, __private half8 *);
+half16 __ovld fract(half16, __private half16 *);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_named_address_space_builtins)
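/* Editorial sketch, not part of the upstream diff: fract writes floor(x)
 * through the pointer (per the comment above); taking the address of a
 * local variable selects the __private overload. Names are illustrative. */
__kernel void split_fract(__global float *x, __global float *fl) {
    size_t i = get_global_id(0);
    float whole;                 // locals are __private by default
    x[i] = fract(x[i], &whole);  // fract(float, __private float *)
    fl[i] = whole;
}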
@@ -7355,88 +7355,88 @@ half16 __ovld fract(half16 x, __private half16 *iptr);
* component of x equals mantissa returned * 2^exp.
*/
#if defined(__opencl_c_generic_address_space)
-float __ovld frexp(float x, int *exp);
-float2 __ovld frexp(float2 x, int2 *exp);
-float3 __ovld frexp(float3 x, int3 *exp);
-float4 __ovld frexp(float4 x, int4 *exp);
-float8 __ovld frexp(float8 x, int8 *exp);
-float16 __ovld frexp(float16 x, int16 *exp);
+float __ovld frexp(float, int *);
+float2 __ovld frexp(float2, int2 *);
+float3 __ovld frexp(float3, int3 *);
+float4 __ovld frexp(float4, int4 *);
+float8 __ovld frexp(float8, int8 *);
+float16 __ovld frexp(float16, int16 *);
#ifdef cl_khr_fp64
-double __ovld frexp(double x, int *exp);
-double2 __ovld frexp(double2 x, int2 *exp);
-double3 __ovld frexp(double3 x, int3 *exp);
-double4 __ovld frexp(double4 x, int4 *exp);
-double8 __ovld frexp(double8 x, int8 *exp);
-double16 __ovld frexp(double16 x, int16 *exp);
+double __ovld frexp(double, int *);
+double2 __ovld frexp(double2, int2 *);
+double3 __ovld frexp(double3, int3 *);
+double4 __ovld frexp(double4, int4 *);
+double8 __ovld frexp(double8, int8 *);
+double16 __ovld frexp(double16, int16 *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld frexp(half x, int *exp);
-half2 __ovld frexp(half2 x, int2 *exp);
-half3 __ovld frexp(half3 x, int3 *exp);
-half4 __ovld frexp(half4 x, int4 *exp);
-half8 __ovld frexp(half8 x, int8 *exp);
-half16 __ovld frexp(half16 x, int16 *exp);
+half __ovld frexp(half, int *);
+half2 __ovld frexp(half2, int2 *);
+half3 __ovld frexp(half3, int3 *);
+half4 __ovld frexp(half4, int4 *);
+half8 __ovld frexp(half8, int8 *);
+half16 __ovld frexp(half16, int16 *);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
-float __ovld frexp(float x, __global int *exp);
-float2 __ovld frexp(float2 x, __global int2 *exp);
-float3 __ovld frexp(float3 x, __global int3 *exp);
-float4 __ovld frexp(float4 x, __global int4 *exp);
-float8 __ovld frexp(float8 x, __global int8 *exp);
-float16 __ovld frexp(float16 x, __global int16 *exp);
-float __ovld frexp(float x, __local int *exp);
-float2 __ovld frexp(float2 x, __local int2 *exp);
-float3 __ovld frexp(float3 x, __local int3 *exp);
-float4 __ovld frexp(float4 x, __local int4 *exp);
-float8 __ovld frexp(float8 x, __local int8 *exp);
-float16 __ovld frexp(float16 x, __local int16 *exp);
-float __ovld frexp(float x, __private int *exp);
-float2 __ovld frexp(float2 x, __private int2 *exp);
-float3 __ovld frexp(float3 x, __private int3 *exp);
-float4 __ovld frexp(float4 x, __private int4 *exp);
-float8 __ovld frexp(float8 x, __private int8 *exp);
-float16 __ovld frexp(float16 x, __private int16 *exp);
+float __ovld frexp(float, __global int *);
+float2 __ovld frexp(float2, __global int2 *);
+float3 __ovld frexp(float3, __global int3 *);
+float4 __ovld frexp(float4, __global int4 *);
+float8 __ovld frexp(float8, __global int8 *);
+float16 __ovld frexp(float16, __global int16 *);
+float __ovld frexp(float, __local int *);
+float2 __ovld frexp(float2, __local int2 *);
+float3 __ovld frexp(float3, __local int3 *);
+float4 __ovld frexp(float4, __local int4 *);
+float8 __ovld frexp(float8, __local int8 *);
+float16 __ovld frexp(float16, __local int16 *);
+float __ovld frexp(float, __private int *);
+float2 __ovld frexp(float2, __private int2 *);
+float3 __ovld frexp(float3, __private int3 *);
+float4 __ovld frexp(float4, __private int4 *);
+float8 __ovld frexp(float8, __private int8 *);
+float16 __ovld frexp(float16, __private int16 *);
#ifdef cl_khr_fp64
-double __ovld frexp(double x, __global int *exp);
-double2 __ovld frexp(double2 x, __global int2 *exp);
-double3 __ovld frexp(double3 x, __global int3 *exp);
-double4 __ovld frexp(double4 x, __global int4 *exp);
-double8 __ovld frexp(double8 x, __global int8 *exp);
-double16 __ovld frexp(double16 x, __global int16 *exp);
-double __ovld frexp(double x, __local int *exp);
-double2 __ovld frexp(double2 x, __local int2 *exp);
-double3 __ovld frexp(double3 x, __local int3 *exp);
-double4 __ovld frexp(double4 x, __local int4 *exp);
-double8 __ovld frexp(double8 x, __local int8 *exp);
-double16 __ovld frexp(double16 x, __local int16 *exp);
-double __ovld frexp(double x, __private int *exp);
-double2 __ovld frexp(double2 x, __private int2 *exp);
-double3 __ovld frexp(double3 x, __private int3 *exp);
-double4 __ovld frexp(double4 x, __private int4 *exp);
-double8 __ovld frexp(double8 x, __private int8 *exp);
-double16 __ovld frexp(double16 x, __private int16 *exp);
+double __ovld frexp(double, __global int *);
+double2 __ovld frexp(double2, __global int2 *);
+double3 __ovld frexp(double3, __global int3 *);
+double4 __ovld frexp(double4, __global int4 *);
+double8 __ovld frexp(double8, __global int8 *);
+double16 __ovld frexp(double16, __global int16 *);
+double __ovld frexp(double, __local int *);
+double2 __ovld frexp(double2, __local int2 *);
+double3 __ovld frexp(double3, __local int3 *);
+double4 __ovld frexp(double4, __local int4 *);
+double8 __ovld frexp(double8, __local int8 *);
+double16 __ovld frexp(double16, __local int16 *);
+double __ovld frexp(double, __private int *);
+double2 __ovld frexp(double2, __private int2 *);
+double3 __ovld frexp(double3, __private int3 *);
+double4 __ovld frexp(double4, __private int4 *);
+double8 __ovld frexp(double8, __private int8 *);
+double16 __ovld frexp(double16, __private int16 *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld frexp(half x, __global int *exp);
-half2 __ovld frexp(half2 x, __global int2 *exp);
-half3 __ovld frexp(half3 x, __global int3 *exp);
-half4 __ovld frexp(half4 x, __global int4 *exp);
-half8 __ovld frexp(half8 x, __global int8 *exp);
-half16 __ovld frexp(half16 x, __global int16 *exp);
-half __ovld frexp(half x, __local int *exp);
-half2 __ovld frexp(half2 x, __local int2 *exp);
-half3 __ovld frexp(half3 x, __local int3 *exp);
-half4 __ovld frexp(half4 x, __local int4 *exp);
-half8 __ovld frexp(half8 x, __local int8 *exp);
-half16 __ovld frexp(half16 x, __local int16 *exp);
-half __ovld frexp(half x, __private int *exp);
-half2 __ovld frexp(half2 x, __private int2 *exp);
-half3 __ovld frexp(half3 x, __private int3 *exp);
-half4 __ovld frexp(half4 x, __private int4 *exp);
-half8 __ovld frexp(half8 x, __private int8 *exp);
-half16 __ovld frexp(half16 x, __private int16 *exp);
+half __ovld frexp(half, __global int *);
+half2 __ovld frexp(half2, __global int2 *);
+half3 __ovld frexp(half3, __global int3 *);
+half4 __ovld frexp(half4, __global int4 *);
+half8 __ovld frexp(half8, __global int8 *);
+half16 __ovld frexp(half16, __global int16 *);
+half __ovld frexp(half, __local int *);
+half2 __ovld frexp(half2, __local int2 *);
+half3 __ovld frexp(half3, __local int3 *);
+half4 __ovld frexp(half4, __local int4 *);
+half8 __ovld frexp(half8, __local int8 *);
+half16 __ovld frexp(half16, __local int16 *);
+half __ovld frexp(half, __private int *);
+half2 __ovld frexp(half2, __private int2 *);
+half3 __ovld frexp(half3, __private int3 *);
+half4 __ovld frexp(half4, __private int4 *);
+half8 __ovld frexp(half8, __private int8 *);
+half16 __ovld frexp(half16, __private int16 *);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_named_address_space_builtins)
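/* Editorial sketch, not part of the upstream diff: frexp splits x into a
 * mantissa and an integer exponent written through the int pointer, so
 * that x == m * 2^e as the comment above states; names are illustrative. */
__kernel void decompose(__global float *x, __global int *e) {
    size_t i = get_global_id(0);
    int ex;
    float m = frexp(x[i], &ex); // x[i] == m * 2^ex
    x[i] = m;
    e[i] = ex;
}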
@@ -7444,94 +7444,94 @@ half16 __ovld frexp(half16 x, __private int16 *exp);
* Compute the value of the square root of x^2 + y^2
* without undue overflow or underflow.
*/
-float __ovld __cnfn hypot(float x, float y);
-float2 __ovld __cnfn hypot(float2 x, float2 y);
-float3 __ovld __cnfn hypot(float3 x, float3 y);
-float4 __ovld __cnfn hypot(float4 x, float4 y);
-float8 __ovld __cnfn hypot(float8 x, float8 y);
-float16 __ovld __cnfn hypot(float16 x, float16 y);
+float __ovld __cnfn hypot(float, float);
+float2 __ovld __cnfn hypot(float2, float2);
+float3 __ovld __cnfn hypot(float3, float3);
+float4 __ovld __cnfn hypot(float4, float4);
+float8 __ovld __cnfn hypot(float8, float8);
+float16 __ovld __cnfn hypot(float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn hypot(double x, double y);
-double2 __ovld __cnfn hypot(double2 x, double2 y);
-double3 __ovld __cnfn hypot(double3 x, double3 y);
-double4 __ovld __cnfn hypot(double4 x, double4 y);
-double8 __ovld __cnfn hypot(double8 x, double8 y);
-double16 __ovld __cnfn hypot(double16 x, double16 y);
+double __ovld __cnfn hypot(double, double);
+double2 __ovld __cnfn hypot(double2, double2);
+double3 __ovld __cnfn hypot(double3, double3);
+double4 __ovld __cnfn hypot(double4, double4);
+double8 __ovld __cnfn hypot(double8, double8);
+double16 __ovld __cnfn hypot(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn hypot(half x, half y);
-half2 __ovld __cnfn hypot(half2 x, half2 y);
-half3 __ovld __cnfn hypot(half3 x, half3 y);
-half4 __ovld __cnfn hypot(half4 x, half4 y);
-half8 __ovld __cnfn hypot(half8 x, half8 y);
-half16 __ovld __cnfn hypot(half16 x, half16 y);
+half __ovld __cnfn hypot(half, half);
+half2 __ovld __cnfn hypot(half2, half2);
+half3 __ovld __cnfn hypot(half3, half3);
+half4 __ovld __cnfn hypot(half4, half4);
+half8 __ovld __cnfn hypot(half8, half8);
+half16 __ovld __cnfn hypot(half16, half16);
#endif //cl_khr_fp16
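/* Editorial sketch, not part of the upstream diff: hypot avoids the
 * overflow a naive sqrt(x*x + y*y) would hit for large inputs; kernel
 * name is illustrative. */
__kernel void magnitude(__global const float2 *p, __global float *r) {
    size_t i = get_global_id(0);
    r[i] = hypot(p[i].x, p[i].y); // scalar overload, no undue overflow
}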
/**
* Return the exponent as an integer value.
*/
-int __ovld __cnfn ilogb(float x);
-int2 __ovld __cnfn ilogb(float2 x);
-int3 __ovld __cnfn ilogb(float3 x);
-int4 __ovld __cnfn ilogb(float4 x);
-int8 __ovld __cnfn ilogb(float8 x);
-int16 __ovld __cnfn ilogb(float16 x);
+int __ovld __cnfn ilogb(float);
+int2 __ovld __cnfn ilogb(float2);
+int3 __ovld __cnfn ilogb(float3);
+int4 __ovld __cnfn ilogb(float4);
+int8 __ovld __cnfn ilogb(float8);
+int16 __ovld __cnfn ilogb(float16);
#ifdef cl_khr_fp64
-int __ovld __cnfn ilogb(double x);
-int2 __ovld __cnfn ilogb(double2 x);
-int3 __ovld __cnfn ilogb(double3 x);
-int4 __ovld __cnfn ilogb(double4 x);
-int8 __ovld __cnfn ilogb(double8 x);
-int16 __ovld __cnfn ilogb(double16 x);
+int __ovld __cnfn ilogb(double);
+int2 __ovld __cnfn ilogb(double2);
+int3 __ovld __cnfn ilogb(double3);
+int4 __ovld __cnfn ilogb(double4);
+int8 __ovld __cnfn ilogb(double8);
+int16 __ovld __cnfn ilogb(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-int __ovld __cnfn ilogb(half x);
-int2 __ovld __cnfn ilogb(half2 x);
-int3 __ovld __cnfn ilogb(half3 x);
-int4 __ovld __cnfn ilogb(half4 x);
-int8 __ovld __cnfn ilogb(half8 x);
-int16 __ovld __cnfn ilogb(half16 x);
+int __ovld __cnfn ilogb(half);
+int2 __ovld __cnfn ilogb(half2);
+int3 __ovld __cnfn ilogb(half3);
+int4 __ovld __cnfn ilogb(half4);
+int8 __ovld __cnfn ilogb(half8);
+int16 __ovld __cnfn ilogb(half16);
#endif //cl_khr_fp16
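/* Editorial sketch, not part of the upstream diff: ilogb returns the
 * unbiased binary exponent as an int, e.g. ilogb(1024.0f) == 10; the
 * vector overloads above return intN lanewise. */
__kernel void exponents(__global const float4 *x, __global int4 *e) {
    size_t i = get_global_id(0);
    e[i] = ilogb(x[i]); // ilogb(float4) -> int4
}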
/**
* Multiply x by 2 to the power n.
*/
-float __ovld __cnfn ldexp(float x, int n);
-float2 __ovld __cnfn ldexp(float2 x, int2 n);
-float3 __ovld __cnfn ldexp(float3 x, int3 n);
-float4 __ovld __cnfn ldexp(float4 x, int4 n);
-float8 __ovld __cnfn ldexp(float8 x, int8 n);
-float16 __ovld __cnfn ldexp(float16 x, int16 n);
-float2 __ovld __cnfn ldexp(float2 x, int n);
-float3 __ovld __cnfn ldexp(float3 x, int n);
-float4 __ovld __cnfn ldexp(float4 x, int n);
-float8 __ovld __cnfn ldexp(float8 x, int n);
-float16 __ovld __cnfn ldexp(float16 x, int n);
+float __ovld __cnfn ldexp(float, int);
+float2 __ovld __cnfn ldexp(float2, int2);
+float3 __ovld __cnfn ldexp(float3, int3);
+float4 __ovld __cnfn ldexp(float4, int4);
+float8 __ovld __cnfn ldexp(float8, int8);
+float16 __ovld __cnfn ldexp(float16, int16);
+float2 __ovld __cnfn ldexp(float2, int);
+float3 __ovld __cnfn ldexp(float3, int);
+float4 __ovld __cnfn ldexp(float4, int);
+float8 __ovld __cnfn ldexp(float8, int);
+float16 __ovld __cnfn ldexp(float16, int);
#ifdef cl_khr_fp64
-double __ovld __cnfn ldexp(double x, int n);
-double2 __ovld __cnfn ldexp(double2 x, int2 n);
-double3 __ovld __cnfn ldexp(double3 x, int3 n);
-double4 __ovld __cnfn ldexp(double4 x, int4 n);
-double8 __ovld __cnfn ldexp(double8 x, int8 n);
-double16 __ovld __cnfn ldexp(double16 x, int16 n);
-double2 __ovld __cnfn ldexp(double2 x, int n);
-double3 __ovld __cnfn ldexp(double3 x, int n);
-double4 __ovld __cnfn ldexp(double4 x, int n);
-double8 __ovld __cnfn ldexp(double8 x, int n);
-double16 __ovld __cnfn ldexp(double16 x, int n);
+double __ovld __cnfn ldexp(double, int);
+double2 __ovld __cnfn ldexp(double2, int2);
+double3 __ovld __cnfn ldexp(double3, int3);
+double4 __ovld __cnfn ldexp(double4, int4);
+double8 __ovld __cnfn ldexp(double8, int8);
+double16 __ovld __cnfn ldexp(double16, int16);
+double2 __ovld __cnfn ldexp(double2, int);
+double3 __ovld __cnfn ldexp(double3, int);
+double4 __ovld __cnfn ldexp(double4, int);
+double8 __ovld __cnfn ldexp(double8, int);
+double16 __ovld __cnfn ldexp(double16, int);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn ldexp(half x, int n);
-half2 __ovld __cnfn ldexp(half2 x, int2 n);
-half3 __ovld __cnfn ldexp(half3 x, int3 n);
-half4 __ovld __cnfn ldexp(half4 x, int4 n);
-half8 __ovld __cnfn ldexp(half8 x, int8 n);
-half16 __ovld __cnfn ldexp(half16 x, int16 n);
-half2 __ovld __cnfn ldexp(half2 x, int n);
-half3 __ovld __cnfn ldexp(half3 x, int n);
-half4 __ovld __cnfn ldexp(half4 x, int n);
-half8 __ovld __cnfn ldexp(half8 x, int n);
-half16 __ovld __cnfn ldexp(half16 x, int n);
+half __ovld __cnfn ldexp(half, int);
+half2 __ovld __cnfn ldexp(half2, int2);
+half3 __ovld __cnfn ldexp(half3, int3);
+half4 __ovld __cnfn ldexp(half4, int4);
+half8 __ovld __cnfn ldexp(half8, int8);
+half16 __ovld __cnfn ldexp(half16, int16);
+half2 __ovld __cnfn ldexp(half2, int);
+half3 __ovld __cnfn ldexp(half3, int);
+half4 __ovld __cnfn ldexp(half4, int);
+half8 __ovld __cnfn ldexp(half8, int);
+half16 __ovld __cnfn ldexp(half16, int);
#endif //cl_khr_fp16
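/* Editorial sketch, not part of the upstream diff: ldexp is the inverse
 * of frexp above, so ldexp(frexp(x, &e), e) reconstructs x for finite
 * values; kernel name is illustrative. */
__kernel void scale_pow2(__global float *x, int n) {
    size_t i = get_global_id(0);
    x[i] = ldexp(x[i], n); // x[i] * 2^n, exact while the result is in range
}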
/**
@@ -7540,112 +7540,112 @@ half16 __ovld __cnfn ldexp(half16 x, int n);
* function. The sign of the gamma function is
* returned in the signp argument of lgamma_r.
*/
-float __ovld __cnfn lgamma(float x);
-float2 __ovld __cnfn lgamma(float2 x);
-float3 __ovld __cnfn lgamma(float3 x);
-float4 __ovld __cnfn lgamma(float4 x);
-float8 __ovld __cnfn lgamma(float8 x);
-float16 __ovld __cnfn lgamma(float16 x);
+float __ovld __cnfn lgamma(float);
+float2 __ovld __cnfn lgamma(float2);
+float3 __ovld __cnfn lgamma(float3);
+float4 __ovld __cnfn lgamma(float4);
+float8 __ovld __cnfn lgamma(float8);
+float16 __ovld __cnfn lgamma(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn lgamma(double x);
-double2 __ovld __cnfn lgamma(double2 x);
-double3 __ovld __cnfn lgamma(double3 x);
-double4 __ovld __cnfn lgamma(double4 x);
-double8 __ovld __cnfn lgamma(double8 x);
-double16 __ovld __cnfn lgamma(double16 x);
+double __ovld __cnfn lgamma(double);
+double2 __ovld __cnfn lgamma(double2);
+double3 __ovld __cnfn lgamma(double3);
+double4 __ovld __cnfn lgamma(double4);
+double8 __ovld __cnfn lgamma(double8);
+double16 __ovld __cnfn lgamma(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn lgamma(half x);
-half2 __ovld __cnfn lgamma(half2 x);
-half3 __ovld __cnfn lgamma(half3 x);
-half4 __ovld __cnfn lgamma(half4 x);
-half8 __ovld __cnfn lgamma(half8 x);
-half16 __ovld __cnfn lgamma(half16 x);
+half __ovld __cnfn lgamma(half);
+half2 __ovld __cnfn lgamma(half2);
+half3 __ovld __cnfn lgamma(half3);
+half4 __ovld __cnfn lgamma(half4);
+half8 __ovld __cnfn lgamma(half8);
+half16 __ovld __cnfn lgamma(half16);
#endif //cl_khr_fp16
#if defined(__opencl_c_generic_address_space)
-float __ovld lgamma_r(float x, int *signp);
-float2 __ovld lgamma_r(float2 x, int2 *signp);
-float3 __ovld lgamma_r(float3 x, int3 *signp);
-float4 __ovld lgamma_r(float4 x, int4 *signp);
-float8 __ovld lgamma_r(float8 x, int8 *signp);
-float16 __ovld lgamma_r(float16 x, int16 *signp);
+float __ovld lgamma_r(float, int *);
+float2 __ovld lgamma_r(float2, int2 *);
+float3 __ovld lgamma_r(float3, int3 *);
+float4 __ovld lgamma_r(float4, int4 *);
+float8 __ovld lgamma_r(float8, int8 *);
+float16 __ovld lgamma_r(float16, int16 *);
#ifdef cl_khr_fp64
-double __ovld lgamma_r(double x, int *signp);
-double2 __ovld lgamma_r(double2 x, int2 *signp);
-double3 __ovld lgamma_r(double3 x, int3 *signp);
-double4 __ovld lgamma_r(double4 x, int4 *signp);
-double8 __ovld lgamma_r(double8 x, int8 *signp);
-double16 __ovld lgamma_r(double16 x, int16 *signp);
+double __ovld lgamma_r(double, int *);
+double2 __ovld lgamma_r(double2, int2 *);
+double3 __ovld lgamma_r(double3, int3 *);
+double4 __ovld lgamma_r(double4, int4 *);
+double8 __ovld lgamma_r(double8, int8 *);
+double16 __ovld lgamma_r(double16, int16 *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld lgamma_r(half x, int *signp);
-half2 __ovld lgamma_r(half2 x, int2 *signp);
-half3 __ovld lgamma_r(half3 x, int3 *signp);
-half4 __ovld lgamma_r(half4 x, int4 *signp);
-half8 __ovld lgamma_r(half8 x, int8 *signp);
-half16 __ovld lgamma_r(half16 x, int16 *signp);
+half __ovld lgamma_r(half, int *);
+half2 __ovld lgamma_r(half2, int2 *);
+half3 __ovld lgamma_r(half3, int3 *);
+half4 __ovld lgamma_r(half4, int4 *);
+half8 __ovld lgamma_r(half8, int8 *);
+half16 __ovld lgamma_r(half16, int16 *);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
-float __ovld lgamma_r(float x, __global int *signp);
-float2 __ovld lgamma_r(float2 x, __global int2 *signp);
-float3 __ovld lgamma_r(float3 x, __global int3 *signp);
-float4 __ovld lgamma_r(float4 x, __global int4 *signp);
-float8 __ovld lgamma_r(float8 x, __global int8 *signp);
-float16 __ovld lgamma_r(float16 x, __global int16 *signp);
-float __ovld lgamma_r(float x, __local int *signp);
-float2 __ovld lgamma_r(float2 x, __local int2 *signp);
-float3 __ovld lgamma_r(float3 x, __local int3 *signp);
-float4 __ovld lgamma_r(float4 x, __local int4 *signp);
-float8 __ovld lgamma_r(float8 x, __local int8 *signp);
-float16 __ovld lgamma_r(float16 x, __local int16 *signp);
-float __ovld lgamma_r(float x, __private int *signp);
-float2 __ovld lgamma_r(float2 x, __private int2 *signp);
-float3 __ovld lgamma_r(float3 x, __private int3 *signp);
-float4 __ovld lgamma_r(float4 x, __private int4 *signp);
-float8 __ovld lgamma_r(float8 x, __private int8 *signp);
-float16 __ovld lgamma_r(float16 x, __private int16 *signp);
+float __ovld lgamma_r(float, __global int *);
+float2 __ovld lgamma_r(float2, __global int2 *);
+float3 __ovld lgamma_r(float3, __global int3 *);
+float4 __ovld lgamma_r(float4, __global int4 *);
+float8 __ovld lgamma_r(float8, __global int8 *);
+float16 __ovld lgamma_r(float16, __global int16 *);
+float __ovld lgamma_r(float, __local int *);
+float2 __ovld lgamma_r(float2, __local int2 *);
+float3 __ovld lgamma_r(float3, __local int3 *);
+float4 __ovld lgamma_r(float4, __local int4 *);
+float8 __ovld lgamma_r(float8, __local int8 *);
+float16 __ovld lgamma_r(float16, __local int16 *);
+float __ovld lgamma_r(float, __private int *);
+float2 __ovld lgamma_r(float2, __private int2 *);
+float3 __ovld lgamma_r(float3, __private int3 *);
+float4 __ovld lgamma_r(float4, __private int4 *);
+float8 __ovld lgamma_r(float8, __private int8 *);
+float16 __ovld lgamma_r(float16, __private int16 *);
#ifdef cl_khr_fp64
-double __ovld lgamma_r(double x, __global int *signp);
-double2 __ovld lgamma_r(double2 x, __global int2 *signp);
-double3 __ovld lgamma_r(double3 x, __global int3 *signp);
-double4 __ovld lgamma_r(double4 x, __global int4 *signp);
-double8 __ovld lgamma_r(double8 x, __global int8 *signp);
-double16 __ovld lgamma_r(double16 x, __global int16 *signp);
-double __ovld lgamma_r(double x, __local int *signp);
-double2 __ovld lgamma_r(double2 x, __local int2 *signp);
-double3 __ovld lgamma_r(double3 x, __local int3 *signp);
-double4 __ovld lgamma_r(double4 x, __local int4 *signp);
-double8 __ovld lgamma_r(double8 x, __local int8 *signp);
-double16 __ovld lgamma_r(double16 x, __local int16 *signp);
-double __ovld lgamma_r(double x, __private int *signp);
-double2 __ovld lgamma_r(double2 x, __private int2 *signp);
-double3 __ovld lgamma_r(double3 x, __private int3 *signp);
-double4 __ovld lgamma_r(double4 x, __private int4 *signp);
-double8 __ovld lgamma_r(double8 x, __private int8 *signp);
-double16 __ovld lgamma_r(double16 x, __private int16 *signp);
+double __ovld lgamma_r(double, __global int *);
+double2 __ovld lgamma_r(double2, __global int2 *);
+double3 __ovld lgamma_r(double3, __global int3 *);
+double4 __ovld lgamma_r(double4, __global int4 *);
+double8 __ovld lgamma_r(double8, __global int8 *);
+double16 __ovld lgamma_r(double16, __global int16 *);
+double __ovld lgamma_r(double, __local int *);
+double2 __ovld lgamma_r(double2, __local int2 *);
+double3 __ovld lgamma_r(double3, __local int3 *);
+double4 __ovld lgamma_r(double4, __local int4 *);
+double8 __ovld lgamma_r(double8, __local int8 *);
+double16 __ovld lgamma_r(double16, __local int16 *);
+double __ovld lgamma_r(double, __private int *);
+double2 __ovld lgamma_r(double2, __private int2 *);
+double3 __ovld lgamma_r(double3, __private int3 *);
+double4 __ovld lgamma_r(double4, __private int4 *);
+double8 __ovld lgamma_r(double8, __private int8 *);
+double16 __ovld lgamma_r(double16, __private int16 *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld lgamma_r(half x, __global int *signp);
-half2 __ovld lgamma_r(half2 x, __global int2 *signp);
-half3 __ovld lgamma_r(half3 x, __global int3 *signp);
-half4 __ovld lgamma_r(half4 x, __global int4 *signp);
-half8 __ovld lgamma_r(half8 x, __global int8 *signp);
-half16 __ovld lgamma_r(half16 x, __global int16 *signp);
-half __ovld lgamma_r(half x, __local int *signp);
-half2 __ovld lgamma_r(half2 x, __local int2 *signp);
-half3 __ovld lgamma_r(half3 x, __local int3 *signp);
-half4 __ovld lgamma_r(half4 x, __local int4 *signp);
-half8 __ovld lgamma_r(half8 x, __local int8 *signp);
-half16 __ovld lgamma_r(half16 x, __local int16 *signp);
-half __ovld lgamma_r(half x, __private int *signp);
-half2 __ovld lgamma_r(half2 x, __private int2 *signp);
-half3 __ovld lgamma_r(half3 x, __private int3 *signp);
-half4 __ovld lgamma_r(half4 x, __private int4 *signp);
-half8 __ovld lgamma_r(half8 x, __private int8 *signp);
-half16 __ovld lgamma_r(half16 x, __private int16 *signp);
+half __ovld lgamma_r(half, __global int *);
+half2 __ovld lgamma_r(half2, __global int2 *);
+half3 __ovld lgamma_r(half3, __global int3 *);
+half4 __ovld lgamma_r(half4, __global int4 *);
+half8 __ovld lgamma_r(half8, __global int8 *);
+half16 __ovld lgamma_r(half16, __global int16 *);
+half __ovld lgamma_r(half, __local int *);
+half2 __ovld lgamma_r(half2, __local int2 *);
+half3 __ovld lgamma_r(half3, __local int3 *);
+half4 __ovld lgamma_r(half4, __local int4 *);
+half8 __ovld lgamma_r(half8, __local int8 *);
+half16 __ovld lgamma_r(half16, __local int16 *);
+half __ovld lgamma_r(half, __private int *);
+half2 __ovld lgamma_r(half2, __private int2 *);
+half3 __ovld lgamma_r(half3, __private int3 *);
+half4 __ovld lgamma_r(half4, __private int4 *);
+half8 __ovld lgamma_r(half8, __private int8 *);
+half16 __ovld lgamma_r(half16, __private int16 *);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_named_address_space_builtins)
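/* Editorial sketch, not part of the upstream diff: lgamma_r returns
 * ln|gamma(x)| and writes the sign of gamma(x) through signp, as the
 * comment above describes; a local int selects the __private overload. */
__kernel void log_gamma(__global float *x, __global int *sgn) {
    size_t i = get_global_id(0);
    int s;
    x[i] = lgamma_r(x[i], &s);
    sgn[i] = s;
}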
@@ -7730,54 +7730,54 @@ half16 __ovld __cnfn log10(half16);
/**
* Compute a base e logarithm of (1.0 + x).
*/
-float __ovld __cnfn log1p(float x);
-float2 __ovld __cnfn log1p(float2 x);
-float3 __ovld __cnfn log1p(float3 x);
-float4 __ovld __cnfn log1p(float4 x);
-float8 __ovld __cnfn log1p(float8 x);
-float16 __ovld __cnfn log1p(float16 x);
+float __ovld __cnfn log1p(float);
+float2 __ovld __cnfn log1p(float2);
+float3 __ovld __cnfn log1p(float3);
+float4 __ovld __cnfn log1p(float4);
+float8 __ovld __cnfn log1p(float8);
+float16 __ovld __cnfn log1p(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn log1p(double x);
-double2 __ovld __cnfn log1p(double2 x);
-double3 __ovld __cnfn log1p(double3 x);
-double4 __ovld __cnfn log1p(double4 x);
-double8 __ovld __cnfn log1p(double8 x);
-double16 __ovld __cnfn log1p(double16 x);
+double __ovld __cnfn log1p(double);
+double2 __ovld __cnfn log1p(double2);
+double3 __ovld __cnfn log1p(double3);
+double4 __ovld __cnfn log1p(double4);
+double8 __ovld __cnfn log1p(double8);
+double16 __ovld __cnfn log1p(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn log1p(half x);
-half2 __ovld __cnfn log1p(half2 x);
-half3 __ovld __cnfn log1p(half3 x);
-half4 __ovld __cnfn log1p(half4 x);
-half8 __ovld __cnfn log1p(half8 x);
-half16 __ovld __cnfn log1p(half16 x);
+half __ovld __cnfn log1p(half);
+half2 __ovld __cnfn log1p(half2);
+half3 __ovld __cnfn log1p(half3);
+half4 __ovld __cnfn log1p(half4);
+half8 __ovld __cnfn log1p(half8);
+half16 __ovld __cnfn log1p(half16);
#endif //cl_khr_fp16
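/* Editorial sketch, not part of the upstream diff: log1p keeps precision
 * where computing log(1.0f + x) directly would cancel for x near zero;
 * kernel name is illustrative. */
__kernel void log_small(__global float *x) {
    size_t i = get_global_id(0);
    x[i] = log1p(x[i]); // ln(1 + x) without forming 1 + x
}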
/**
* Compute the exponent of x, which is the integral
* part of logr | x |.
*/
-float __ovld __cnfn logb(float x);
-float2 __ovld __cnfn logb(float2 x);
-float3 __ovld __cnfn logb(float3 x);
-float4 __ovld __cnfn logb(float4 x);
-float8 __ovld __cnfn logb(float8 x);
-float16 __ovld __cnfn logb(float16 x);
+float __ovld __cnfn logb(float);
+float2 __ovld __cnfn logb(float2);
+float3 __ovld __cnfn logb(float3);
+float4 __ovld __cnfn logb(float4);
+float8 __ovld __cnfn logb(float8);
+float16 __ovld __cnfn logb(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn logb(double x);
-double2 __ovld __cnfn logb(double2 x);
-double3 __ovld __cnfn logb(double3 x);
-double4 __ovld __cnfn logb(double4 x);
-double8 __ovld __cnfn logb(double8 x);
-double16 __ovld __cnfn logb(double16 x);
+double __ovld __cnfn logb(double);
+double2 __ovld __cnfn logb(double2);
+double3 __ovld __cnfn logb(double3);
+double4 __ovld __cnfn logb(double4);
+double8 __ovld __cnfn logb(double8);
+double16 __ovld __cnfn logb(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn logb(half x);
-half2 __ovld __cnfn logb(half2 x);
-half3 __ovld __cnfn logb(half3 x);
-half4 __ovld __cnfn logb(half4 x);
-half8 __ovld __cnfn logb(half8 x);
-half16 __ovld __cnfn logb(half16 x);
+half __ovld __cnfn logb(half);
+half2 __ovld __cnfn logb(half2);
+half3 __ovld __cnfn logb(half3);
+half4 __ovld __cnfn logb(half4);
+half8 __ovld __cnfn logb(half8);
+half16 __ovld __cnfn logb(half16);
#endif //cl_khr_fp16
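/* Editorial sketch, not part of the upstream diff: logb is the
 * floating-point counterpart of ilogb above, returning the exponent as a
 * float, e.g. logb(1024.0f) == 10.0f. */
__kernel void fp_exponent(__global float *x) {
    size_t i = get_global_id(0);
    x[i] = logb(x[i]);
}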
/**
@@ -7787,81 +7787,81 @@ half16 __ovld __cnfn logb(half16 x);
* defined. mad is intended to be used where speed is
* preferred over accuracy.
*/
-float __ovld __cnfn mad(float a, float b, float c);
-float2 __ovld __cnfn mad(float2 a, float2 b, float2 c);
-float3 __ovld __cnfn mad(float3 a, float3 b, float3 c);
-float4 __ovld __cnfn mad(float4 a, float4 b, float4 c);
-float8 __ovld __cnfn mad(float8 a, float8 b, float8 c);
-float16 __ovld __cnfn mad(float16 a, float16 b, float16 c);
+float __ovld __cnfn mad(float, float, float);
+float2 __ovld __cnfn mad(float2, float2, float2);
+float3 __ovld __cnfn mad(float3, float3, float3);
+float4 __ovld __cnfn mad(float4, float4, float4);
+float8 __ovld __cnfn mad(float8, float8, float8);
+float16 __ovld __cnfn mad(float16, float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn mad(double a, double b, double c);
-double2 __ovld __cnfn mad(double2 a, double2 b, double2 c);
-double3 __ovld __cnfn mad(double3 a, double3 b, double3 c);
-double4 __ovld __cnfn mad(double4 a, double4 b, double4 c);
-double8 __ovld __cnfn mad(double8 a, double8 b, double8 c);
-double16 __ovld __cnfn mad(double16 a, double16 b, double16 c);
+double __ovld __cnfn mad(double, double, double);
+double2 __ovld __cnfn mad(double2, double2, double2);
+double3 __ovld __cnfn mad(double3, double3, double3);
+double4 __ovld __cnfn mad(double4, double4, double4);
+double8 __ovld __cnfn mad(double8, double8, double8);
+double16 __ovld __cnfn mad(double16, double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn mad(half a, half b, half c);
-half2 __ovld __cnfn mad(half2 a, half2 b, half2 c);
-half3 __ovld __cnfn mad(half3 a, half3 b, half3 c);
-half4 __ovld __cnfn mad(half4 a, half4 b, half4 c);
-half8 __ovld __cnfn mad(half8 a, half8 b, half8 c);
-half16 __ovld __cnfn mad(half16 a, half16 b, half16 c);
+half __ovld __cnfn mad(half, half, half);
+half2 __ovld __cnfn mad(half2, half2, half2);
+half3 __ovld __cnfn mad(half3, half3, half3);
+half4 __ovld __cnfn mad(half4, half4, half4);
+half8 __ovld __cnfn mad(half8, half8, half8);
+half16 __ovld __cnfn mad(half16, half16, half16);
#endif //cl_khr_fp16
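/* Editorial sketch, not part of the upstream diff: unlike fma, mad may
 * round the product before the add (its accuracy is implementation-
 * defined per the comment above), so it suits throughput-oriented code. */
__kernel void scale_bias(__global float4 *v, float4 s, float4 b) {
    size_t i = get_global_id(0);
    v[i] = mad(v[i], s, b); // v*s + b, speed preferred over accuracy
}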
/**
* Returns x if | x | > | y |, y if | y | > | x |, otherwise
* fmax(x, y).
*/
-float __ovld __cnfn maxmag(float x, float y);
-float2 __ovld __cnfn maxmag(float2 x, float2 y);
-float3 __ovld __cnfn maxmag(float3 x, float3 y);
-float4 __ovld __cnfn maxmag(float4 x, float4 y);
-float8 __ovld __cnfn maxmag(float8 x, float8 y);
-float16 __ovld __cnfn maxmag(float16 x, float16 y);
+float __ovld __cnfn maxmag(float, float);
+float2 __ovld __cnfn maxmag(float2, float2);
+float3 __ovld __cnfn maxmag(float3, float3);
+float4 __ovld __cnfn maxmag(float4, float4);
+float8 __ovld __cnfn maxmag(float8, float8);
+float16 __ovld __cnfn maxmag(float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn maxmag(double x, double y);
-double2 __ovld __cnfn maxmag(double2 x, double2 y);
-double3 __ovld __cnfn maxmag(double3 x, double3 y);
-double4 __ovld __cnfn maxmag(double4 x, double4 y);
-double8 __ovld __cnfn maxmag(double8 x, double8 y);
-double16 __ovld __cnfn maxmag(double16 x, double16 y);
+double __ovld __cnfn maxmag(double, double);
+double2 __ovld __cnfn maxmag(double2, double2);
+double3 __ovld __cnfn maxmag(double3, double3);
+double4 __ovld __cnfn maxmag(double4, double4);
+double8 __ovld __cnfn maxmag(double8, double8);
+double16 __ovld __cnfn maxmag(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn maxmag(half x, half y);
-half2 __ovld __cnfn maxmag(half2 x, half2 y);
-half3 __ovld __cnfn maxmag(half3 x, half3 y);
-half4 __ovld __cnfn maxmag(half4 x, half4 y);
-half8 __ovld __cnfn maxmag(half8 x, half8 y);
-half16 __ovld __cnfn maxmag(half16 x, half16 y);
+half __ovld __cnfn maxmag(half, half);
+half2 __ovld __cnfn maxmag(half2, half2);
+half3 __ovld __cnfn maxmag(half3, half3);
+half4 __ovld __cnfn maxmag(half4, half4);
+half8 __ovld __cnfn maxmag(half8, half8);
+half16 __ovld __cnfn maxmag(half16, half16);
#endif //cl_khr_fp16
/**
* Returns x if | x | < | y |, y if | y | < | x |, otherwise
* fmin(x, y).
*/
-float __ovld __cnfn minmag(float x, float y);
-float2 __ovld __cnfn minmag(float2 x, float2 y);
-float3 __ovld __cnfn minmag(float3 x, float3 y);
-float4 __ovld __cnfn minmag(float4 x, float4 y);
-float8 __ovld __cnfn minmag(float8 x, float8 y);
-float16 __ovld __cnfn minmag(float16 x, float16 y);
+float __ovld __cnfn minmag(float, float);
+float2 __ovld __cnfn minmag(float2, float2);
+float3 __ovld __cnfn minmag(float3, float3);
+float4 __ovld __cnfn minmag(float4, float4);
+float8 __ovld __cnfn minmag(float8, float8);
+float16 __ovld __cnfn minmag(float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn minmag(double x, double y);
-double2 __ovld __cnfn minmag(double2 x, double2 y);
-double3 __ovld __cnfn minmag(double3 x, double3 y);
-double4 __ovld __cnfn minmag(double4 x, double4 y);
-double8 __ovld __cnfn minmag(double8 x, double8 y);
-double16 __ovld __cnfn minmag(double16 x, double16 y);
+double __ovld __cnfn minmag(double, double);
+double2 __ovld __cnfn minmag(double2, double2);
+double3 __ovld __cnfn minmag(double3, double3);
+double4 __ovld __cnfn minmag(double4, double4);
+double8 __ovld __cnfn minmag(double8, double8);
+double16 __ovld __cnfn minmag(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn minmag(half x, half y);
-half2 __ovld __cnfn minmag(half2 x, half2 y);
-half3 __ovld __cnfn minmag(half3 x, half3 y);
-half4 __ovld __cnfn minmag(half4 x, half4 y);
-half8 __ovld __cnfn minmag(half8 x, half8 y);
-half16 __ovld __cnfn minmag(half16 x, half16 y);
+half __ovld __cnfn minmag(half, half);
+half2 __ovld __cnfn minmag(half2, half2);
+half3 __ovld __cnfn minmag(half3, half3);
+half4 __ovld __cnfn minmag(half4, half4);
+half8 __ovld __cnfn minmag(half8, half8);
+half16 __ovld __cnfn minmag(half16, half16);
#endif //cl_khr_fp16
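/* Editorial sketch covering both maxmag and minmag above, not part of the
 * upstream diff: they compare by magnitude but return the signed operand,
 * e.g. maxmag(-3.0f, 2.0f) is -3.0f while fmax(-3.0f, 2.0f) is 2.0f. */
__kernel void dominant(__global float *a, __global const float *b) {
    size_t i = get_global_id(0);
    a[i] = maxmag(a[i], b[i]);
}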
/**
@@ -7872,88 +7872,88 @@ half16 __ovld __cnfn minmag(half16 x, half16 y);
* pointed to by iptr.
*/
#if defined(__opencl_c_generic_address_space)
-float __ovld modf(float x, float *iptr);
-float2 __ovld modf(float2 x, float2 *iptr);
-float3 __ovld modf(float3 x, float3 *iptr);
-float4 __ovld modf(float4 x, float4 *iptr);
-float8 __ovld modf(float8 x, float8 *iptr);
-float16 __ovld modf(float16 x, float16 *iptr);
+float __ovld modf(float, float *);
+float2 __ovld modf(float2, float2 *);
+float3 __ovld modf(float3, float3 *);
+float4 __ovld modf(float4, float4 *);
+float8 __ovld modf(float8, float8 *);
+float16 __ovld modf(float16, float16 *);
#ifdef cl_khr_fp64
-double __ovld modf(double x, double *iptr);
-double2 __ovld modf(double2 x, double2 *iptr);
-double3 __ovld modf(double3 x, double3 *iptr);
-double4 __ovld modf(double4 x, double4 *iptr);
-double8 __ovld modf(double8 x, double8 *iptr);
-double16 __ovld modf(double16 x, double16 *iptr);
+double __ovld modf(double, double *);
+double2 __ovld modf(double2, double2 *);
+double3 __ovld modf(double3, double3 *);
+double4 __ovld modf(double4, double4 *);
+double8 __ovld modf(double8, double8 *);
+double16 __ovld modf(double16, double16 *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld modf(half x, half *iptr);
-half2 __ovld modf(half2 x, half2 *iptr);
-half3 __ovld modf(half3 x, half3 *iptr);
-half4 __ovld modf(half4 x, half4 *iptr);
-half8 __ovld modf(half8 x, half8 *iptr);
-half16 __ovld modf(half16 x, half16 *iptr);
+half __ovld modf(half, half *);
+half2 __ovld modf(half2, half2 *);
+half3 __ovld modf(half3, half3 *);
+half4 __ovld modf(half4, half4 *);
+half8 __ovld modf(half8, half8 *);
+half16 __ovld modf(half16, half16 *);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
-float __ovld modf(float x, __global float *iptr);
-float2 __ovld modf(float2 x, __global float2 *iptr);
-float3 __ovld modf(float3 x, __global float3 *iptr);
-float4 __ovld modf(float4 x, __global float4 *iptr);
-float8 __ovld modf(float8 x, __global float8 *iptr);
-float16 __ovld modf(float16 x, __global float16 *iptr);
-float __ovld modf(float x, __local float *iptr);
-float2 __ovld modf(float2 x, __local float2 *iptr);
-float3 __ovld modf(float3 x, __local float3 *iptr);
-float4 __ovld modf(float4 x, __local float4 *iptr);
-float8 __ovld modf(float8 x, __local float8 *iptr);
-float16 __ovld modf(float16 x, __local float16 *iptr);
-float __ovld modf(float x, __private float *iptr);
-float2 __ovld modf(float2 x, __private float2 *iptr);
-float3 __ovld modf(float3 x, __private float3 *iptr);
-float4 __ovld modf(float4 x, __private float4 *iptr);
-float8 __ovld modf(float8 x, __private float8 *iptr);
-float16 __ovld modf(float16 x, __private float16 *iptr);
+float __ovld modf(float, __global float *);
+float2 __ovld modf(float2, __global float2 *);
+float3 __ovld modf(float3, __global float3 *);
+float4 __ovld modf(float4, __global float4 *);
+float8 __ovld modf(float8, __global float8 *);
+float16 __ovld modf(float16, __global float16 *);
+float __ovld modf(float, __local float *);
+float2 __ovld modf(float2, __local float2 *);
+float3 __ovld modf(float3, __local float3 *);
+float4 __ovld modf(float4, __local float4 *);
+float8 __ovld modf(float8, __local float8 *);
+float16 __ovld modf(float16, __local float16 *);
+float __ovld modf(float, __private float *);
+float2 __ovld modf(float2, __private float2 *);
+float3 __ovld modf(float3, __private float3 *);
+float4 __ovld modf(float4, __private float4 *);
+float8 __ovld modf(float8, __private float8 *);
+float16 __ovld modf(float16, __private float16 *);
#ifdef cl_khr_fp64
-double __ovld modf(double x, __global double *iptr);
-double2 __ovld modf(double2 x, __global double2 *iptr);
-double3 __ovld modf(double3 x, __global double3 *iptr);
-double4 __ovld modf(double4 x, __global double4 *iptr);
-double8 __ovld modf(double8 x, __global double8 *iptr);
-double16 __ovld modf(double16 x, __global double16 *iptr);
-double __ovld modf(double x, __local double *iptr);
-double2 __ovld modf(double2 x, __local double2 *iptr);
-double3 __ovld modf(double3 x, __local double3 *iptr);
-double4 __ovld modf(double4 x, __local double4 *iptr);
-double8 __ovld modf(double8 x, __local double8 *iptr);
-double16 __ovld modf(double16 x, __local double16 *iptr);
-double __ovld modf(double x, __private double *iptr);
-double2 __ovld modf(double2 x, __private double2 *iptr);
-double3 __ovld modf(double3 x, __private double3 *iptr);
-double4 __ovld modf(double4 x, __private double4 *iptr);
-double8 __ovld modf(double8 x, __private double8 *iptr);
-double16 __ovld modf(double16 x, __private double16 *iptr);
+double __ovld modf(double, __global double *);
+double2 __ovld modf(double2, __global double2 *);
+double3 __ovld modf(double3, __global double3 *);
+double4 __ovld modf(double4, __global double4 *);
+double8 __ovld modf(double8, __global double8 *);
+double16 __ovld modf(double16, __global double16 *);
+double __ovld modf(double, __local double *);
+double2 __ovld modf(double2, __local double2 *);
+double3 __ovld modf(double3, __local double3 *);
+double4 __ovld modf(double4, __local double4 *);
+double8 __ovld modf(double8, __local double8 *);
+double16 __ovld modf(double16, __local double16 *);
+double __ovld modf(double, __private double *);
+double2 __ovld modf(double2, __private double2 *);
+double3 __ovld modf(double3, __private double3 *);
+double4 __ovld modf(double4, __private double4 *);
+double8 __ovld modf(double8, __private double8 *);
+double16 __ovld modf(double16, __private double16 *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld modf(half x, __global half *iptr);
-half2 __ovld modf(half2 x, __global half2 *iptr);
-half3 __ovld modf(half3 x, __global half3 *iptr);
-half4 __ovld modf(half4 x, __global half4 *iptr);
-half8 __ovld modf(half8 x, __global half8 *iptr);
-half16 __ovld modf(half16 x, __global half16 *iptr);
-half __ovld modf(half x, __local half *iptr);
-half2 __ovld modf(half2 x, __local half2 *iptr);
-half3 __ovld modf(half3 x, __local half3 *iptr);
-half4 __ovld modf(half4 x, __local half4 *iptr);
-half8 __ovld modf(half8 x, __local half8 *iptr);
-half16 __ovld modf(half16 x, __local half16 *iptr);
-half __ovld modf(half x, __private half *iptr);
-half2 __ovld modf(half2 x, __private half2 *iptr);
-half3 __ovld modf(half3 x, __private half3 *iptr);
-half4 __ovld modf(half4 x, __private half4 *iptr);
-half8 __ovld modf(half8 x, __private half8 *iptr);
-half16 __ovld modf(half16 x, __private half16 *iptr);
+half __ovld modf(half, __global half *);
+half2 __ovld modf(half2, __global half2 *);
+half3 __ovld modf(half3, __global half3 *);
+half4 __ovld modf(half4, __global half4 *);
+half8 __ovld modf(half8, __global half8 *);
+half16 __ovld modf(half16, __global half16 *);
+half __ovld modf(half, __local half *);
+half2 __ovld modf(half2, __local half2 *);
+half3 __ovld modf(half3, __local half3 *);
+half4 __ovld modf(half4, __local half4 *);
+half8 __ovld modf(half8, __local half8 *);
+half16 __ovld modf(half16, __local half16 *);
+half __ovld modf(half, __private half *);
+half2 __ovld modf(half2, __private half2 *);
+half3 __ovld modf(half3, __private half3 *);
+half4 __ovld modf(half4, __private half4 *);
+half8 __ovld modf(half8, __private half8 *);
+half16 __ovld modf(half16, __private half16 *);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_named_address_space_builtins)
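/* Editorial sketch, not part of the upstream diff: modf splits x into
 * fractional and integral parts, both carrying the sign of x, storing the
 * integral part through iptr; names are illustrative. */
__kernel void split(__global float *x, __global float *ip) {
    size_t i = get_global_id(0);
    float whole;
    x[i] = modf(x[i], &whole); // e.g. modf(-2.5f, &w) is -0.5f, w == -2.0f
    ip[i] = whole;
}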
@@ -7961,27 +7961,27 @@ half16 __ovld modf(half16 x, __private half16 *iptr);
* Returns a quiet NaN. The nancode may be placed
* in the significand of the resulting NaN.
*/
-float __ovld __cnfn nan(uint nancode);
-float2 __ovld __cnfn nan(uint2 nancode);
-float3 __ovld __cnfn nan(uint3 nancode);
-float4 __ovld __cnfn nan(uint4 nancode);
-float8 __ovld __cnfn nan(uint8 nancode);
-float16 __ovld __cnfn nan(uint16 nancode);
+float __ovld __cnfn nan(uint);
+float2 __ovld __cnfn nan(uint2);
+float3 __ovld __cnfn nan(uint3);
+float4 __ovld __cnfn nan(uint4);
+float8 __ovld __cnfn nan(uint8);
+float16 __ovld __cnfn nan(uint16);
#ifdef cl_khr_fp64
-double __ovld __cnfn nan(ulong nancode);
-double2 __ovld __cnfn nan(ulong2 nancode);
-double3 __ovld __cnfn nan(ulong3 nancode);
-double4 __ovld __cnfn nan(ulong4 nancode);
-double8 __ovld __cnfn nan(ulong8 nancode);
-double16 __ovld __cnfn nan(ulong16 nancode);
+double __ovld __cnfn nan(ulong);
+double2 __ovld __cnfn nan(ulong2);
+double3 __ovld __cnfn nan(ulong3);
+double4 __ovld __cnfn nan(ulong4);
+double8 __ovld __cnfn nan(ulong8);
+double16 __ovld __cnfn nan(ulong16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn nan(ushort nancode);
-half2 __ovld __cnfn nan(ushort2 nancode);
-half3 __ovld __cnfn nan(ushort3 nancode);
-half4 __ovld __cnfn nan(ushort4 nancode);
-half8 __ovld __cnfn nan(ushort8 nancode);
-half16 __ovld __cnfn nan(ushort16 nancode);
+half __ovld __cnfn nan(ushort);
+half2 __ovld __cnfn nan(ushort2);
+half3 __ovld __cnfn nan(ushort3);
+half4 __ovld __cnfn nan(ushort4);
+half8 __ovld __cnfn nan(ushort8);
+half16 __ovld __cnfn nan(ushort16);
#endif //cl_khr_fp16
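/* Editorial sketch, not part of the upstream diff: nan takes the unsigned
 * type matching the float width (uint/ulong/ushort above) and may place
 * the nancode in the significand; kernel name is illustrative. */
__kernel void poison(__global float *x, uint code) {
    size_t i = get_global_id(0);
    x[i] = nan(code); // quiet NaN; payload use is implementation-defined
}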
/**
@@ -7991,105 +7991,105 @@ half16 __ovld __cnfn nan(ushort16 nancode);
* largest representable floating-point number less
* than x.
*/
-float __ovld __cnfn nextafter(float x, float y);
-float2 __ovld __cnfn nextafter(float2 x, float2 y);
-float3 __ovld __cnfn nextafter(float3 x, float3 y);
-float4 __ovld __cnfn nextafter(float4 x, float4 y);
-float8 __ovld __cnfn nextafter(float8 x, float8 y);
-float16 __ovld __cnfn nextafter(float16 x, float16 y);
+float __ovld __cnfn nextafter(float, float);
+float2 __ovld __cnfn nextafter(float2, float2);
+float3 __ovld __cnfn nextafter(float3, float3);
+float4 __ovld __cnfn nextafter(float4, float4);
+float8 __ovld __cnfn nextafter(float8, float8);
+float16 __ovld __cnfn nextafter(float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn nextafter(double x, double y);
-double2 __ovld __cnfn nextafter(double2 x, double2 y);
-double3 __ovld __cnfn nextafter(double3 x, double3 y);
-double4 __ovld __cnfn nextafter(double4 x, double4 y);
-double8 __ovld __cnfn nextafter(double8 x, double8 y);
-double16 __ovld __cnfn nextafter(double16 x, double16 y);
+double __ovld __cnfn nextafter(double, double);
+double2 __ovld __cnfn nextafter(double2, double2);
+double3 __ovld __cnfn nextafter(double3, double3);
+double4 __ovld __cnfn nextafter(double4, double4);
+double8 __ovld __cnfn nextafter(double8, double8);
+double16 __ovld __cnfn nextafter(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn nextafter(half x, half y);
-half2 __ovld __cnfn nextafter(half2 x, half2 y);
-half3 __ovld __cnfn nextafter(half3 x, half3 y);
-half4 __ovld __cnfn nextafter(half4 x, half4 y);
-half8 __ovld __cnfn nextafter(half8 x, half8 y);
-half16 __ovld __cnfn nextafter(half16 x, half16 y);
+half __ovld __cnfn nextafter(half, half);
+half2 __ovld __cnfn nextafter(half2, half2);
+half3 __ovld __cnfn nextafter(half3, half3);
+half4 __ovld __cnfn nextafter(half4, half4);
+half8 __ovld __cnfn nextafter(half8, half8);
+half16 __ovld __cnfn nextafter(half16, half16);
#endif //cl_khr_fp16
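/* Editorial sketch, not part of the upstream diff: nextafter steps one
 * representable value toward y, so it can tighten a bound by exactly one
 * ulp; kernel name is illustrative. */
__kernel void nudge_down(__global float *x) {
    size_t i = get_global_id(0);
    x[i] = nextafter(x[i], -INFINITY); // largest float below x[i]
}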
/**
* Compute x to the power y.
*/
-float __ovld __cnfn pow(float x, float y);
-float2 __ovld __cnfn pow(float2 x, float2 y);
-float3 __ovld __cnfn pow(float3 x, float3 y);
-float4 __ovld __cnfn pow(float4 x, float4 y);
-float8 __ovld __cnfn pow(float8 x, float8 y);
-float16 __ovld __cnfn pow(float16 x, float16 y);
+float __ovld __cnfn pow(float, float);
+float2 __ovld __cnfn pow(float2, float2);
+float3 __ovld __cnfn pow(float3, float3);
+float4 __ovld __cnfn pow(float4, float4);
+float8 __ovld __cnfn pow(float8, float8);
+float16 __ovld __cnfn pow(float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn pow(double x, double y);
-double2 __ovld __cnfn pow(double2 x, double2 y);
-double3 __ovld __cnfn pow(double3 x, double3 y);
-double4 __ovld __cnfn pow(double4 x, double4 y);
-double8 __ovld __cnfn pow(double8 x, double8 y);
-double16 __ovld __cnfn pow(double16 x, double16 y);
+double __ovld __cnfn pow(double, double);
+double2 __ovld __cnfn pow(double2, double2);
+double3 __ovld __cnfn pow(double3, double3);
+double4 __ovld __cnfn pow(double4, double4);
+double8 __ovld __cnfn pow(double8, double8);
+double16 __ovld __cnfn pow(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn pow(half x, half y);
-half2 __ovld __cnfn pow(half2 x, half2 y);
-half3 __ovld __cnfn pow(half3 x, half3 y);
-half4 __ovld __cnfn pow(half4 x, half4 y);
-half8 __ovld __cnfn pow(half8 x, half8 y);
-half16 __ovld __cnfn pow(half16 x, half16 y);
+half __ovld __cnfn pow(half, half);
+half2 __ovld __cnfn pow(half2, half2);
+half3 __ovld __cnfn pow(half3, half3);
+half4 __ovld __cnfn pow(half4, half4);
+half8 __ovld __cnfn pow(half8, half8);
+half16 __ovld __cnfn pow(half16, half16);
#endif //cl_khr_fp16
/**
* Compute x to the power y, where y is an integer.
*/
-float __ovld __cnfn pown(float x, int y);
-float2 __ovld __cnfn pown(float2 x, int2 y);
-float3 __ovld __cnfn pown(float3 x, int3 y);
-float4 __ovld __cnfn pown(float4 x, int4 y);
-float8 __ovld __cnfn pown(float8 x, int8 y);
-float16 __ovld __cnfn pown(float16 x, int16 y);
+float __ovld __cnfn pown(float, int);
+float2 __ovld __cnfn pown(float2, int2);
+float3 __ovld __cnfn pown(float3, int3);
+float4 __ovld __cnfn pown(float4, int4);
+float8 __ovld __cnfn pown(float8, int8);
+float16 __ovld __cnfn pown(float16, int16);
#ifdef cl_khr_fp64
-double __ovld __cnfn pown(double x, int y);
-double2 __ovld __cnfn pown(double2 x, int2 y);
-double3 __ovld __cnfn pown(double3 x, int3 y);
-double4 __ovld __cnfn pown(double4 x, int4 y);
-double8 __ovld __cnfn pown(double8 x, int8 y);
-double16 __ovld __cnfn pown(double16 x, int16 y);
+double __ovld __cnfn pown(double, int);
+double2 __ovld __cnfn pown(double2, int2);
+double3 __ovld __cnfn pown(double3, int3);
+double4 __ovld __cnfn pown(double4, int4);
+double8 __ovld __cnfn pown(double8, int8);
+double16 __ovld __cnfn pown(double16, int16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn pown(half x, int y);
-half2 __ovld __cnfn pown(half2 x, int2 y);
-half3 __ovld __cnfn pown(half3 x, int3 y);
-half4 __ovld __cnfn pown(half4 x, int4 y);
-half8 __ovld __cnfn pown(half8 x, int8 y);
-half16 __ovld __cnfn pown(half16 x, int16 y);
+half __ovld __cnfn pown(half, int);
+half2 __ovld __cnfn pown(half2, int2);
+half3 __ovld __cnfn pown(half3, int3);
+half4 __ovld __cnfn pown(half4, int4);
+half8 __ovld __cnfn pown(half8, int8);
+half16 __ovld __cnfn pown(half16, int16);
#endif //cl_khr_fp16
/**
* Compute x to the power y, where x is >= 0.
*/
-float __ovld __cnfn powr(float x, float y);
-float2 __ovld __cnfn powr(float2 x, float2 y);
-float3 __ovld __cnfn powr(float3 x, float3 y);
-float4 __ovld __cnfn powr(float4 x, float4 y);
-float8 __ovld __cnfn powr(float8 x, float8 y);
-float16 __ovld __cnfn powr(float16 x, float16 y);
+float __ovld __cnfn powr(float, float);
+float2 __ovld __cnfn powr(float2, float2);
+float3 __ovld __cnfn powr(float3, float3);
+float4 __ovld __cnfn powr(float4, float4);
+float8 __ovld __cnfn powr(float8, float8);
+float16 __ovld __cnfn powr(float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn powr(double x, double y);
-double2 __ovld __cnfn powr(double2 x, double2 y);
-double3 __ovld __cnfn powr(double3 x, double3 y);
-double4 __ovld __cnfn powr(double4 x, double4 y);
-double8 __ovld __cnfn powr(double8 x, double8 y);
-double16 __ovld __cnfn powr(double16 x, double16 y);
+double __ovld __cnfn powr(double, double);
+double2 __ovld __cnfn powr(double2, double2);
+double3 __ovld __cnfn powr(double3, double3);
+double4 __ovld __cnfn powr(double4, double4);
+double8 __ovld __cnfn powr(double8, double8);
+double16 __ovld __cnfn powr(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn powr(half x, half y);
-half2 __ovld __cnfn powr(half2 x, half2 y);
-half3 __ovld __cnfn powr(half3 x, half3 y);
-half4 __ovld __cnfn powr(half4 x, half4 y);
-half8 __ovld __cnfn powr(half8 x, half8 y);
-half16 __ovld __cnfn powr(half16 x, half16 y);
+half __ovld __cnfn powr(half, half);
+half2 __ovld __cnfn powr(half2, half2);
+half3 __ovld __cnfn powr(half3, half3);
+half4 __ovld __cnfn powr(half4, half4);
+half8 __ovld __cnfn powr(half8, half8);
+half16 __ovld __cnfn powr(half16, half16);
#endif //cl_khr_fp16
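/*
 * Editor's note (illustrative): the three power variants differ only in
 * their domain assumptions, e.g.:
 *
 *   float a = pow(-2.0f, 3.0f);   // general case, a == -8.0f
 *   float b = pown(-2.0f, 3);     // integer exponent, b == -8.0f
 *   float c = powr(-2.0f, 3.0f);  // x must be >= 0, so c is NaN
 */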
/**
@@ -8098,27 +8098,27 @@ half16 __ovld __cnfn powr(half16 x, half16 y);
* are two integers closest to x/y, n shall be the even
* one. If r is zero, it is given the same sign as x.
*/
-float __ovld __cnfn remainder(float x, float y);
-float2 __ovld __cnfn remainder(float2 x, float2 y);
-float3 __ovld __cnfn remainder(float3 x, float3 y);
-float4 __ovld __cnfn remainder(float4 x, float4 y);
-float8 __ovld __cnfn remainder(float8 x, float8 y);
-float16 __ovld __cnfn remainder(float16 x, float16 y);
+float __ovld __cnfn remainder(float, float);
+float2 __ovld __cnfn remainder(float2, float2);
+float3 __ovld __cnfn remainder(float3, float3);
+float4 __ovld __cnfn remainder(float4, float4);
+float8 __ovld __cnfn remainder(float8, float8);
+float16 __ovld __cnfn remainder(float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn remainder(double x, double y);
-double2 __ovld __cnfn remainder(double2 x, double2 y);
-double3 __ovld __cnfn remainder(double3 x, double3 y);
-double4 __ovld __cnfn remainder(double4 x, double4 y);
-double8 __ovld __cnfn remainder(double8 x, double8 y);
-double16 __ovld __cnfn remainder(double16 x, double16 y);
+double __ovld __cnfn remainder(double, double);
+double2 __ovld __cnfn remainder(double2, double2);
+double3 __ovld __cnfn remainder(double3, double3);
+double4 __ovld __cnfn remainder(double4, double4);
+double8 __ovld __cnfn remainder(double8, double8);
+double16 __ovld __cnfn remainder(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn remainder(half x, half y);
-half2 __ovld __cnfn remainder(half2 x, half2 y);
-half3 __ovld __cnfn remainder(half3 x, half3 y);
-half4 __ovld __cnfn remainder(half4 x, half4 y);
-half8 __ovld __cnfn remainder(half8 x, half8 y);
-half16 __ovld __cnfn remainder(half16 x, half16 y);
+half __ovld __cnfn remainder(half, half);
+half2 __ovld __cnfn remainder(half2, half2);
+half3 __ovld __cnfn remainder(half3, half3);
+half4 __ovld __cnfn remainder(half4, half4);
+half8 __ovld __cnfn remainder(half8, half8);
+half16 __ovld __cnfn remainder(half16, half16);
#endif //cl_khr_fp16
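/*
 * Illustrative example (editor's sketch): because the quotient n is
 * rounded to nearest-even, the result can be negative, e.g.:
 *
 *   float r1 = remainder(5.0f, 2.0f);  // 5/2 = 2.5, n = 2, r1 == 1.0f
 *   float r2 = remainder(7.0f, 2.0f);  // 7/2 = 3.5, n = 4, r2 == -1.0f
 */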
/**
@@ -8134,88 +8134,88 @@ half16 __ovld __cnfn remainder(half16 x, half16 y);
* pointed to by quo.
*/
#if defined(__opencl_c_generic_address_space)
-float __ovld remquo(float x, float y, int *quo);
-float2 __ovld remquo(float2 x, float2 y, int2 *quo);
-float3 __ovld remquo(float3 x, float3 y, int3 *quo);
-float4 __ovld remquo(float4 x, float4 y, int4 *quo);
-float8 __ovld remquo(float8 x, float8 y, int8 *quo);
-float16 __ovld remquo(float16 x, float16 y, int16 *quo);
+float __ovld remquo(float, float, int *);
+float2 __ovld remquo(float2, float2, int2 *);
+float3 __ovld remquo(float3, float3, int3 *);
+float4 __ovld remquo(float4, float4, int4 *);
+float8 __ovld remquo(float8, float8, int8 *);
+float16 __ovld remquo(float16, float16, int16 *);
#ifdef cl_khr_fp64
-double __ovld remquo(double x, double y, int *quo);
-double2 __ovld remquo(double2 x, double2 y, int2 *quo);
-double3 __ovld remquo(double3 x, double3 y, int3 *quo);
-double4 __ovld remquo(double4 x, double4 y, int4 *quo);
-double8 __ovld remquo(double8 x, double8 y, int8 *quo);
-double16 __ovld remquo(double16 x, double16 y, int16 *quo);
+double __ovld remquo(double, double, int *);
+double2 __ovld remquo(double2, double2, int2 *);
+double3 __ovld remquo(double3, double3, int3 *);
+double4 __ovld remquo(double4, double4, int4 *);
+double8 __ovld remquo(double8, double8, int8 *);
+double16 __ovld remquo(double16, double16, int16 *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld remquo(half x, half y, int *quo);
-half2 __ovld remquo(half2 x, half2 y, int2 *quo);
-half3 __ovld remquo(half3 x, half3 y, int3 *quo);
-half4 __ovld remquo(half4 x, half4 y, int4 *quo);
-half8 __ovld remquo(half8 x, half8 y, int8 *quo);
-half16 __ovld remquo(half16 x, half16 y, int16 *quo);
+half __ovld remquo(half, half, int *);
+half2 __ovld remquo(half2, half2, int2 *);
+half3 __ovld remquo(half3, half3, int3 *);
+half4 __ovld remquo(half4, half4, int4 *);
+half8 __ovld remquo(half8, half8, int8 *);
+half16 __ovld remquo(half16, half16, int16 *);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
-float __ovld remquo(float x, float y, __global int *quo);
-float2 __ovld remquo(float2 x, float2 y, __global int2 *quo);
-float3 __ovld remquo(float3 x, float3 y, __global int3 *quo);
-float4 __ovld remquo(float4 x, float4 y, __global int4 *quo);
-float8 __ovld remquo(float8 x, float8 y, __global int8 *quo);
-float16 __ovld remquo(float16 x, float16 y, __global int16 *quo);
-float __ovld remquo(float x, float y, __local int *quo);
-float2 __ovld remquo(float2 x, float2 y, __local int2 *quo);
-float3 __ovld remquo(float3 x, float3 y, __local int3 *quo);
-float4 __ovld remquo(float4 x, float4 y, __local int4 *quo);
-float8 __ovld remquo(float8 x, float8 y, __local int8 *quo);
-float16 __ovld remquo(float16 x, float16 y, __local int16 *quo);
-float __ovld remquo(float x, float y, __private int *quo);
-float2 __ovld remquo(float2 x, float2 y, __private int2 *quo);
-float3 __ovld remquo(float3 x, float3 y, __private int3 *quo);
-float4 __ovld remquo(float4 x, float4 y, __private int4 *quo);
-float8 __ovld remquo(float8 x, float8 y, __private int8 *quo);
-float16 __ovld remquo(float16 x, float16 y, __private int16 *quo);
+float __ovld remquo(float, float, __global int *);
+float2 __ovld remquo(float2, float2, __global int2 *);
+float3 __ovld remquo(float3, float3, __global int3 *);
+float4 __ovld remquo(float4, float4, __global int4 *);
+float8 __ovld remquo(float8, float8, __global int8 *);
+float16 __ovld remquo(float16, float16, __global int16 *);
+float __ovld remquo(float, float, __local int *);
+float2 __ovld remquo(float2, float2, __local int2 *);
+float3 __ovld remquo(float3, float3, __local int3 *);
+float4 __ovld remquo(float4, float4, __local int4 *);
+float8 __ovld remquo(float8, float8, __local int8 *);
+float16 __ovld remquo(float16, float16, __local int16 *);
+float __ovld remquo(float, float, __private int *);
+float2 __ovld remquo(float2, float2, __private int2 *);
+float3 __ovld remquo(float3, float3, __private int3 *);
+float4 __ovld remquo(float4, float4, __private int4 *);
+float8 __ovld remquo(float8, float8, __private int8 *);
+float16 __ovld remquo(float16, float16, __private int16 *);
#ifdef cl_khr_fp64
-double __ovld remquo(double x, double y, __global int *quo);
-double2 __ovld remquo(double2 x, double2 y, __global int2 *quo);
-double3 __ovld remquo(double3 x, double3 y, __global int3 *quo);
-double4 __ovld remquo(double4 x, double4 y, __global int4 *quo);
-double8 __ovld remquo(double8 x, double8 y, __global int8 *quo);
-double16 __ovld remquo(double16 x, double16 y, __global int16 *quo);
-double __ovld remquo(double x, double y, __local int *quo);
-double2 __ovld remquo(double2 x, double2 y, __local int2 *quo);
-double3 __ovld remquo(double3 x, double3 y, __local int3 *quo);
-double4 __ovld remquo(double4 x, double4 y, __local int4 *quo);
-double8 __ovld remquo(double8 x, double8 y, __local int8 *quo);
-double16 __ovld remquo(double16 x, double16 y, __local int16 *quo);
-double __ovld remquo(double x, double y, __private int *quo);
-double2 __ovld remquo(double2 x, double2 y, __private int2 *quo);
-double3 __ovld remquo(double3 x, double3 y, __private int3 *quo);
-double4 __ovld remquo(double4 x, double4 y, __private int4 *quo);
-double8 __ovld remquo(double8 x, double8 y, __private int8 *quo);
-double16 __ovld remquo(double16 x, double16 y, __private int16 *quo);
+double __ovld remquo(double, double, __global int *);
+double2 __ovld remquo(double2, double2, __global int2 *);
+double3 __ovld remquo(double3, double3, __global int3 *);
+double4 __ovld remquo(double4, double4, __global int4 *);
+double8 __ovld remquo(double8, double8, __global int8 *);
+double16 __ovld remquo(double16, double16, __global int16 *);
+double __ovld remquo(double, double, __local int *);
+double2 __ovld remquo(double2, double2, __local int2 *);
+double3 __ovld remquo(double3, double3, __local int3 *);
+double4 __ovld remquo(double4, double4, __local int4 *);
+double8 __ovld remquo(double8, double8, __local int8 *);
+double16 __ovld remquo(double16, double16, __local int16 *);
+double __ovld remquo(double, double, __private int *);
+double2 __ovld remquo(double2, double2, __private int2 *);
+double3 __ovld remquo(double3, double3, __private int3 *);
+double4 __ovld remquo(double4, double4, __private int4 *);
+double8 __ovld remquo(double8, double8, __private int8 *);
+double16 __ovld remquo(double16, double16, __private int16 *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld remquo(half x, half y, __global int *quo);
-half2 __ovld remquo(half2 x, half2 y, __global int2 *quo);
-half3 __ovld remquo(half3 x, half3 y, __global int3 *quo);
-half4 __ovld remquo(half4 x, half4 y, __global int4 *quo);
-half8 __ovld remquo(half8 x, half8 y, __global int8 *quo);
-half16 __ovld remquo(half16 x, half16 y, __global int16 *quo);
-half __ovld remquo(half x, half y, __local int *quo);
-half2 __ovld remquo(half2 x, half2 y, __local int2 *quo);
-half3 __ovld remquo(half3 x, half3 y, __local int3 *quo);
-half4 __ovld remquo(half4 x, half4 y, __local int4 *quo);
-half8 __ovld remquo(half8 x, half8 y, __local int8 *quo);
-half16 __ovld remquo(half16 x, half16 y, __local int16 *quo);
-half __ovld remquo(half x, half y, __private int *quo);
-half2 __ovld remquo(half2 x, half2 y, __private int2 *quo);
-half3 __ovld remquo(half3 x, half3 y, __private int3 *quo);
-half4 __ovld remquo(half4 x, half4 y, __private int4 *quo);
-half8 __ovld remquo(half8 x, half8 y, __private int8 *quo);
-half16 __ovld remquo(half16 x, half16 y, __private int16 *quo);
+half __ovld remquo(half, half, __global int *);
+half2 __ovld remquo(half2, half2, __global int2 *);
+half3 __ovld remquo(half3, half3, __global int3 *);
+half4 __ovld remquo(half4, half4, __global int4 *);
+half8 __ovld remquo(half8, half8, __global int8 *);
+half16 __ovld remquo(half16, half16, __global int16 *);
+half __ovld remquo(half, half, __local int *);
+half2 __ovld remquo(half2, half2, __local int2 *);
+half3 __ovld remquo(half3, half3, __local int3 *);
+half4 __ovld remquo(half4, half4, __local int4 *);
+half8 __ovld remquo(half8, half8, __local int8 *);
+half16 __ovld remquo(half16, half16, __local int16 *);
+half __ovld remquo(half, half, __private int *);
+half2 __ovld remquo(half2, half2, __private int2 *);
+half3 __ovld remquo(half3, half3, __private int3 *);
+half4 __ovld remquo(half4, half4, __private int4 *);
+half8 __ovld remquo(half8, half8, __private int8 *);
+half16 __ovld remquo(half16, half16, __private int16 *);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_named_address_space_builtins)
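/*
 * Illustrative usage (editor's sketch, hypothetical variables): remquo
 * also reports the sign and low-order bits of the integral quotient,
 * which is handy for periodic argument reduction, e.g.:
 *
 *   int q;
 *   float r = remquo(x, y, &q);  // r == remainder(x, y)
 *   int octant = q & 7;          // low bits of the rounded quotient
 */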
/**
@@ -8250,27 +8250,27 @@ half16 __ovld __cnfn rint(half16);
/**
* Compute x to the power 1/y.
*/
-float __ovld __cnfn rootn(float x, int y);
-float2 __ovld __cnfn rootn(float2 x, int2 y);
-float3 __ovld __cnfn rootn(float3 x, int3 y);
-float4 __ovld __cnfn rootn(float4 x, int4 y);
-float8 __ovld __cnfn rootn(float8 x, int8 y);
-float16 __ovld __cnfn rootn(float16 x, int16 y);
+float __ovld __cnfn rootn(float, int);
+float2 __ovld __cnfn rootn(float2, int2);
+float3 __ovld __cnfn rootn(float3, int3);
+float4 __ovld __cnfn rootn(float4, int4);
+float8 __ovld __cnfn rootn(float8, int8);
+float16 __ovld __cnfn rootn(float16, int16);
#ifdef cl_khr_fp64
-double __ovld __cnfn rootn(double x, int y);
-double2 __ovld __cnfn rootn(double2 x, int2 y);
-double3 __ovld __cnfn rootn(double3 x, int3 y);
-double4 __ovld __cnfn rootn(double4 x, int4 y);
-double8 __ovld __cnfn rootn(double8 x, int8 y);
-double16 __ovld __cnfn rootn(double16 x, int16 y);
+double __ovld __cnfn rootn(double, int);
+double2 __ovld __cnfn rootn(double2, int2);
+double3 __ovld __cnfn rootn(double3, int3);
+double4 __ovld __cnfn rootn(double4, int4);
+double8 __ovld __cnfn rootn(double8, int8);
+double16 __ovld __cnfn rootn(double16, int16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn rootn(half x, int y);
-half2 __ovld __cnfn rootn(half2 x, int2 y);
-half3 __ovld __cnfn rootn(half3 x, int3 y);
-half4 __ovld __cnfn rootn(half4 x, int4 y);
-half8 __ovld __cnfn rootn(half8 x, int8 y);
-half16 __ovld __cnfn rootn(half16 x, int16 y);
+half __ovld __cnfn rootn(half, int);
+half2 __ovld __cnfn rootn(half2, int2);
+half3 __ovld __cnfn rootn(half3, int3);
+half4 __ovld __cnfn rootn(half4, int4);
+half8 __ovld __cnfn rootn(half8, int8);
+half16 __ovld __cnfn rootn(half16, int16);
#endif //cl_khr_fp16
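/*
 * Illustrative example (editor's sketch): because the root is an
 * integer, odd roots of negative values are defined, e.g.:
 *
 *   float cbrt27 = rootn(27.0f, 3);  // == 3.0f
 *   float neg = rootn(-8.0f, 3);     // == -2.0f (odd root)
 */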
/**
@@ -8278,27 +8278,27 @@ half16 __ovld __cnfn rootn(half16 x, int16 y);
* halfway cases away from zero, regardless of the
* current rounding direction.
*/
-float __ovld __cnfn round(float x);
-float2 __ovld __cnfn round(float2 x);
-float3 __ovld __cnfn round(float3 x);
-float4 __ovld __cnfn round(float4 x);
-float8 __ovld __cnfn round(float8 x);
-float16 __ovld __cnfn round(float16 x);
+float __ovld __cnfn round(float);
+float2 __ovld __cnfn round(float2);
+float3 __ovld __cnfn round(float3);
+float4 __ovld __cnfn round(float4);
+float8 __ovld __cnfn round(float8);
+float16 __ovld __cnfn round(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn round(double x);
-double2 __ovld __cnfn round(double2 x);
-double3 __ovld __cnfn round(double3 x);
-double4 __ovld __cnfn round(double4 x);
-double8 __ovld __cnfn round(double8 x);
-double16 __ovld __cnfn round(double16 x);
+double __ovld __cnfn round(double);
+double2 __ovld __cnfn round(double2);
+double3 __ovld __cnfn round(double3);
+double4 __ovld __cnfn round(double4);
+double8 __ovld __cnfn round(double8);
+double16 __ovld __cnfn round(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn round(half x);
-half2 __ovld __cnfn round(half2 x);
-half3 __ovld __cnfn round(half3 x);
-half4 __ovld __cnfn round(half4 x);
-half8 __ovld __cnfn round(half8 x);
-half16 __ovld __cnfn round(half16 x);
+half __ovld __cnfn round(half);
+half2 __ovld __cnfn round(half2);
+half3 __ovld __cnfn round(half3);
+half4 __ovld __cnfn round(half4);
+half8 __ovld __cnfn round(half8);
+half16 __ovld __cnfn round(half16);
#endif //cl_khr_fp16
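/*
 * Illustrative example (editor's sketch): ties go away from zero
 * regardless of the current rounding mode, e.g.:
 *
 *   float a = round(2.5f);   // == 3.0f
 *   float b = round(-2.5f);  // == -3.0f, not -2.0f
 */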
/**
@@ -8359,88 +8359,88 @@ half16 __ovld __cnfn sin(half16);
* in cosval.
*/
#if defined(__opencl_c_generic_address_space)
-float __ovld sincos(float x, float *cosval);
-float2 __ovld sincos(float2 x, float2 *cosval);
-float3 __ovld sincos(float3 x, float3 *cosval);
-float4 __ovld sincos(float4 x, float4 *cosval);
-float8 __ovld sincos(float8 x, float8 *cosval);
-float16 __ovld sincos(float16 x, float16 *cosval);
+float __ovld sincos(float, float *);
+float2 __ovld sincos(float2, float2 *);
+float3 __ovld sincos(float3, float3 *);
+float4 __ovld sincos(float4, float4 *);
+float8 __ovld sincos(float8, float8 *);
+float16 __ovld sincos(float16, float16 *);
#ifdef cl_khr_fp64
-double __ovld sincos(double x, double *cosval);
-double2 __ovld sincos(double2 x, double2 *cosval);
-double3 __ovld sincos(double3 x, double3 *cosval);
-double4 __ovld sincos(double4 x, double4 *cosval);
-double8 __ovld sincos(double8 x, double8 *cosval);
-double16 __ovld sincos(double16 x, double16 *cosval);
+double __ovld sincos(double, double *);
+double2 __ovld sincos(double2, double2 *);
+double3 __ovld sincos(double3, double3 *);
+double4 __ovld sincos(double4, double4 *);
+double8 __ovld sincos(double8, double8 *);
+double16 __ovld sincos(double16, double16 *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld sincos(half x, half *cosval);
-half2 __ovld sincos(half2 x, half2 *cosval);
-half3 __ovld sincos(half3 x, half3 *cosval);
-half4 __ovld sincos(half4 x, half4 *cosval);
-half8 __ovld sincos(half8 x, half8 *cosval);
-half16 __ovld sincos(half16 x, half16 *cosval);
+half __ovld sincos(half, half *);
+half2 __ovld sincos(half2, half2 *);
+half3 __ovld sincos(half3, half3 *);
+half4 __ovld sincos(half4, half4 *);
+half8 __ovld sincos(half8, half8 *);
+half16 __ovld sincos(half16, half16 *);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
-float __ovld sincos(float x, __global float *cosval);
-float2 __ovld sincos(float2 x, __global float2 *cosval);
-float3 __ovld sincos(float3 x, __global float3 *cosval);
-float4 __ovld sincos(float4 x, __global float4 *cosval);
-float8 __ovld sincos(float8 x, __global float8 *cosval);
-float16 __ovld sincos(float16 x, __global float16 *cosval);
-float __ovld sincos(float x, __local float *cosval);
-float2 __ovld sincos(float2 x, __local float2 *cosval);
-float3 __ovld sincos(float3 x, __local float3 *cosval);
-float4 __ovld sincos(float4 x, __local float4 *cosval);
-float8 __ovld sincos(float8 x, __local float8 *cosval);
-float16 __ovld sincos(float16 x, __local float16 *cosval);
-float __ovld sincos(float x, __private float *cosval);
-float2 __ovld sincos(float2 x, __private float2 *cosval);
-float3 __ovld sincos(float3 x, __private float3 *cosval);
-float4 __ovld sincos(float4 x, __private float4 *cosval);
-float8 __ovld sincos(float8 x, __private float8 *cosval);
-float16 __ovld sincos(float16 x, __private float16 *cosval);
+float __ovld sincos(float, __global float *);
+float2 __ovld sincos(float2, __global float2 *);
+float3 __ovld sincos(float3, __global float3 *);
+float4 __ovld sincos(float4, __global float4 *);
+float8 __ovld sincos(float8, __global float8 *);
+float16 __ovld sincos(float16, __global float16 *);
+float __ovld sincos(float, __local float *);
+float2 __ovld sincos(float2, __local float2 *);
+float3 __ovld sincos(float3, __local float3 *);
+float4 __ovld sincos(float4, __local float4 *);
+float8 __ovld sincos(float8, __local float8 *);
+float16 __ovld sincos(float16, __local float16 *);
+float __ovld sincos(float, __private float *);
+float2 __ovld sincos(float2, __private float2 *);
+float3 __ovld sincos(float3, __private float3 *);
+float4 __ovld sincos(float4, __private float4 *);
+float8 __ovld sincos(float8, __private float8 *);
+float16 __ovld sincos(float16, __private float16 *);
#ifdef cl_khr_fp64
-double __ovld sincos(double x, __global double *cosval);
-double2 __ovld sincos(double2 x, __global double2 *cosval);
-double3 __ovld sincos(double3 x, __global double3 *cosval);
-double4 __ovld sincos(double4 x, __global double4 *cosval);
-double8 __ovld sincos(double8 x, __global double8 *cosval);
-double16 __ovld sincos(double16 x, __global double16 *cosval);
-double __ovld sincos(double x, __local double *cosval);
-double2 __ovld sincos(double2 x, __local double2 *cosval);
-double3 __ovld sincos(double3 x, __local double3 *cosval);
-double4 __ovld sincos(double4 x, __local double4 *cosval);
-double8 __ovld sincos(double8 x, __local double8 *cosval);
-double16 __ovld sincos(double16 x, __local double16 *cosval);
-double __ovld sincos(double x, __private double *cosval);
-double2 __ovld sincos(double2 x, __private double2 *cosval);
-double3 __ovld sincos(double3 x, __private double3 *cosval);
-double4 __ovld sincos(double4 x, __private double4 *cosval);
-double8 __ovld sincos(double8 x, __private double8 *cosval);
-double16 __ovld sincos(double16 x, __private double16 *cosval);
+double __ovld sincos(double, __global double *);
+double2 __ovld sincos(double2, __global double2 *);
+double3 __ovld sincos(double3, __global double3 *);
+double4 __ovld sincos(double4, __global double4 *);
+double8 __ovld sincos(double8, __global double8 *);
+double16 __ovld sincos(double16, __global double16 *);
+double __ovld sincos(double, __local double *);
+double2 __ovld sincos(double2, __local double2 *);
+double3 __ovld sincos(double3, __local double3 *);
+double4 __ovld sincos(double4, __local double4 *);
+double8 __ovld sincos(double8, __local double8 *);
+double16 __ovld sincos(double16, __local double16 *);
+double __ovld sincos(double, __private double *);
+double2 __ovld sincos(double2, __private double2 *);
+double3 __ovld sincos(double3, __private double3 *);
+double4 __ovld sincos(double4, __private double4 *);
+double8 __ovld sincos(double8, __private double8 *);
+double16 __ovld sincos(double16, __private double16 *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld sincos(half x, __global half *cosval);
-half2 __ovld sincos(half2 x, __global half2 *cosval);
-half3 __ovld sincos(half3 x, __global half3 *cosval);
-half4 __ovld sincos(half4 x, __global half4 *cosval);
-half8 __ovld sincos(half8 x, __global half8 *cosval);
-half16 __ovld sincos(half16 x, __global half16 *cosval);
-half __ovld sincos(half x, __local half *cosval);
-half2 __ovld sincos(half2 x, __local half2 *cosval);
-half3 __ovld sincos(half3 x, __local half3 *cosval);
-half4 __ovld sincos(half4 x, __local half4 *cosval);
-half8 __ovld sincos(half8 x, __local half8 *cosval);
-half16 __ovld sincos(half16 x, __local half16 *cosval);
-half __ovld sincos(half x, __private half *cosval);
-half2 __ovld sincos(half2 x, __private half2 *cosval);
-half3 __ovld sincos(half3 x, __private half3 *cosval);
-half4 __ovld sincos(half4 x, __private half4 *cosval);
-half8 __ovld sincos(half8 x, __private half8 *cosval);
-half16 __ovld sincos(half16 x, __private half16 *cosval);
+half __ovld sincos(half, __global half *);
+half2 __ovld sincos(half2, __global half2 *);
+half3 __ovld sincos(half3, __global half3 *);
+half4 __ovld sincos(half4, __global half4 *);
+half8 __ovld sincos(half8, __global half8 *);
+half16 __ovld sincos(half16, __global half16 *);
+half __ovld sincos(half, __local half *);
+half2 __ovld sincos(half2, __local half2 *);
+half3 __ovld sincos(half3, __local half3 *);
+half4 __ovld sincos(half4, __local half4 *);
+half8 __ovld sincos(half8, __local half8 *);
+half16 __ovld sincos(half16, __local half16 *);
+half __ovld sincos(half, __private half *);
+half2 __ovld sincos(half2, __private half2 *);
+half3 __ovld sincos(half3, __private half3 *);
+half4 __ovld sincos(half4, __private half4 *);
+half8 __ovld sincos(half8, __private half8 *);
+half16 __ovld sincos(half16, __private half16 *);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_named_address_space_builtins)
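/*
 * Illustrative usage (editor's sketch): sincos computes both values in
 * a single call, typically cheaper than separate sin and cos, e.g.:
 *
 *   float c;
 *   float s = sincos(x, &c);  // s == sin(x), c == cos(x)
 */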
@@ -8473,27 +8473,27 @@ half16 __ovld __cnfn sinh(half16);
/**
* Compute sin (PI * x).
*/
-float __ovld __cnfn sinpi(float x);
-float2 __ovld __cnfn sinpi(float2 x);
-float3 __ovld __cnfn sinpi(float3 x);
-float4 __ovld __cnfn sinpi(float4 x);
-float8 __ovld __cnfn sinpi(float8 x);
-float16 __ovld __cnfn sinpi(float16 x);
+float __ovld __cnfn sinpi(float);
+float2 __ovld __cnfn sinpi(float2);
+float3 __ovld __cnfn sinpi(float3);
+float4 __ovld __cnfn sinpi(float4);
+float8 __ovld __cnfn sinpi(float8);
+float16 __ovld __cnfn sinpi(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn sinpi(double x);
-double2 __ovld __cnfn sinpi(double2 x);
-double3 __ovld __cnfn sinpi(double3 x);
-double4 __ovld __cnfn sinpi(double4 x);
-double8 __ovld __cnfn sinpi(double8 x);
-double16 __ovld __cnfn sinpi(double16 x);
+double __ovld __cnfn sinpi(double);
+double2 __ovld __cnfn sinpi(double2);
+double3 __ovld __cnfn sinpi(double3);
+double4 __ovld __cnfn sinpi(double4);
+double8 __ovld __cnfn sinpi(double8);
+double16 __ovld __cnfn sinpi(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn sinpi(half x);
-half2 __ovld __cnfn sinpi(half2 x);
-half3 __ovld __cnfn sinpi(half3 x);
-half4 __ovld __cnfn sinpi(half4 x);
-half8 __ovld __cnfn sinpi(half8 x);
-half16 __ovld __cnfn sinpi(half16 x);
+half __ovld __cnfn sinpi(half);
+half2 __ovld __cnfn sinpi(half2);
+half3 __ovld __cnfn sinpi(half3);
+half4 __ovld __cnfn sinpi(half4);
+half8 __ovld __cnfn sinpi(half8);
+half16 __ovld __cnfn sinpi(half16);
#endif //cl_khr_fp16
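/*
 * Editor's note (illustrative): sinpi(x) evaluates sin(pi * x) without
 * first forming a rounded product, so half-period multiples stay exact,
 * e.g.:
 *
 *   float z = sinpi(1.0f);  // exactly 0.0f, unlike sin(M_PI_F)
 *   float o = sinpi(0.5f);  // exactly 1.0f
 */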
/**
@@ -8577,27 +8577,27 @@ half16 __ovld __cnfn tanh(half16);
/**
* Compute tan (PI * x).
*/
-float __ovld __cnfn tanpi(float x);
-float2 __ovld __cnfn tanpi(float2 x);
-float3 __ovld __cnfn tanpi(float3 x);
-float4 __ovld __cnfn tanpi(float4 x);
-float8 __ovld __cnfn tanpi(float8 x);
-float16 __ovld __cnfn tanpi(float16 x);
+float __ovld __cnfn tanpi(float);
+float2 __ovld __cnfn tanpi(float2);
+float3 __ovld __cnfn tanpi(float3);
+float4 __ovld __cnfn tanpi(float4);
+float8 __ovld __cnfn tanpi(float8);
+float16 __ovld __cnfn tanpi(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn tanpi(double x);
-double2 __ovld __cnfn tanpi(double2 x);
-double3 __ovld __cnfn tanpi(double3 x);
-double4 __ovld __cnfn tanpi(double4 x);
-double8 __ovld __cnfn tanpi(double8 x);
-double16 __ovld __cnfn tanpi(double16 x);
+double __ovld __cnfn tanpi(double);
+double2 __ovld __cnfn tanpi(double2);
+double3 __ovld __cnfn tanpi(double3);
+double4 __ovld __cnfn tanpi(double4);
+double8 __ovld __cnfn tanpi(double8);
+double16 __ovld __cnfn tanpi(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn tanpi(half x);
-half2 __ovld __cnfn tanpi(half2 x);
-half3 __ovld __cnfn tanpi(half3 x);
-half4 __ovld __cnfn tanpi(half4 x);
-half8 __ovld __cnfn tanpi(half8 x);
-half16 __ovld __cnfn tanpi(half16 x);
+half __ovld __cnfn tanpi(half);
+half2 __ovld __cnfn tanpi(half2);
+half3 __ovld __cnfn tanpi(half3);
+half4 __ovld __cnfn tanpi(half4);
+half8 __ovld __cnfn tanpi(half8);
+half16 __ovld __cnfn tanpi(half16);
#endif //cl_khr_fp16
/**
@@ -8656,711 +8656,711 @@ half16 __ovld __cnfn trunc(half16);
/**
* Compute cosine. x must be in the range -2^16 ... +2^16.
*/
-float __ovld __cnfn half_cos(float x);
-float2 __ovld __cnfn half_cos(float2 x);
-float3 __ovld __cnfn half_cos(float3 x);
-float4 __ovld __cnfn half_cos(float4 x);
-float8 __ovld __cnfn half_cos(float8 x);
-float16 __ovld __cnfn half_cos(float16 x);
+float __ovld __cnfn half_cos(float);
+float2 __ovld __cnfn half_cos(float2);
+float3 __ovld __cnfn half_cos(float3);
+float4 __ovld __cnfn half_cos(float4);
+float8 __ovld __cnfn half_cos(float8);
+float16 __ovld __cnfn half_cos(float16);
/**
* Compute x / y.
*/
-float __ovld __cnfn half_divide(float x, float y);
-float2 __ovld __cnfn half_divide(float2 x, float2 y);
-float3 __ovld __cnfn half_divide(float3 x, float3 y);
-float4 __ovld __cnfn half_divide(float4 x, float4 y);
-float8 __ovld __cnfn half_divide(float8 x, float8 y);
-float16 __ovld __cnfn half_divide(float16 x, float16 y);
+float __ovld __cnfn half_divide(float, float);
+float2 __ovld __cnfn half_divide(float2, float2);
+float3 __ovld __cnfn half_divide(float3, float3);
+float4 __ovld __cnfn half_divide(float4, float4);
+float8 __ovld __cnfn half_divide(float8, float8);
+float16 __ovld __cnfn half_divide(float16, float16);
/**
* Compute the base-e exponential of x.
*/
-float __ovld __cnfn half_exp(float x);
-float2 __ovld __cnfn half_exp(float2 x);
-float3 __ovld __cnfn half_exp(float3 x);
-float4 __ovld __cnfn half_exp(float4 x);
-float8 __ovld __cnfn half_exp(float8 x);
-float16 __ovld __cnfn half_exp(float16 x);
+float __ovld __cnfn half_exp(float);
+float2 __ovld __cnfn half_exp(float2);
+float3 __ovld __cnfn half_exp(float3);
+float4 __ovld __cnfn half_exp(float4);
+float8 __ovld __cnfn half_exp(float8);
+float16 __ovld __cnfn half_exp(float16);
/**
* Compute the base-2 exponential of x.
*/
-float __ovld __cnfn half_exp2(float x);
-float2 __ovld __cnfn half_exp2(float2 x);
-float3 __ovld __cnfn half_exp2(float3 x);
-float4 __ovld __cnfn half_exp2(float4 x);
-float8 __ovld __cnfn half_exp2(float8 x);
-float16 __ovld __cnfn half_exp2(float16 x);
+float __ovld __cnfn half_exp2(float);
+float2 __ovld __cnfn half_exp2(float2);
+float3 __ovld __cnfn half_exp2(float3);
+float4 __ovld __cnfn half_exp2(float4);
+float8 __ovld __cnfn half_exp2(float8);
+float16 __ovld __cnfn half_exp2(float16);
/**
* Compute the base-10 exponential of x.
*/
-float __ovld __cnfn half_exp10(float x);
-float2 __ovld __cnfn half_exp10(float2 x);
-float3 __ovld __cnfn half_exp10(float3 x);
-float4 __ovld __cnfn half_exp10(float4 x);
-float8 __ovld __cnfn half_exp10(float8 x);
-float16 __ovld __cnfn half_exp10(float16 x);
+float __ovld __cnfn half_exp10(float);
+float2 __ovld __cnfn half_exp10(float2);
+float3 __ovld __cnfn half_exp10(float3);
+float4 __ovld __cnfn half_exp10(float4);
+float8 __ovld __cnfn half_exp10(float8);
+float16 __ovld __cnfn half_exp10(float16);
/**
* Compute natural logarithm.
*/
-float __ovld __cnfn half_log(float x);
-float2 __ovld __cnfn half_log(float2 x);
-float3 __ovld __cnfn half_log(float3 x);
-float4 __ovld __cnfn half_log(float4 x);
-float8 __ovld __cnfn half_log(float8 x);
-float16 __ovld __cnfn half_log(float16 x);
+float __ovld __cnfn half_log(float);
+float2 __ovld __cnfn half_log(float2);
+float3 __ovld __cnfn half_log(float3);
+float4 __ovld __cnfn half_log(float4);
+float8 __ovld __cnfn half_log(float8);
+float16 __ovld __cnfn half_log(float16);
/**
* Compute a base 2 logarithm.
*/
-float __ovld __cnfn half_log2(float x);
-float2 __ovld __cnfn half_log2(float2 x);
-float3 __ovld __cnfn half_log2(float3 x);
-float4 __ovld __cnfn half_log2(float4 x);
-float8 __ovld __cnfn half_log2(float8 x);
-float16 __ovld __cnfn half_log2(float16 x);
+float __ovld __cnfn half_log2(float);
+float2 __ovld __cnfn half_log2(float2);
+float3 __ovld __cnfn half_log2(float3);
+float4 __ovld __cnfn half_log2(float4);
+float8 __ovld __cnfn half_log2(float8);
+float16 __ovld __cnfn half_log2(float16);
/**
* Compute a base 10 logarithm.
*/
-float __ovld __cnfn half_log10(float x);
-float2 __ovld __cnfn half_log10(float2 x);
-float3 __ovld __cnfn half_log10(float3 x);
-float4 __ovld __cnfn half_log10(float4 x);
-float8 __ovld __cnfn half_log10(float8 x);
-float16 __ovld __cnfn half_log10(float16 x);
+float __ovld __cnfn half_log10(float);
+float2 __ovld __cnfn half_log10(float2);
+float3 __ovld __cnfn half_log10(float3);
+float4 __ovld __cnfn half_log10(float4);
+float8 __ovld __cnfn half_log10(float8);
+float16 __ovld __cnfn half_log10(float16);
/**
* Compute x to the power y, where x is >= 0.
*/
-float __ovld __cnfn half_powr(float x, float y);
-float2 __ovld __cnfn half_powr(float2 x, float2 y);
-float3 __ovld __cnfn half_powr(float3 x, float3 y);
-float4 __ovld __cnfn half_powr(float4 x, float4 y);
-float8 __ovld __cnfn half_powr(float8 x, float8 y);
-float16 __ovld __cnfn half_powr(float16 x, float16 y);
+float __ovld __cnfn half_powr(float, float);
+float2 __ovld __cnfn half_powr(float2, float2);
+float3 __ovld __cnfn half_powr(float3, float3);
+float4 __ovld __cnfn half_powr(float4, float4);
+float8 __ovld __cnfn half_powr(float8, float8);
+float16 __ovld __cnfn half_powr(float16, float16);
/**
* Compute reciprocal.
*/
-float __ovld __cnfn half_recip(float x);
-float2 __ovld __cnfn half_recip(float2 x);
-float3 __ovld __cnfn half_recip(float3 x);
-float4 __ovld __cnfn half_recip(float4 x);
-float8 __ovld __cnfn half_recip(float8 x);
-float16 __ovld __cnfn half_recip(float16 x);
+float __ovld __cnfn half_recip(float);
+float2 __ovld __cnfn half_recip(float2);
+float3 __ovld __cnfn half_recip(float3);
+float4 __ovld __cnfn half_recip(float4);
+float8 __ovld __cnfn half_recip(float8);
+float16 __ovld __cnfn half_recip(float16);
/**
* Compute inverse square root.
*/
-float __ovld __cnfn half_rsqrt(float x);
-float2 __ovld __cnfn half_rsqrt(float2 x);
-float3 __ovld __cnfn half_rsqrt(float3 x);
-float4 __ovld __cnfn half_rsqrt(float4 x);
-float8 __ovld __cnfn half_rsqrt(float8 x);
-float16 __ovld __cnfn half_rsqrt(float16 x);
+float __ovld __cnfn half_rsqrt(float);
+float2 __ovld __cnfn half_rsqrt(float2);
+float3 __ovld __cnfn half_rsqrt(float3);
+float4 __ovld __cnfn half_rsqrt(float4);
+float8 __ovld __cnfn half_rsqrt(float8);
+float16 __ovld __cnfn half_rsqrt(float16);
/**
* Compute sine. x must be in the range -2^16 ... +2^16.
*/
-float __ovld __cnfn half_sin(float x);
-float2 __ovld __cnfn half_sin(float2 x);
-float3 __ovld __cnfn half_sin(float3 x);
-float4 __ovld __cnfn half_sin(float4 x);
-float8 __ovld __cnfn half_sin(float8 x);
-float16 __ovld __cnfn half_sin(float16 x);
+float __ovld __cnfn half_sin(float);
+float2 __ovld __cnfn half_sin(float2);
+float3 __ovld __cnfn half_sin(float3);
+float4 __ovld __cnfn half_sin(float4);
+float8 __ovld __cnfn half_sin(float8);
+float16 __ovld __cnfn half_sin(float16);
/**
* Compute square root.
*/
-float __ovld __cnfn half_sqrt(float x);
-float2 __ovld __cnfn half_sqrt(float2 x);
-float3 __ovld __cnfn half_sqrt(float3 x);
-float4 __ovld __cnfn half_sqrt(float4 x);
-float8 __ovld __cnfn half_sqrt(float8 x);
-float16 __ovld __cnfn half_sqrt(float16 x);
+float __ovld __cnfn half_sqrt(float);
+float2 __ovld __cnfn half_sqrt(float2);
+float3 __ovld __cnfn half_sqrt(float3);
+float4 __ovld __cnfn half_sqrt(float4);
+float8 __ovld __cnfn half_sqrt(float8);
+float16 __ovld __cnfn half_sqrt(float16);
/**
* Compute tangent. x must be in the range -2^16 ... +2^16.
*/
-float __ovld __cnfn half_tan(float x);
-float2 __ovld __cnfn half_tan(float2 x);
-float3 __ovld __cnfn half_tan(float3 x);
-float4 __ovld __cnfn half_tan(float4 x);
-float8 __ovld __cnfn half_tan(float8 x);
-float16 __ovld __cnfn half_tan(float16 x);
+float __ovld __cnfn half_tan(float);
+float2 __ovld __cnfn half_tan(float2);
+float3 __ovld __cnfn half_tan(float3);
+float4 __ovld __cnfn half_tan(float4);
+float8 __ovld __cnfn half_tan(float8);
+float16 __ovld __cnfn half_tan(float16);
/**
* Compute cosine over an implementation-defined range.
* The maximum error is implementation-defined.
*/
-float __ovld __cnfn native_cos(float x);
-float2 __ovld __cnfn native_cos(float2 x);
-float3 __ovld __cnfn native_cos(float3 x);
-float4 __ovld __cnfn native_cos(float4 x);
-float8 __ovld __cnfn native_cos(float8 x);
-float16 __ovld __cnfn native_cos(float16 x);
+float __ovld __cnfn native_cos(float);
+float2 __ovld __cnfn native_cos(float2);
+float3 __ovld __cnfn native_cos(float3);
+float4 __ovld __cnfn native_cos(float4);
+float8 __ovld __cnfn native_cos(float8);
+float16 __ovld __cnfn native_cos(float16);
/**
* Compute x / y over an implementation-defined range.
* The maximum error is implementation-defined.
*/
-float __ovld __cnfn native_divide(float x, float y);
-float2 __ovld __cnfn native_divide(float2 x, float2 y);
-float3 __ovld __cnfn native_divide(float3 x, float3 y);
-float4 __ovld __cnfn native_divide(float4 x, float4 y);
-float8 __ovld __cnfn native_divide(float8 x, float8 y);
-float16 __ovld __cnfn native_divide(float16 x, float16 y);
+float __ovld __cnfn native_divide(float, float);
+float2 __ovld __cnfn native_divide(float2, float2);
+float3 __ovld __cnfn native_divide(float3, float3);
+float4 __ovld __cnfn native_divide(float4, float4);
+float8 __ovld __cnfn native_divide(float8, float8);
+float16 __ovld __cnfn native_divide(float16, float16);
/**
* Compute the base-e exponential of x over an
* implementation-defined range. The maximum error is
* implementation-defined.
*/
-float __ovld __cnfn native_exp(float x);
-float2 __ovld __cnfn native_exp(float2 x);
-float3 __ovld __cnfn native_exp(float3 x);
-float4 __ovld __cnfn native_exp(float4 x);
-float8 __ovld __cnfn native_exp(float8 x);
-float16 __ovld __cnfn native_exp(float16 x);
+float __ovld __cnfn native_exp(float);
+float2 __ovld __cnfn native_exp(float2);
+float3 __ovld __cnfn native_exp(float3);
+float4 __ovld __cnfn native_exp(float4);
+float8 __ovld __cnfn native_exp(float8);
+float16 __ovld __cnfn native_exp(float16);
/**
* Compute the base-2 exponential of x over an
* implementation-defined range. The maximum error is
* implementation-defined.
*/
-float __ovld __cnfn native_exp2(float x);
-float2 __ovld __cnfn native_exp2(float2 x);
-float3 __ovld __cnfn native_exp2(float3 x);
-float4 __ovld __cnfn native_exp2(float4 x);
-float8 __ovld __cnfn native_exp2(float8 x);
-float16 __ovld __cnfn native_exp2(float16 x);
+float __ovld __cnfn native_exp2(float);
+float2 __ovld __cnfn native_exp2(float2);
+float3 __ovld __cnfn native_exp2(float3);
+float4 __ovld __cnfn native_exp2(float4);
+float8 __ovld __cnfn native_exp2(float8);
+float16 __ovld __cnfn native_exp2(float16);
/**
* Compute the base-10 exponential of x over an
* implementation-defined range. The maximum error is
* implementation-defined.
*/
-float __ovld __cnfn native_exp10(float x);
-float2 __ovld __cnfn native_exp10(float2 x);
-float3 __ovld __cnfn native_exp10(float3 x);
-float4 __ovld __cnfn native_exp10(float4 x);
-float8 __ovld __cnfn native_exp10(float8 x);
-float16 __ovld __cnfn native_exp10(float16 x);
+float __ovld __cnfn native_exp10(float);
+float2 __ovld __cnfn native_exp10(float2);
+float3 __ovld __cnfn native_exp10(float3);
+float4 __ovld __cnfn native_exp10(float4);
+float8 __ovld __cnfn native_exp10(float8);
+float16 __ovld __cnfn native_exp10(float16);
/**
* Compute natural logarithm over an implementation-defined
* range. The maximum error is implementation-defined.
*/
-float __ovld __cnfn native_log(float x);
-float2 __ovld __cnfn native_log(float2 x);
-float3 __ovld __cnfn native_log(float3 x);
-float4 __ovld __cnfn native_log(float4 x);
-float8 __ovld __cnfn native_log(float8 x);
-float16 __ovld __cnfn native_log(float16 x);
+float __ovld __cnfn native_log(float);
+float2 __ovld __cnfn native_log(float2);
+float3 __ovld __cnfn native_log(float3);
+float4 __ovld __cnfn native_log(float4);
+float8 __ovld __cnfn native_log(float8);
+float16 __ovld __cnfn native_log(float16);
/**
* Compute a base 2 logarithm over an implementation-defined
* range. The maximum error is implementation-defined.
*/
-float __ovld __cnfn native_log2(float x);
-float2 __ovld __cnfn native_log2(float2 x);
-float3 __ovld __cnfn native_log2(float3 x);
-float4 __ovld __cnfn native_log2(float4 x);
-float8 __ovld __cnfn native_log2(float8 x);
-float16 __ovld __cnfn native_log2(float16 x);
+float __ovld __cnfn native_log2(float);
+float2 __ovld __cnfn native_log2(float2);
+float3 __ovld __cnfn native_log2(float3);
+float4 __ovld __cnfn native_log2(float4);
+float8 __ovld __cnfn native_log2(float8);
+float16 __ovld __cnfn native_log2(float16);
/**
* Compute a base 10 logarithm over an implementation-defined
* range. The maximum error is implementation-defined.
*/
-float __ovld __cnfn native_log10(float x);
-float2 __ovld __cnfn native_log10(float2 x);
-float3 __ovld __cnfn native_log10(float3 x);
-float4 __ovld __cnfn native_log10(float4 x);
-float8 __ovld __cnfn native_log10(float8 x);
-float16 __ovld __cnfn native_log10(float16 x);
+float __ovld __cnfn native_log10(float);
+float2 __ovld __cnfn native_log10(float2);
+float3 __ovld __cnfn native_log10(float3);
+float4 __ovld __cnfn native_log10(float4);
+float8 __ovld __cnfn native_log10(float8);
+float16 __ovld __cnfn native_log10(float16);
/**
* Compute x to the power y, where x is >= 0. The range of
* x and y are implementation-defined. The maximum error
* is implementation-defined.
*/
-float __ovld __cnfn native_powr(float x, float y);
-float2 __ovld __cnfn native_powr(float2 x, float2 y);
-float3 __ovld __cnfn native_powr(float3 x, float3 y);
-float4 __ovld __cnfn native_powr(float4 x, float4 y);
-float8 __ovld __cnfn native_powr(float8 x, float8 y);
-float16 __ovld __cnfn native_powr(float16 x, float16 y);
+float __ovld __cnfn native_powr(float, float);
+float2 __ovld __cnfn native_powr(float2, float2);
+float3 __ovld __cnfn native_powr(float3, float3);
+float4 __ovld __cnfn native_powr(float4, float4);
+float8 __ovld __cnfn native_powr(float8, float8);
+float16 __ovld __cnfn native_powr(float16, float16);
/**
* Compute reciprocal over an implementation-defined
* range. The maximum error is implementation-defined.
*/
-float __ovld __cnfn native_recip(float x);
-float2 __ovld __cnfn native_recip(float2 x);
-float3 __ovld __cnfn native_recip(float3 x);
-float4 __ovld __cnfn native_recip(float4 x);
-float8 __ovld __cnfn native_recip(float8 x);
-float16 __ovld __cnfn native_recip(float16 x);
+float __ovld __cnfn native_recip(float);
+float2 __ovld __cnfn native_recip(float2);
+float3 __ovld __cnfn native_recip(float3);
+float4 __ovld __cnfn native_recip(float4);
+float8 __ovld __cnfn native_recip(float8);
+float16 __ovld __cnfn native_recip(float16);
/**
* Compute inverse square root over an implementation-defined
* range. The maximum error is implementation-defined.
*/
-float __ovld __cnfn native_rsqrt(float x);
-float2 __ovld __cnfn native_rsqrt(float2 x);
-float3 __ovld __cnfn native_rsqrt(float3 x);
-float4 __ovld __cnfn native_rsqrt(float4 x);
-float8 __ovld __cnfn native_rsqrt(float8 x);
-float16 __ovld __cnfn native_rsqrt(float16 x);
+float __ovld __cnfn native_rsqrt(float);
+float2 __ovld __cnfn native_rsqrt(float2);
+float3 __ovld __cnfn native_rsqrt(float3);
+float4 __ovld __cnfn native_rsqrt(float4);
+float8 __ovld __cnfn native_rsqrt(float8);
+float16 __ovld __cnfn native_rsqrt(float16);
/**
* Compute sine over an implementation-defined range.
* The maximum error is implementation-defined.
*/
-float __ovld __cnfn native_sin(float x);
-float2 __ovld __cnfn native_sin(float2 x);
-float3 __ovld __cnfn native_sin(float3 x);
-float4 __ovld __cnfn native_sin(float4 x);
-float8 __ovld __cnfn native_sin(float8 x);
-float16 __ovld __cnfn native_sin(float16 x);
+float __ovld __cnfn native_sin(float);
+float2 __ovld __cnfn native_sin(float2);
+float3 __ovld __cnfn native_sin(float3);
+float4 __ovld __cnfn native_sin(float4);
+float8 __ovld __cnfn native_sin(float8);
+float16 __ovld __cnfn native_sin(float16);
/**
* Compute square root over an implementation-defined
* range. The maximum error is implementation-defined.
*/
-float __ovld __cnfn native_sqrt(float x);
-float2 __ovld __cnfn native_sqrt(float2 x);
-float3 __ovld __cnfn native_sqrt(float3 x);
-float4 __ovld __cnfn native_sqrt(float4 x);
-float8 __ovld __cnfn native_sqrt(float8 x);
-float16 __ovld __cnfn native_sqrt(float16 x);
+float __ovld __cnfn native_sqrt(float);
+float2 __ovld __cnfn native_sqrt(float2);
+float3 __ovld __cnfn native_sqrt(float3);
+float4 __ovld __cnfn native_sqrt(float4);
+float8 __ovld __cnfn native_sqrt(float8);
+float16 __ovld __cnfn native_sqrt(float16);
/**
* Compute tangent over an implementation-defined range.
* The maximum error is implementation-defined.
*/
-float __ovld __cnfn native_tan(float x);
-float2 __ovld __cnfn native_tan(float2 x);
-float3 __ovld __cnfn native_tan(float3 x);
-float4 __ovld __cnfn native_tan(float4 x);
-float8 __ovld __cnfn native_tan(float8 x);
-float16 __ovld __cnfn native_tan(float16 x);
+float __ovld __cnfn native_tan(float);
+float2 __ovld __cnfn native_tan(float2);
+float3 __ovld __cnfn native_tan(float3);
+float4 __ovld __cnfn native_tan(float4);
+float8 __ovld __cnfn native_tan(float8);
+float16 __ovld __cnfn native_tan(float16);
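/*
 * Editor's note (illustrative): the half_ and native_ variants trade
 * accuracy for speed; half_ functions keep relaxed but specified error
 * bounds, while native_ accuracy is wholly implementation-defined, e.g.:
 *
 *   float fast = native_sqrt(x);   // fastest, precision unspecified
 *   float medium = half_sqrt(x);   // relaxed but bounded error
 *   float precise = sqrt(x);       // tightest guaranteed error bound
 */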
// OpenCL v1.1 s6.11.3, v1.2 s6.12.3, v2.0 s6.13.3 - Integer Functions
/**
* Returns | x |.
*/
-uchar __ovld __cnfn abs(char x);
-uchar __ovld __cnfn abs(uchar x);
-uchar2 __ovld __cnfn abs(char2 x);
-uchar2 __ovld __cnfn abs(uchar2 x);
-uchar3 __ovld __cnfn abs(char3 x);
-uchar3 __ovld __cnfn abs(uchar3 x);
-uchar4 __ovld __cnfn abs(char4 x);
-uchar4 __ovld __cnfn abs(uchar4 x);
-uchar8 __ovld __cnfn abs(char8 x);
-uchar8 __ovld __cnfn abs(uchar8 x);
-uchar16 __ovld __cnfn abs(char16 x);
-uchar16 __ovld __cnfn abs(uchar16 x);
-ushort __ovld __cnfn abs(short x);
-ushort __ovld __cnfn abs(ushort x);
-ushort2 __ovld __cnfn abs(short2 x);
-ushort2 __ovld __cnfn abs(ushort2 x);
-ushort3 __ovld __cnfn abs(short3 x);
-ushort3 __ovld __cnfn abs(ushort3 x);
-ushort4 __ovld __cnfn abs(short4 x);
-ushort4 __ovld __cnfn abs(ushort4 x);
-ushort8 __ovld __cnfn abs(short8 x);
-ushort8 __ovld __cnfn abs(ushort8 x);
-ushort16 __ovld __cnfn abs(short16 x);
-ushort16 __ovld __cnfn abs(ushort16 x);
-uint __ovld __cnfn abs(int x);
-uint __ovld __cnfn abs(uint x);
-uint2 __ovld __cnfn abs(int2 x);
-uint2 __ovld __cnfn abs(uint2 x);
-uint3 __ovld __cnfn abs(int3 x);
-uint3 __ovld __cnfn abs(uint3 x);
-uint4 __ovld __cnfn abs(int4 x);
-uint4 __ovld __cnfn abs(uint4 x);
-uint8 __ovld __cnfn abs(int8 x);
-uint8 __ovld __cnfn abs(uint8 x);
-uint16 __ovld __cnfn abs(int16 x);
-uint16 __ovld __cnfn abs(uint16 x);
-ulong __ovld __cnfn abs(long x);
-ulong __ovld __cnfn abs(ulong x);
-ulong2 __ovld __cnfn abs(long2 x);
-ulong2 __ovld __cnfn abs(ulong2 x);
-ulong3 __ovld __cnfn abs(long3 x);
-ulong3 __ovld __cnfn abs(ulong3 x);
-ulong4 __ovld __cnfn abs(long4 x);
-ulong4 __ovld __cnfn abs(ulong4 x);
-ulong8 __ovld __cnfn abs(long8 x);
-ulong8 __ovld __cnfn abs(ulong8 x);
-ulong16 __ovld __cnfn abs(long16 x);
-ulong16 __ovld __cnfn abs(ulong16 x);
+uchar __ovld __cnfn abs(char);
+uchar __ovld __cnfn abs(uchar);
+uchar2 __ovld __cnfn abs(char2);
+uchar2 __ovld __cnfn abs(uchar2);
+uchar3 __ovld __cnfn abs(char3);
+uchar3 __ovld __cnfn abs(uchar3);
+uchar4 __ovld __cnfn abs(char4);
+uchar4 __ovld __cnfn abs(uchar4);
+uchar8 __ovld __cnfn abs(char8);
+uchar8 __ovld __cnfn abs(uchar8);
+uchar16 __ovld __cnfn abs(char16);
+uchar16 __ovld __cnfn abs(uchar16);
+ushort __ovld __cnfn abs(short);
+ushort __ovld __cnfn abs(ushort);
+ushort2 __ovld __cnfn abs(short2);
+ushort2 __ovld __cnfn abs(ushort2);
+ushort3 __ovld __cnfn abs(short3);
+ushort3 __ovld __cnfn abs(ushort3);
+ushort4 __ovld __cnfn abs(short4);
+ushort4 __ovld __cnfn abs(ushort4);
+ushort8 __ovld __cnfn abs(short8);
+ushort8 __ovld __cnfn abs(ushort8);
+ushort16 __ovld __cnfn abs(short16);
+ushort16 __ovld __cnfn abs(ushort16);
+uint __ovld __cnfn abs(int);
+uint __ovld __cnfn abs(uint);
+uint2 __ovld __cnfn abs(int2);
+uint2 __ovld __cnfn abs(uint2);
+uint3 __ovld __cnfn abs(int3);
+uint3 __ovld __cnfn abs(uint3);
+uint4 __ovld __cnfn abs(int4);
+uint4 __ovld __cnfn abs(uint4);
+uint8 __ovld __cnfn abs(int8);
+uint8 __ovld __cnfn abs(uint8);
+uint16 __ovld __cnfn abs(int16);
+uint16 __ovld __cnfn abs(uint16);
+ulong __ovld __cnfn abs(long);
+ulong __ovld __cnfn abs(ulong);
+ulong2 __ovld __cnfn abs(long2);
+ulong2 __ovld __cnfn abs(ulong2);
+ulong3 __ovld __cnfn abs(long3);
+ulong3 __ovld __cnfn abs(ulong3);
+ulong4 __ovld __cnfn abs(long4);
+ulong4 __ovld __cnfn abs(ulong4);
+ulong8 __ovld __cnfn abs(long8);
+ulong8 __ovld __cnfn abs(ulong8);
+ulong16 __ovld __cnfn abs(long16);
+ulong16 __ovld __cnfn abs(ulong16);
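/*
 * Editor's note (illustrative): abs returns the unsigned type of the
 * same width, so even the most negative value is representable, e.g.:
 *
 *   uchar m = abs((char)-128);  // == 128, no overflow
 */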
/**
* Returns | x - y | without modulo overflow.
*/
-uchar __ovld __cnfn abs_diff(char x, char y);
-uchar __ovld __cnfn abs_diff(uchar x, uchar y);
-uchar2 __ovld __cnfn abs_diff(char2 x, char2 y);
-uchar2 __ovld __cnfn abs_diff(uchar2 x, uchar2 y);
-uchar3 __ovld __cnfn abs_diff(char3 x, char3 y);
-uchar3 __ovld __cnfn abs_diff(uchar3 x, uchar3 y);
-uchar4 __ovld __cnfn abs_diff(char4 x, char4 y);
-uchar4 __ovld __cnfn abs_diff(uchar4 x, uchar4 y);
-uchar8 __ovld __cnfn abs_diff(char8 x, char8 y);
-uchar8 __ovld __cnfn abs_diff(uchar8 x, uchar8 y);
-uchar16 __ovld __cnfn abs_diff(char16 x, char16 y);
-uchar16 __ovld __cnfn abs_diff(uchar16 x, uchar16 y);
-ushort __ovld __cnfn abs_diff(short x, short y);
-ushort __ovld __cnfn abs_diff(ushort x, ushort y);
-ushort2 __ovld __cnfn abs_diff(short2 x, short2 y);
-ushort2 __ovld __cnfn abs_diff(ushort2 x, ushort2 y);
-ushort3 __ovld __cnfn abs_diff(short3 x, short3 y);
-ushort3 __ovld __cnfn abs_diff(ushort3 x, ushort3 y);
-ushort4 __ovld __cnfn abs_diff(short4 x, short4 y);
-ushort4 __ovld __cnfn abs_diff(ushort4 x, ushort4 y);
-ushort8 __ovld __cnfn abs_diff(short8 x, short8 y);
-ushort8 __ovld __cnfn abs_diff(ushort8 x, ushort8 y);
-ushort16 __ovld __cnfn abs_diff(short16 x, short16 y);
-ushort16 __ovld __cnfn abs_diff(ushort16 x, ushort16 y);
-uint __ovld __cnfn abs_diff(int x, int y);
-uint __ovld __cnfn abs_diff(uint x, uint y);
-uint2 __ovld __cnfn abs_diff(int2 x, int2 y);
-uint2 __ovld __cnfn abs_diff(uint2 x, uint2 y);
-uint3 __ovld __cnfn abs_diff(int3 x, int3 y);
-uint3 __ovld __cnfn abs_diff(uint3 x, uint3 y);
-uint4 __ovld __cnfn abs_diff(int4 x, int4 y);
-uint4 __ovld __cnfn abs_diff(uint4 x, uint4 y);
-uint8 __ovld __cnfn abs_diff(int8 x, int8 y);
-uint8 __ovld __cnfn abs_diff(uint8 x, uint8 y);
-uint16 __ovld __cnfn abs_diff(int16 x, int16 y);
-uint16 __ovld __cnfn abs_diff(uint16 x, uint16 y);
-ulong __ovld __cnfn abs_diff(long x, long y);
-ulong __ovld __cnfn abs_diff(ulong x, ulong y);
-ulong2 __ovld __cnfn abs_diff(long2 x, long2 y);
-ulong2 __ovld __cnfn abs_diff(ulong2 x, ulong2 y);
-ulong3 __ovld __cnfn abs_diff(long3 x, long3 y);
-ulong3 __ovld __cnfn abs_diff(ulong3 x, ulong3 y);
-ulong4 __ovld __cnfn abs_diff(long4 x, long4 y);
-ulong4 __ovld __cnfn abs_diff(ulong4 x, ulong4 y);
-ulong8 __ovld __cnfn abs_diff(long8 x, long8 y);
-ulong8 __ovld __cnfn abs_diff(ulong8 x, ulong8 y);
-ulong16 __ovld __cnfn abs_diff(long16 x, long16 y);
-ulong16 __ovld __cnfn abs_diff(ulong16 x, ulong16 y);
+uchar __ovld __cnfn abs_diff(char, char);
+uchar __ovld __cnfn abs_diff(uchar, uchar);
+uchar2 __ovld __cnfn abs_diff(char2, char2);
+uchar2 __ovld __cnfn abs_diff(uchar2, uchar2);
+uchar3 __ovld __cnfn abs_diff(char3, char3);
+uchar3 __ovld __cnfn abs_diff(uchar3, uchar3);
+uchar4 __ovld __cnfn abs_diff(char4, char4);
+uchar4 __ovld __cnfn abs_diff(uchar4, uchar4);
+uchar8 __ovld __cnfn abs_diff(char8, char8);
+uchar8 __ovld __cnfn abs_diff(uchar8, uchar8);
+uchar16 __ovld __cnfn abs_diff(char16, char16);
+uchar16 __ovld __cnfn abs_diff(uchar16, uchar16);
+ushort __ovld __cnfn abs_diff(short, short);
+ushort __ovld __cnfn abs_diff(ushort, ushort);
+ushort2 __ovld __cnfn abs_diff(short2, short2);
+ushort2 __ovld __cnfn abs_diff(ushort2, ushort2);
+ushort3 __ovld __cnfn abs_diff(short3, short3);
+ushort3 __ovld __cnfn abs_diff(ushort3, ushort3);
+ushort4 __ovld __cnfn abs_diff(short4, short4);
+ushort4 __ovld __cnfn abs_diff(ushort4, ushort4);
+ushort8 __ovld __cnfn abs_diff(short8, short8);
+ushort8 __ovld __cnfn abs_diff(ushort8, ushort8);
+ushort16 __ovld __cnfn abs_diff(short16, short16);
+ushort16 __ovld __cnfn abs_diff(ushort16, ushort16);
+uint __ovld __cnfn abs_diff(int, int);
+uint __ovld __cnfn abs_diff(uint, uint);
+uint2 __ovld __cnfn abs_diff(int2, int2);
+uint2 __ovld __cnfn abs_diff(uint2, uint2);
+uint3 __ovld __cnfn abs_diff(int3, int3);
+uint3 __ovld __cnfn abs_diff(uint3, uint3);
+uint4 __ovld __cnfn abs_diff(int4, int4);
+uint4 __ovld __cnfn abs_diff(uint4, uint4);
+uint8 __ovld __cnfn abs_diff(int8, int8);
+uint8 __ovld __cnfn abs_diff(uint8, uint8);
+uint16 __ovld __cnfn abs_diff(int16, int16);
+uint16 __ovld __cnfn abs_diff(uint16, uint16);
+ulong __ovld __cnfn abs_diff(long, long);
+ulong __ovld __cnfn abs_diff(ulong, ulong);
+ulong2 __ovld __cnfn abs_diff(long2, long2);
+ulong2 __ovld __cnfn abs_diff(ulong2, ulong2);
+ulong3 __ovld __cnfn abs_diff(long3, long3);
+ulong3 __ovld __cnfn abs_diff(ulong3, ulong3);
+ulong4 __ovld __cnfn abs_diff(long4, long4);
+ulong4 __ovld __cnfn abs_diff(ulong4, ulong4);
+ulong8 __ovld __cnfn abs_diff(long8, long8);
+ulong8 __ovld __cnfn abs_diff(ulong8, ulong8);
+ulong16 __ovld __cnfn abs_diff(long16, long16);
+ulong16 __ovld __cnfn abs_diff(ulong16, ulong16);
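/*
 * Illustrative example (editor's sketch): abs_diff never wraps, unlike
 * plain unsigned subtraction, e.g.:
 *
 *   uchar d = abs_diff((uchar)10, (uchar)250);  // == 240
 *   uchar w = (uchar)((uchar)10 - (uchar)250);  // wraps to 16
 */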
/**
* Returns x + y and saturates the result.
*/
-char __ovld __cnfn add_sat(char x, char y);
-uchar __ovld __cnfn add_sat(uchar x, uchar y);
-char2 __ovld __cnfn add_sat(char2 x, char2 y);
-uchar2 __ovld __cnfn add_sat(uchar2 x, uchar2 y);
-char3 __ovld __cnfn add_sat(char3 x, char3 y);
-uchar3 __ovld __cnfn add_sat(uchar3 x, uchar3 y);
-char4 __ovld __cnfn add_sat(char4 x, char4 y);
-uchar4 __ovld __cnfn add_sat(uchar4 x, uchar4 y);
-char8 __ovld __cnfn add_sat(char8 x, char8 y);
-uchar8 __ovld __cnfn add_sat(uchar8 x, uchar8 y);
-char16 __ovld __cnfn add_sat(char16 x, char16 y);
-uchar16 __ovld __cnfn add_sat(uchar16 x, uchar16 y);
-short __ovld __cnfn add_sat(short x, short y);
-ushort __ovld __cnfn add_sat(ushort x, ushort y);
-short2 __ovld __cnfn add_sat(short2 x, short2 y);
-ushort2 __ovld __cnfn add_sat(ushort2 x, ushort2 y);
-short3 __ovld __cnfn add_sat(short3 x, short3 y);
-ushort3 __ovld __cnfn add_sat(ushort3 x, ushort3 y);
-short4 __ovld __cnfn add_sat(short4 x, short4 y);
-ushort4 __ovld __cnfn add_sat(ushort4 x, ushort4 y);
-short8 __ovld __cnfn add_sat(short8 x, short8 y);
-ushort8 __ovld __cnfn add_sat(ushort8 x, ushort8 y);
-short16 __ovld __cnfn add_sat(short16 x, short16 y);
-ushort16 __ovld __cnfn add_sat(ushort16 x, ushort16 y);
-int __ovld __cnfn add_sat(int x, int y);
-uint __ovld __cnfn add_sat(uint x, uint y);
-int2 __ovld __cnfn add_sat(int2 x, int2 y);
-uint2 __ovld __cnfn add_sat(uint2 x, uint2 y);
-int3 __ovld __cnfn add_sat(int3 x, int3 y);
-uint3 __ovld __cnfn add_sat(uint3 x, uint3 y);
-int4 __ovld __cnfn add_sat(int4 x, int4 y);
-uint4 __ovld __cnfn add_sat(uint4 x, uint4 y);
-int8 __ovld __cnfn add_sat(int8 x, int8 y);
-uint8 __ovld __cnfn add_sat(uint8 x, uint8 y);
-int16 __ovld __cnfn add_sat(int16 x, int16 y);
-uint16 __ovld __cnfn add_sat(uint16 x, uint16 y);
-long __ovld __cnfn add_sat(long x, long y);
-ulong __ovld __cnfn add_sat(ulong x, ulong y);
-long2 __ovld __cnfn add_sat(long2 x, long2 y);
-ulong2 __ovld __cnfn add_sat(ulong2 x, ulong2 y);
-long3 __ovld __cnfn add_sat(long3 x, long3 y);
-ulong3 __ovld __cnfn add_sat(ulong3 x, ulong3 y);
-long4 __ovld __cnfn add_sat(long4 x, long4 y);
-ulong4 __ovld __cnfn add_sat(ulong4 x, ulong4 y);
-long8 __ovld __cnfn add_sat(long8 x, long8 y);
-ulong8 __ovld __cnfn add_sat(ulong8 x, ulong8 y);
-long16 __ovld __cnfn add_sat(long16 x, long16 y);
-ulong16 __ovld __cnfn add_sat(ulong16 x, ulong16 y);
+char __ovld __cnfn add_sat(char, char);
+uchar __ovld __cnfn add_sat(uchar, uchar);
+char2 __ovld __cnfn add_sat(char2, char2);
+uchar2 __ovld __cnfn add_sat(uchar2, uchar2);
+char3 __ovld __cnfn add_sat(char3, char3);
+uchar3 __ovld __cnfn add_sat(uchar3, uchar3);
+char4 __ovld __cnfn add_sat(char4, char4);
+uchar4 __ovld __cnfn add_sat(uchar4, uchar4);
+char8 __ovld __cnfn add_sat(char8, char8);
+uchar8 __ovld __cnfn add_sat(uchar8, uchar8);
+char16 __ovld __cnfn add_sat(char16, char16);
+uchar16 __ovld __cnfn add_sat(uchar16, uchar16);
+short __ovld __cnfn add_sat(short, short);
+ushort __ovld __cnfn add_sat(ushort, ushort);
+short2 __ovld __cnfn add_sat(short2, short2);
+ushort2 __ovld __cnfn add_sat(ushort2, ushort2);
+short3 __ovld __cnfn add_sat(short3, short3);
+ushort3 __ovld __cnfn add_sat(ushort3, ushort3);
+short4 __ovld __cnfn add_sat(short4, short4);
+ushort4 __ovld __cnfn add_sat(ushort4, ushort4);
+short8 __ovld __cnfn add_sat(short8, short8);
+ushort8 __ovld __cnfn add_sat(ushort8, ushort8);
+short16 __ovld __cnfn add_sat(short16, short16);
+ushort16 __ovld __cnfn add_sat(ushort16, ushort16);
+int __ovld __cnfn add_sat(int, int);
+uint __ovld __cnfn add_sat(uint, uint);
+int2 __ovld __cnfn add_sat(int2, int2);
+uint2 __ovld __cnfn add_sat(uint2, uint2);
+int3 __ovld __cnfn add_sat(int3, int3);
+uint3 __ovld __cnfn add_sat(uint3, uint3);
+int4 __ovld __cnfn add_sat(int4, int4);
+uint4 __ovld __cnfn add_sat(uint4, uint4);
+int8 __ovld __cnfn add_sat(int8, int8);
+uint8 __ovld __cnfn add_sat(uint8, uint8);
+int16 __ovld __cnfn add_sat(int16, int16);
+uint16 __ovld __cnfn add_sat(uint16, uint16);
+long __ovld __cnfn add_sat(long, long);
+ulong __ovld __cnfn add_sat(ulong, ulong);
+long2 __ovld __cnfn add_sat(long2, long2);
+ulong2 __ovld __cnfn add_sat(ulong2, ulong2);
+long3 __ovld __cnfn add_sat(long3, long3);
+ulong3 __ovld __cnfn add_sat(ulong3, ulong3);
+long4 __ovld __cnfn add_sat(long4, long4);
+ulong4 __ovld __cnfn add_sat(ulong4, ulong4);
+long8 __ovld __cnfn add_sat(long8, long8);
+ulong8 __ovld __cnfn add_sat(ulong8, ulong8);
+long16 __ovld __cnfn add_sat(long16, long16);
+ulong16 __ovld __cnfn add_sat(ulong16, ulong16);
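/* A minimal sketch of the saturating behavior described above (illustration
 * only, not part of the header), assuming 8-bit char semantics:
 *
 *   char a = 100, b = 100;
 *   char s = add_sat(a, b);   // clamps to CHAR_MAX: s == 127
 *                             // (a plain 8-bit sum would wrap to -56)
 */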
/**
* Returns (x + y) >> 1. The intermediate sum does
* not modulo overflow.
*/
-char __ovld __cnfn hadd(char x, char y);
-uchar __ovld __cnfn hadd(uchar x, uchar y);
-char2 __ovld __cnfn hadd(char2 x, char2 y);
-uchar2 __ovld __cnfn hadd(uchar2 x, uchar2 y);
-char3 __ovld __cnfn hadd(char3 x, char3 y);
-uchar3 __ovld __cnfn hadd(uchar3 x, uchar3 y);
-char4 __ovld __cnfn hadd(char4 x, char4 y);
-uchar4 __ovld __cnfn hadd(uchar4 x, uchar4 y);
-char8 __ovld __cnfn hadd(char8 x, char8 y);
-uchar8 __ovld __cnfn hadd(uchar8 x, uchar8 y);
-char16 __ovld __cnfn hadd(char16 x, char16 y);
-uchar16 __ovld __cnfn hadd(uchar16 x, uchar16 y);
-short __ovld __cnfn hadd(short x, short y);
-ushort __ovld __cnfn hadd(ushort x, ushort y);
-short2 __ovld __cnfn hadd(short2 x, short2 y);
-ushort2 __ovld __cnfn hadd(ushort2 x, ushort2 y);
-short3 __ovld __cnfn hadd(short3 x, short3 y);
-ushort3 __ovld __cnfn hadd(ushort3 x, ushort3 y);
-short4 __ovld __cnfn hadd(short4 x, short4 y);
-ushort4 __ovld __cnfn hadd(ushort4 x, ushort4 y);
-short8 __ovld __cnfn hadd(short8 x, short8 y);
-ushort8 __ovld __cnfn hadd(ushort8 x, ushort8 y);
-short16 __ovld __cnfn hadd(short16 x, short16 y);
-ushort16 __ovld __cnfn hadd(ushort16 x, ushort16 y);
-int __ovld __cnfn hadd(int x, int y);
-uint __ovld __cnfn hadd(uint x, uint y);
-int2 __ovld __cnfn hadd(int2 x, int2 y);
-uint2 __ovld __cnfn hadd(uint2 x, uint2 y);
-int3 __ovld __cnfn hadd(int3 x, int3 y);
-uint3 __ovld __cnfn hadd(uint3 x, uint3 y);
-int4 __ovld __cnfn hadd(int4 x, int4 y);
-uint4 __ovld __cnfn hadd(uint4 x, uint4 y);
-int8 __ovld __cnfn hadd(int8 x, int8 y);
-uint8 __ovld __cnfn hadd(uint8 x, uint8 y);
-int16 __ovld __cnfn hadd(int16 x, int16 y);
-uint16 __ovld __cnfn hadd(uint16 x, uint16 y);
-long __ovld __cnfn hadd(long x, long y);
-ulong __ovld __cnfn hadd(ulong x, ulong y);
-long2 __ovld __cnfn hadd(long2 x, long2 y);
-ulong2 __ovld __cnfn hadd(ulong2 x, ulong2 y);
-long3 __ovld __cnfn hadd(long3 x, long3 y);
-ulong3 __ovld __cnfn hadd(ulong3 x, ulong3 y);
-long4 __ovld __cnfn hadd(long4 x, long4 y);
-ulong4 __ovld __cnfn hadd(ulong4 x, ulong4 y);
-long8 __ovld __cnfn hadd(long8 x, long8 y);
-ulong8 __ovld __cnfn hadd(ulong8 x, ulong8 y);
-long16 __ovld __cnfn hadd(long16 x, long16 y);
-ulong16 __ovld __cnfn hadd(ulong16 x, ulong16 y);
+char __ovld __cnfn hadd(char, char);
+uchar __ovld __cnfn hadd(uchar, uchar);
+char2 __ovld __cnfn hadd(char2, char2);
+uchar2 __ovld __cnfn hadd(uchar2, uchar2);
+char3 __ovld __cnfn hadd(char3, char3);
+uchar3 __ovld __cnfn hadd(uchar3, uchar3);
+char4 __ovld __cnfn hadd(char4, char4);
+uchar4 __ovld __cnfn hadd(uchar4, uchar4);
+char8 __ovld __cnfn hadd(char8, char8);
+uchar8 __ovld __cnfn hadd(uchar8, uchar8);
+char16 __ovld __cnfn hadd(char16, char16);
+uchar16 __ovld __cnfn hadd(uchar16, uchar16);
+short __ovld __cnfn hadd(short, short);
+ushort __ovld __cnfn hadd(ushort, ushort);
+short2 __ovld __cnfn hadd(short2, short2);
+ushort2 __ovld __cnfn hadd(ushort2, ushort2);
+short3 __ovld __cnfn hadd(short3, short3);
+ushort3 __ovld __cnfn hadd(ushort3, ushort3);
+short4 __ovld __cnfn hadd(short4, short4);
+ushort4 __ovld __cnfn hadd(ushort4, ushort4);
+short8 __ovld __cnfn hadd(short8, short8);
+ushort8 __ovld __cnfn hadd(ushort8, ushort8);
+short16 __ovld __cnfn hadd(short16, short16);
+ushort16 __ovld __cnfn hadd(ushort16, ushort16);
+int __ovld __cnfn hadd(int, int);
+uint __ovld __cnfn hadd(uint, uint);
+int2 __ovld __cnfn hadd(int2, int2);
+uint2 __ovld __cnfn hadd(uint2, uint2);
+int3 __ovld __cnfn hadd(int3, int3);
+uint3 __ovld __cnfn hadd(uint3, uint3);
+int4 __ovld __cnfn hadd(int4, int4);
+uint4 __ovld __cnfn hadd(uint4, uint4);
+int8 __ovld __cnfn hadd(int8, int8);
+uint8 __ovld __cnfn hadd(uint8, uint8);
+int16 __ovld __cnfn hadd(int16, int16);
+uint16 __ovld __cnfn hadd(uint16, uint16);
+long __ovld __cnfn hadd(long, long);
+ulong __ovld __cnfn hadd(ulong, ulong);
+long2 __ovld __cnfn hadd(long2, long2);
+ulong2 __ovld __cnfn hadd(ulong2, ulong2);
+long3 __ovld __cnfn hadd(long3, long3);
+ulong3 __ovld __cnfn hadd(ulong3, ulong3);
+long4 __ovld __cnfn hadd(long4, long4);
+ulong4 __ovld __cnfn hadd(ulong4, ulong4);
+long8 __ovld __cnfn hadd(long8, long8);
+ulong8 __ovld __cnfn hadd(ulong8, ulong8);
+long16 __ovld __cnfn hadd(long16, long16);
+ulong16 __ovld __cnfn hadd(ulong16, ulong16);
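/* A minimal sketch (illustration only): hadd computes (x + y) >> 1 as if in
 * wider precision, so the carry out of the top bit is not lost.
 *
 *   uchar a = 255, b = 1;
 *   uchar h = hadd(a, b);   // (256) >> 1 == 128; an 8-bit-wide sum
 *                           // would have wrapped to 0 first
 */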
/**
* Returns (x + y + 1) >> 1. The intermediate sum
* does not modulo overflow.
*/
-char __ovld __cnfn rhadd(char x, char y);
-uchar __ovld __cnfn rhadd(uchar x, uchar y);
-char2 __ovld __cnfn rhadd(char2 x, char2 y);
-uchar2 __ovld __cnfn rhadd(uchar2 x, uchar2 y);
-char3 __ovld __cnfn rhadd(char3 x, char3 y);
-uchar3 __ovld __cnfn rhadd(uchar3 x, uchar3 y);
-char4 __ovld __cnfn rhadd(char4 x, char4 y);
-uchar4 __ovld __cnfn rhadd(uchar4 x, uchar4 y);
-char8 __ovld __cnfn rhadd(char8 x, char8 y);
-uchar8 __ovld __cnfn rhadd(uchar8 x, uchar8 y);
-char16 __ovld __cnfn rhadd(char16 x, char16 y);
-uchar16 __ovld __cnfn rhadd(uchar16 x, uchar16 y);
-short __ovld __cnfn rhadd(short x, short y);
-ushort __ovld __cnfn rhadd(ushort x, ushort y);
-short2 __ovld __cnfn rhadd(short2 x, short2 y);
-ushort2 __ovld __cnfn rhadd(ushort2 x, ushort2 y);
-short3 __ovld __cnfn rhadd(short3 x, short3 y);
-ushort3 __ovld __cnfn rhadd(ushort3 x, ushort3 y);
-short4 __ovld __cnfn rhadd(short4 x, short4 y);
-ushort4 __ovld __cnfn rhadd(ushort4 x, ushort4 y);
-short8 __ovld __cnfn rhadd(short8 x, short8 y);
-ushort8 __ovld __cnfn rhadd(ushort8 x, ushort8 y);
-short16 __ovld __cnfn rhadd(short16 x, short16 y);
-ushort16 __ovld __cnfn rhadd(ushort16 x, ushort16 y);
-int __ovld __cnfn rhadd(int x, int y);
-uint __ovld __cnfn rhadd(uint x, uint y);
-int2 __ovld __cnfn rhadd(int2 x, int2 y);
-uint2 __ovld __cnfn rhadd(uint2 x, uint2 y);
-int3 __ovld __cnfn rhadd(int3 x, int3 y);
-uint3 __ovld __cnfn rhadd(uint3 x, uint3 y);
-int4 __ovld __cnfn rhadd(int4 x, int4 y);
-uint4 __ovld __cnfn rhadd(uint4 x, uint4 y);
-int8 __ovld __cnfn rhadd(int8 x, int8 y);
-uint8 __ovld __cnfn rhadd(uint8 x, uint8 y);
-int16 __ovld __cnfn rhadd(int16 x, int16 y);
-uint16 __ovld __cnfn rhadd(uint16 x, uint16 y);
-long __ovld __cnfn rhadd(long x, long y);
-ulong __ovld __cnfn rhadd(ulong x, ulong y);
-long2 __ovld __cnfn rhadd(long2 x, long2 y);
-ulong2 __ovld __cnfn rhadd(ulong2 x, ulong2 y);
-long3 __ovld __cnfn rhadd(long3 x, long3 y);
-ulong3 __ovld __cnfn rhadd(ulong3 x, ulong3 y);
-long4 __ovld __cnfn rhadd(long4 x, long4 y);
-ulong4 __ovld __cnfn rhadd(ulong4 x, ulong4 y);
-long8 __ovld __cnfn rhadd(long8 x, long8 y);
-ulong8 __ovld __cnfn rhadd(ulong8 x, ulong8 y);
-long16 __ovld __cnfn rhadd(long16 x, long16 y);
-ulong16 __ovld __cnfn rhadd(ulong16 x, ulong16 y);
+char __ovld __cnfn rhadd(char, char);
+uchar __ovld __cnfn rhadd(uchar, uchar);
+char2 __ovld __cnfn rhadd(char2, char2);
+uchar2 __ovld __cnfn rhadd(uchar2, uchar2);
+char3 __ovld __cnfn rhadd(char3, char3);
+uchar3 __ovld __cnfn rhadd(uchar3, uchar3);
+char4 __ovld __cnfn rhadd(char4, char4);
+uchar4 __ovld __cnfn rhadd(uchar4, uchar4);
+char8 __ovld __cnfn rhadd(char8, char8);
+uchar8 __ovld __cnfn rhadd(uchar8, uchar8);
+char16 __ovld __cnfn rhadd(char16, char16);
+uchar16 __ovld __cnfn rhadd(uchar16, uchar16);
+short __ovld __cnfn rhadd(short, short);
+ushort __ovld __cnfn rhadd(ushort, ushort);
+short2 __ovld __cnfn rhadd(short2, short2);
+ushort2 __ovld __cnfn rhadd(ushort2, ushort2);
+short3 __ovld __cnfn rhadd(short3, short3);
+ushort3 __ovld __cnfn rhadd(ushort3, ushort3);
+short4 __ovld __cnfn rhadd(short4, short4);
+ushort4 __ovld __cnfn rhadd(ushort4, ushort4);
+short8 __ovld __cnfn rhadd(short8, short8);
+ushort8 __ovld __cnfn rhadd(ushort8, ushort8);
+short16 __ovld __cnfn rhadd(short16, short16);
+ushort16 __ovld __cnfn rhadd(ushort16, ushort16);
+int __ovld __cnfn rhadd(int, int);
+uint __ovld __cnfn rhadd(uint, uint);
+int2 __ovld __cnfn rhadd(int2, int2);
+uint2 __ovld __cnfn rhadd(uint2, uint2);
+int3 __ovld __cnfn rhadd(int3, int3);
+uint3 __ovld __cnfn rhadd(uint3, uint3);
+int4 __ovld __cnfn rhadd(int4, int4);
+uint4 __ovld __cnfn rhadd(uint4, uint4);
+int8 __ovld __cnfn rhadd(int8, int8);
+uint8 __ovld __cnfn rhadd(uint8, uint8);
+int16 __ovld __cnfn rhadd(int16, int16);
+uint16 __ovld __cnfn rhadd(uint16, uint16);
+long __ovld __cnfn rhadd(long, long);
+ulong __ovld __cnfn rhadd(ulong, ulong);
+long2 __ovld __cnfn rhadd(long2, long2);
+ulong2 __ovld __cnfn rhadd(ulong2, ulong2);
+long3 __ovld __cnfn rhadd(long3, long3);
+ulong3 __ovld __cnfn rhadd(ulong3, ulong3);
+long4 __ovld __cnfn rhadd(long4, long4);
+ulong4 __ovld __cnfn rhadd(ulong4, ulong4);
+long8 __ovld __cnfn rhadd(long8, long8);
+ulong8 __ovld __cnfn rhadd(ulong8, ulong8);
+long16 __ovld __cnfn rhadd(long16, long16);
+ulong16 __ovld __cnfn rhadd(ulong16, ulong16);
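/* A minimal sketch (illustration only) of how rhadd differs from hadd: the
 * +1 makes the halving round up instead of down.
 *
 *   int h = hadd(5, 6);    // (11) >> 1 == 5
 *   int r = rhadd(5, 6);   // (12) >> 1 == 6
 */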
/**
* Returns min(max(x, minval), maxval).
* Results are undefined if minval > maxval.
*/
-char __ovld __cnfn clamp(char x, char minval, char maxval);
-uchar __ovld __cnfn clamp(uchar x, uchar minval, uchar maxval);
-char2 __ovld __cnfn clamp(char2 x, char2 minval, char2 maxval);
-uchar2 __ovld __cnfn clamp(uchar2 x, uchar2 minval, uchar2 maxval);
-char3 __ovld __cnfn clamp(char3 x, char3 minval, char3 maxval);
-uchar3 __ovld __cnfn clamp(uchar3 x, uchar3 minval, uchar3 maxval);
-char4 __ovld __cnfn clamp(char4 x, char4 minval, char4 maxval);
-uchar4 __ovld __cnfn clamp(uchar4 x, uchar4 minval, uchar4 maxval);
-char8 __ovld __cnfn clamp(char8 x, char8 minval, char8 maxval);
-uchar8 __ovld __cnfn clamp(uchar8 x, uchar8 minval, uchar8 maxval);
-char16 __ovld __cnfn clamp(char16 x, char16 minval, char16 maxval);
-uchar16 __ovld __cnfn clamp(uchar16 x, uchar16 minval, uchar16 maxval);
-short __ovld __cnfn clamp(short x, short minval, short maxval);
-ushort __ovld __cnfn clamp(ushort x, ushort minval, ushort maxval);
-short2 __ovld __cnfn clamp(short2 x, short2 minval, short2 maxval);
-ushort2 __ovld __cnfn clamp(ushort2 x, ushort2 minval, ushort2 maxval);
-short3 __ovld __cnfn clamp(short3 x, short3 minval, short3 maxval);
-ushort3 __ovld __cnfn clamp(ushort3 x, ushort3 minval, ushort3 maxval);
-short4 __ovld __cnfn clamp(short4 x, short4 minval, short4 maxval);
-ushort4 __ovld __cnfn clamp(ushort4 x, ushort4 minval, ushort4 maxval);
-short8 __ovld __cnfn clamp(short8 x, short8 minval, short8 maxval);
-ushort8 __ovld __cnfn clamp(ushort8 x, ushort8 minval, ushort8 maxval);
-short16 __ovld __cnfn clamp(short16 x, short16 minval, short16 maxval);
-ushort16 __ovld __cnfn clamp(ushort16 x, ushort16 minval, ushort16 maxval);
-int __ovld __cnfn clamp(int x, int minval, int maxval);
-uint __ovld __cnfn clamp(uint x, uint minval, uint maxval);
-int2 __ovld __cnfn clamp(int2 x, int2 minval, int2 maxval);
-uint2 __ovld __cnfn clamp(uint2 x, uint2 minval, uint2 maxval);
-int3 __ovld __cnfn clamp(int3 x, int3 minval, int3 maxval);
-uint3 __ovld __cnfn clamp(uint3 x, uint3 minval, uint3 maxval);
-int4 __ovld __cnfn clamp(int4 x, int4 minval, int4 maxval);
-uint4 __ovld __cnfn clamp(uint4 x, uint4 minval, uint4 maxval);
-int8 __ovld __cnfn clamp(int8 x, int8 minval, int8 maxval);
-uint8 __ovld __cnfn clamp(uint8 x, uint8 minval, uint8 maxval);
-int16 __ovld __cnfn clamp(int16 x, int16 minval, int16 maxval);
-uint16 __ovld __cnfn clamp(uint16 x, uint16 minval, uint16 maxval);
-long __ovld __cnfn clamp(long x, long minval, long maxval);
-ulong __ovld __cnfn clamp(ulong x, ulong minval, ulong maxval);
-long2 __ovld __cnfn clamp(long2 x, long2 minval, long2 maxval);
-ulong2 __ovld __cnfn clamp(ulong2 x, ulong2 minval, ulong2 maxval);
-long3 __ovld __cnfn clamp(long3 x, long3 minval, long3 maxval);
-ulong3 __ovld __cnfn clamp(ulong3 x, ulong3 minval, ulong3 maxval);
-long4 __ovld __cnfn clamp(long4 x, long4 minval, long4 maxval);
-ulong4 __ovld __cnfn clamp(ulong4 x, ulong4 minval, ulong4 maxval);
-long8 __ovld __cnfn clamp(long8 x, long8 minval, long8 maxval);
-ulong8 __ovld __cnfn clamp(ulong8 x, ulong8 minval, ulong8 maxval);
-long16 __ovld __cnfn clamp(long16 x, long16 minval, long16 maxval);
-ulong16 __ovld __cnfn clamp(ulong16 x, ulong16 minval, ulong16 maxval);
-char2 __ovld __cnfn clamp(char2 x, char minval, char maxval);
-uchar2 __ovld __cnfn clamp(uchar2 x, uchar minval, uchar maxval);
-char3 __ovld __cnfn clamp(char3 x, char minval, char maxval);
-uchar3 __ovld __cnfn clamp(uchar3 x, uchar minval, uchar maxval);
-char4 __ovld __cnfn clamp(char4 x, char minval, char maxval);
-uchar4 __ovld __cnfn clamp(uchar4 x, uchar minval, uchar maxval);
-char8 __ovld __cnfn clamp(char8 x, char minval, char maxval);
-uchar8 __ovld __cnfn clamp(uchar8 x, uchar minval, uchar maxval);
-char16 __ovld __cnfn clamp(char16 x, char minval, char maxval);
-uchar16 __ovld __cnfn clamp(uchar16 x, uchar minval, uchar maxval);
-short2 __ovld __cnfn clamp(short2 x, short minval, short maxval);
-ushort2 __ovld __cnfn clamp(ushort2 x, ushort minval, ushort maxval);
-short3 __ovld __cnfn clamp(short3 x, short minval, short maxval);
-ushort3 __ovld __cnfn clamp(ushort3 x, ushort minval, ushort maxval);
-short4 __ovld __cnfn clamp(short4 x, short minval, short maxval);
-ushort4 __ovld __cnfn clamp(ushort4 x, ushort minval, ushort maxval);
-short8 __ovld __cnfn clamp(short8 x, short minval, short maxval);
-ushort8 __ovld __cnfn clamp(ushort8 x, ushort minval, ushort maxval);
-short16 __ovld __cnfn clamp(short16 x, short minval, short maxval);
-ushort16 __ovld __cnfn clamp(ushort16 x, ushort minval, ushort maxval);
-int2 __ovld __cnfn clamp(int2 x, int minval, int maxval);
-uint2 __ovld __cnfn clamp(uint2 x, uint minval, uint maxval);
-int3 __ovld __cnfn clamp(int3 x, int minval, int maxval);
-uint3 __ovld __cnfn clamp(uint3 x, uint minval, uint maxval);
-int4 __ovld __cnfn clamp(int4 x, int minval, int maxval);
-uint4 __ovld __cnfn clamp(uint4 x, uint minval, uint maxval);
-int8 __ovld __cnfn clamp(int8 x, int minval, int maxval);
-uint8 __ovld __cnfn clamp(uint8 x, uint minval, uint maxval);
-int16 __ovld __cnfn clamp(int16 x, int minval, int maxval);
-uint16 __ovld __cnfn clamp(uint16 x, uint minval, uint maxval);
-long2 __ovld __cnfn clamp(long2 x, long minval, long maxval);
-ulong2 __ovld __cnfn clamp(ulong2 x, ulong minval, ulong maxval);
-long3 __ovld __cnfn clamp(long3 x, long minval, long maxval);
-ulong3 __ovld __cnfn clamp(ulong3 x, ulong minval, ulong maxval);
-long4 __ovld __cnfn clamp(long4 x, long minval, long maxval);
-ulong4 __ovld __cnfn clamp(ulong4 x, ulong minval, ulong maxval);
-long8 __ovld __cnfn clamp(long8 x, long minval, long maxval);
-ulong8 __ovld __cnfn clamp(ulong8 x, ulong minval, ulong maxval);
-long16 __ovld __cnfn clamp(long16 x, long minval, long maxval);
-ulong16 __ovld __cnfn clamp(ulong16 x, ulong minval, ulong maxval);
+char __ovld __cnfn clamp(char, char, char);
+uchar __ovld __cnfn clamp(uchar, uchar, uchar);
+char2 __ovld __cnfn clamp(char2, char2, char2);
+uchar2 __ovld __cnfn clamp(uchar2, uchar2, uchar2);
+char3 __ovld __cnfn clamp(char3, char3, char3);
+uchar3 __ovld __cnfn clamp(uchar3, uchar3, uchar3);
+char4 __ovld __cnfn clamp(char4, char4, char4);
+uchar4 __ovld __cnfn clamp(uchar4, uchar4, uchar4);
+char8 __ovld __cnfn clamp(char8, char8, char8);
+uchar8 __ovld __cnfn clamp(uchar8, uchar8, uchar8);
+char16 __ovld __cnfn clamp(char16, char16, char16);
+uchar16 __ovld __cnfn clamp(uchar16, uchar16, uchar16);
+short __ovld __cnfn clamp(short, short, short);
+ushort __ovld __cnfn clamp(ushort, ushort, ushort);
+short2 __ovld __cnfn clamp(short2, short2, short2);
+ushort2 __ovld __cnfn clamp(ushort2, ushort2, ushort2);
+short3 __ovld __cnfn clamp(short3, short3, short3);
+ushort3 __ovld __cnfn clamp(ushort3, ushort3, ushort3);
+short4 __ovld __cnfn clamp(short4, short4, short4);
+ushort4 __ovld __cnfn clamp(ushort4, ushort4, ushort4);
+short8 __ovld __cnfn clamp(short8, short8, short8);
+ushort8 __ovld __cnfn clamp(ushort8, ushort8, ushort8);
+short16 __ovld __cnfn clamp(short16, short16, short16);
+ushort16 __ovld __cnfn clamp(ushort16, ushort16, ushort16);
+int __ovld __cnfn clamp(int, int, int);
+uint __ovld __cnfn clamp(uint, uint, uint);
+int2 __ovld __cnfn clamp(int2, int2, int2);
+uint2 __ovld __cnfn clamp(uint2, uint2, uint2);
+int3 __ovld __cnfn clamp(int3, int3, int3);
+uint3 __ovld __cnfn clamp(uint3, uint3, uint3);
+int4 __ovld __cnfn clamp(int4, int4, int4);
+uint4 __ovld __cnfn clamp(uint4, uint4, uint4);
+int8 __ovld __cnfn clamp(int8, int8, int8);
+uint8 __ovld __cnfn clamp(uint8, uint8, uint8);
+int16 __ovld __cnfn clamp(int16, int16, int16);
+uint16 __ovld __cnfn clamp(uint16, uint16, uint16);
+long __ovld __cnfn clamp(long, long, long);
+ulong __ovld __cnfn clamp(ulong, ulong, ulong);
+long2 __ovld __cnfn clamp(long2, long2, long2);
+ulong2 __ovld __cnfn clamp(ulong2, ulong2, ulong2);
+long3 __ovld __cnfn clamp(long3, long3, long3);
+ulong3 __ovld __cnfn clamp(ulong3, ulong3, ulong3);
+long4 __ovld __cnfn clamp(long4, long4, long4);
+ulong4 __ovld __cnfn clamp(ulong4, ulong4, ulong4);
+long8 __ovld __cnfn clamp(long8, long8, long8);
+ulong8 __ovld __cnfn clamp(ulong8, ulong8, ulong8);
+long16 __ovld __cnfn clamp(long16, long16, long16);
+ulong16 __ovld __cnfn clamp(ulong16, ulong16, ulong16);
+char2 __ovld __cnfn clamp(char2, char, char);
+uchar2 __ovld __cnfn clamp(uchar2, uchar, uchar);
+char3 __ovld __cnfn clamp(char3, char, char);
+uchar3 __ovld __cnfn clamp(uchar3, uchar, uchar);
+char4 __ovld __cnfn clamp(char4, char, char);
+uchar4 __ovld __cnfn clamp(uchar4, uchar, uchar);
+char8 __ovld __cnfn clamp(char8, char, char);
+uchar8 __ovld __cnfn clamp(uchar8, uchar, uchar);
+char16 __ovld __cnfn clamp(char16, char, char);
+uchar16 __ovld __cnfn clamp(uchar16, uchar, uchar);
+short2 __ovld __cnfn clamp(short2, short, short);
+ushort2 __ovld __cnfn clamp(ushort2, ushort, ushort);
+short3 __ovld __cnfn clamp(short3, short, short);
+ushort3 __ovld __cnfn clamp(ushort3, ushort, ushort);
+short4 __ovld __cnfn clamp(short4, short, short);
+ushort4 __ovld __cnfn clamp(ushort4, ushort, ushort);
+short8 __ovld __cnfn clamp(short8, short, short);
+ushort8 __ovld __cnfn clamp(ushort8, ushort, ushort);
+short16 __ovld __cnfn clamp(short16, short, short);
+ushort16 __ovld __cnfn clamp(ushort16, ushort, ushort);
+int2 __ovld __cnfn clamp(int2, int, int);
+uint2 __ovld __cnfn clamp(uint2, uint, uint);
+int3 __ovld __cnfn clamp(int3, int, int);
+uint3 __ovld __cnfn clamp(uint3, uint, uint);
+int4 __ovld __cnfn clamp(int4, int, int);
+uint4 __ovld __cnfn clamp(uint4, uint, uint);
+int8 __ovld __cnfn clamp(int8, int, int);
+uint8 __ovld __cnfn clamp(uint8, uint, uint);
+int16 __ovld __cnfn clamp(int16, int, int);
+uint16 __ovld __cnfn clamp(uint16, uint, uint);
+long2 __ovld __cnfn clamp(long2, long, long);
+ulong2 __ovld __cnfn clamp(ulong2, ulong, ulong);
+long3 __ovld __cnfn clamp(long3, long, long);
+ulong3 __ovld __cnfn clamp(ulong3, ulong, ulong);
+long4 __ovld __cnfn clamp(long4, long, long);
+ulong4 __ovld __cnfn clamp(ulong4, ulong, ulong);
+long8 __ovld __cnfn clamp(long8, long, long);
+ulong8 __ovld __cnfn clamp(ulong8, ulong, ulong);
+long16 __ovld __cnfn clamp(long16, long, long);
+ulong16 __ovld __cnfn clamp(ulong16, ulong, ulong);
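/* A minimal sketch (illustration only) covering both overload families
 * above: per-element limits, and a single scalar limit pair applied to
 * every element of a vector.
 *
 *   int  s = clamp(130, 0, 100);                     // 100
 *   int4 v = clamp((int4)(-5, 20, 130, 7), 0, 100);  // (0, 20, 100, 7)
 */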
/**
* Returns the number of leading 0-bits in x, starting
* at the most significant bit position.
*/
-char __ovld __cnfn clz(char x);
-uchar __ovld __cnfn clz(uchar x);
-char2 __ovld __cnfn clz(char2 x);
-uchar2 __ovld __cnfn clz(uchar2 x);
-char3 __ovld __cnfn clz(char3 x);
-uchar3 __ovld __cnfn clz(uchar3 x);
-char4 __ovld __cnfn clz(char4 x);
-uchar4 __ovld __cnfn clz(uchar4 x);
-char8 __ovld __cnfn clz(char8 x);
-uchar8 __ovld __cnfn clz(uchar8 x);
-char16 __ovld __cnfn clz(char16 x);
-uchar16 __ovld __cnfn clz(uchar16 x);
-short __ovld __cnfn clz(short x);
-ushort __ovld __cnfn clz(ushort x);
-short2 __ovld __cnfn clz(short2 x);
-ushort2 __ovld __cnfn clz(ushort2 x);
-short3 __ovld __cnfn clz(short3 x);
-ushort3 __ovld __cnfn clz(ushort3 x);
-short4 __ovld __cnfn clz(short4 x);
-ushort4 __ovld __cnfn clz(ushort4 x);
-short8 __ovld __cnfn clz(short8 x);
-ushort8 __ovld __cnfn clz(ushort8 x);
-short16 __ovld __cnfn clz(short16 x);
-ushort16 __ovld __cnfn clz(ushort16 x);
-int __ovld __cnfn clz(int x);
-uint __ovld __cnfn clz(uint x);
-int2 __ovld __cnfn clz(int2 x);
-uint2 __ovld __cnfn clz(uint2 x);
-int3 __ovld __cnfn clz(int3 x);
-uint3 __ovld __cnfn clz(uint3 x);
-int4 __ovld __cnfn clz(int4 x);
-uint4 __ovld __cnfn clz(uint4 x);
-int8 __ovld __cnfn clz(int8 x);
-uint8 __ovld __cnfn clz(uint8 x);
-int16 __ovld __cnfn clz(int16 x);
-uint16 __ovld __cnfn clz(uint16 x);
-long __ovld __cnfn clz(long x);
-ulong __ovld __cnfn clz(ulong x);
-long2 __ovld __cnfn clz(long2 x);
-ulong2 __ovld __cnfn clz(ulong2 x);
-long3 __ovld __cnfn clz(long3 x);
-ulong3 __ovld __cnfn clz(ulong3 x);
-long4 __ovld __cnfn clz(long4 x);
-ulong4 __ovld __cnfn clz(ulong4 x);
-long8 __ovld __cnfn clz(long8 x);
-ulong8 __ovld __cnfn clz(ulong8 x);
-long16 __ovld __cnfn clz(long16 x);
-ulong16 __ovld __cnfn clz(ulong16 x);
+char __ovld __cnfn clz(char);
+uchar __ovld __cnfn clz(uchar);
+char2 __ovld __cnfn clz(char2);
+uchar2 __ovld __cnfn clz(uchar2);
+char3 __ovld __cnfn clz(char3);
+uchar3 __ovld __cnfn clz(uchar3);
+char4 __ovld __cnfn clz(char4);
+uchar4 __ovld __cnfn clz(uchar4);
+char8 __ovld __cnfn clz(char8);
+uchar8 __ovld __cnfn clz(uchar8);
+char16 __ovld __cnfn clz(char16);
+uchar16 __ovld __cnfn clz(uchar16);
+short __ovld __cnfn clz(short);
+ushort __ovld __cnfn clz(ushort);
+short2 __ovld __cnfn clz(short2);
+ushort2 __ovld __cnfn clz(ushort2);
+short3 __ovld __cnfn clz(short3);
+ushort3 __ovld __cnfn clz(ushort3);
+short4 __ovld __cnfn clz(short4);
+ushort4 __ovld __cnfn clz(ushort4);
+short8 __ovld __cnfn clz(short8);
+ushort8 __ovld __cnfn clz(ushort8);
+short16 __ovld __cnfn clz(short16);
+ushort16 __ovld __cnfn clz(ushort16);
+int __ovld __cnfn clz(int);
+uint __ovld __cnfn clz(uint);
+int2 __ovld __cnfn clz(int2);
+uint2 __ovld __cnfn clz(uint2);
+int3 __ovld __cnfn clz(int3);
+uint3 __ovld __cnfn clz(uint3);
+int4 __ovld __cnfn clz(int4);
+uint4 __ovld __cnfn clz(uint4);
+int8 __ovld __cnfn clz(int8);
+uint8 __ovld __cnfn clz(uint8);
+int16 __ovld __cnfn clz(int16);
+uint16 __ovld __cnfn clz(uint16);
+long __ovld __cnfn clz(long);
+ulong __ovld __cnfn clz(ulong);
+long2 __ovld __cnfn clz(long2);
+ulong2 __ovld __cnfn clz(ulong2);
+long3 __ovld __cnfn clz(long3);
+ulong3 __ovld __cnfn clz(ulong3);
+long4 __ovld __cnfn clz(long4);
+ulong4 __ovld __cnfn clz(ulong4);
+long8 __ovld __cnfn clz(long8);
+ulong8 __ovld __cnfn clz(ulong8);
+long16 __ovld __cnfn clz(long16);
+ulong16 __ovld __cnfn clz(ulong16);
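/* A minimal sketch (illustration only): the count is relative to the width
 * of the (component) type, so the same value gives different answers at
 * different widths.
 *
 *   uchar c8  = clz((uchar)1);   // 7 leading zeros in 8 bits
 *   uint  c32 = clz(1u);         // 31 leading zeros in 32 bits
 */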
/**
* Returns the count of trailing 0-bits in x. If x is 0,
@@ -9368,396 +9368,396 @@ ulong16 __ovld __cnfn clz(ulong16 x);
* component type of x, if x is a vector.
*/
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-char __ovld __cnfn ctz(char x);
-uchar __ovld __cnfn ctz(uchar x);
-char2 __ovld __cnfn ctz(char2 x);
-uchar2 __ovld __cnfn ctz(uchar2 x);
-char3 __ovld __cnfn ctz(char3 x);
-uchar3 __ovld __cnfn ctz(uchar3 x);
-char4 __ovld __cnfn ctz(char4 x);
-uchar4 __ovld __cnfn ctz(uchar4 x);
-char8 __ovld __cnfn ctz(char8 x);
-uchar8 __ovld __cnfn ctz(uchar8 x);
-char16 __ovld __cnfn ctz(char16 x);
-uchar16 __ovld __cnfn ctz(uchar16 x);
-short __ovld __cnfn ctz(short x);
-ushort __ovld __cnfn ctz(ushort x);
-short2 __ovld __cnfn ctz(short2 x);
-ushort2 __ovld __cnfn ctz(ushort2 x);
-short3 __ovld __cnfn ctz(short3 x);
-ushort3 __ovld __cnfn ctz(ushort3 x);
-short4 __ovld __cnfn ctz(short4 x);
-ushort4 __ovld __cnfn ctz(ushort4 x);
-short8 __ovld __cnfn ctz(short8 x);
-ushort8 __ovld __cnfn ctz(ushort8 x);
-short16 __ovld __cnfn ctz(short16 x);
-ushort16 __ovld __cnfn ctz(ushort16 x);
-int __ovld __cnfn ctz(int x);
-uint __ovld __cnfn ctz(uint x);
-int2 __ovld __cnfn ctz(int2 x);
-uint2 __ovld __cnfn ctz(uint2 x);
-int3 __ovld __cnfn ctz(int3 x);
-uint3 __ovld __cnfn ctz(uint3 x);
-int4 __ovld __cnfn ctz(int4 x);
-uint4 __ovld __cnfn ctz(uint4 x);
-int8 __ovld __cnfn ctz(int8 x);
-uint8 __ovld __cnfn ctz(uint8 x);
-int16 __ovld __cnfn ctz(int16 x);
-uint16 __ovld __cnfn ctz(uint16 x);
-long __ovld __cnfn ctz(long x);
-ulong __ovld __cnfn ctz(ulong x);
-long2 __ovld __cnfn ctz(long2 x);
-ulong2 __ovld __cnfn ctz(ulong2 x);
-long3 __ovld __cnfn ctz(long3 x);
-ulong3 __ovld __cnfn ctz(ulong3 x);
-long4 __ovld __cnfn ctz(long4 x);
-ulong4 __ovld __cnfn ctz(ulong4 x);
-long8 __ovld __cnfn ctz(long8 x);
-ulong8 __ovld __cnfn ctz(ulong8 x);
-long16 __ovld __cnfn ctz(long16 x);
-ulong16 __ovld __cnfn ctz(ulong16 x);
+char __ovld __cnfn ctz(char);
+uchar __ovld __cnfn ctz(uchar);
+char2 __ovld __cnfn ctz(char2);
+uchar2 __ovld __cnfn ctz(uchar2);
+char3 __ovld __cnfn ctz(char3);
+uchar3 __ovld __cnfn ctz(uchar3);
+char4 __ovld __cnfn ctz(char4);
+uchar4 __ovld __cnfn ctz(uchar4);
+char8 __ovld __cnfn ctz(char8);
+uchar8 __ovld __cnfn ctz(uchar8);
+char16 __ovld __cnfn ctz(char16);
+uchar16 __ovld __cnfn ctz(uchar16);
+short __ovld __cnfn ctz(short);
+ushort __ovld __cnfn ctz(ushort);
+short2 __ovld __cnfn ctz(short2);
+ushort2 __ovld __cnfn ctz(ushort2);
+short3 __ovld __cnfn ctz(short3);
+ushort3 __ovld __cnfn ctz(ushort3);
+short4 __ovld __cnfn ctz(short4);
+ushort4 __ovld __cnfn ctz(ushort4);
+short8 __ovld __cnfn ctz(short8);
+ushort8 __ovld __cnfn ctz(ushort8);
+short16 __ovld __cnfn ctz(short16);
+ushort16 __ovld __cnfn ctz(ushort16);
+int __ovld __cnfn ctz(int);
+uint __ovld __cnfn ctz(uint);
+int2 __ovld __cnfn ctz(int2);
+uint2 __ovld __cnfn ctz(uint2);
+int3 __ovld __cnfn ctz(int3);
+uint3 __ovld __cnfn ctz(uint3);
+int4 __ovld __cnfn ctz(int4);
+uint4 __ovld __cnfn ctz(uint4);
+int8 __ovld __cnfn ctz(int8);
+uint8 __ovld __cnfn ctz(uint8);
+int16 __ovld __cnfn ctz(int16);
+uint16 __ovld __cnfn ctz(uint16);
+long __ovld __cnfn ctz(long);
+ulong __ovld __cnfn ctz(ulong);
+long2 __ovld __cnfn ctz(long2);
+ulong2 __ovld __cnfn ctz(ulong2);
+long3 __ovld __cnfn ctz(long3);
+ulong3 __ovld __cnfn ctz(ulong3);
+long4 __ovld __cnfn ctz(long4);
+ulong4 __ovld __cnfn ctz(ulong4);
+long8 __ovld __cnfn ctz(long8);
+ulong8 __ovld __cnfn ctz(ulong8);
+long16 __ovld __cnfn ctz(long16);
+ulong16 __ovld __cnfn ctz(ulong16);
#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
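/* A minimal sketch (illustration only) of the ctz behavior described above,
 * available from OpenCL 2.0:
 *
 *   int z = ctz(8);   // 0b1000 -> 3 trailing zeros
 *   int w = ctz(0);   // size in bits of the type: 32 for int
 */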
/**
* Returns mul_hi(a, b) + c.
*/
-char __ovld __cnfn mad_hi(char a, char b, char c);
-uchar __ovld __cnfn mad_hi(uchar a, uchar b, uchar c);
-char2 __ovld __cnfn mad_hi(char2 a, char2 b, char2 c);
-uchar2 __ovld __cnfn mad_hi(uchar2 a, uchar2 b, uchar2 c);
-char3 __ovld __cnfn mad_hi(char3 a, char3 b, char3 c);
-uchar3 __ovld __cnfn mad_hi(uchar3 a, uchar3 b, uchar3 c);
-char4 __ovld __cnfn mad_hi(char4 a, char4 b, char4 c);
-uchar4 __ovld __cnfn mad_hi(uchar4 a, uchar4 b, uchar4 c);
-char8 __ovld __cnfn mad_hi(char8 a, char8 b, char8 c);
-uchar8 __ovld __cnfn mad_hi(uchar8 a, uchar8 b, uchar8 c);
-char16 __ovld __cnfn mad_hi(char16 a, char16 b, char16 c);
-uchar16 __ovld __cnfn mad_hi(uchar16 a, uchar16 b, uchar16 c);
-short __ovld __cnfn mad_hi(short a, short b, short c);
-ushort __ovld __cnfn mad_hi(ushort a, ushort b, ushort c);
-short2 __ovld __cnfn mad_hi(short2 a, short2 b, short2 c);
-ushort2 __ovld __cnfn mad_hi(ushort2 a, ushort2 b, ushort2 c);
-short3 __ovld __cnfn mad_hi(short3 a, short3 b, short3 c);
-ushort3 __ovld __cnfn mad_hi(ushort3 a, ushort3 b, ushort3 c);
-short4 __ovld __cnfn mad_hi(short4 a, short4 b, short4 c);
-ushort4 __ovld __cnfn mad_hi(ushort4 a, ushort4 b, ushort4 c);
-short8 __ovld __cnfn mad_hi(short8 a, short8 b, short8 c);
-ushort8 __ovld __cnfn mad_hi(ushort8 a, ushort8 b, ushort8 c);
-short16 __ovld __cnfn mad_hi(short16 a, short16 b, short16 c);
-ushort16 __ovld __cnfn mad_hi(ushort16 a, ushort16 b, ushort16 c);
-int __ovld __cnfn mad_hi(int a, int b, int c);
-uint __ovld __cnfn mad_hi(uint a, uint b, uint c);
-int2 __ovld __cnfn mad_hi(int2 a, int2 b, int2 c);
-uint2 __ovld __cnfn mad_hi(uint2 a, uint2 b, uint2 c);
-int3 __ovld __cnfn mad_hi(int3 a, int3 b, int3 c);
-uint3 __ovld __cnfn mad_hi(uint3 a, uint3 b, uint3 c);
-int4 __ovld __cnfn mad_hi(int4 a, int4 b, int4 c);
-uint4 __ovld __cnfn mad_hi(uint4 a, uint4 b, uint4 c);
-int8 __ovld __cnfn mad_hi(int8 a, int8 b, int8 c);
-uint8 __ovld __cnfn mad_hi(uint8 a, uint8 b, uint8 c);
-int16 __ovld __cnfn mad_hi(int16 a, int16 b, int16 c);
-uint16 __ovld __cnfn mad_hi(uint16 a, uint16 b, uint16 c);
-long __ovld __cnfn mad_hi(long a, long b, long c);
-ulong __ovld __cnfn mad_hi(ulong a, ulong b, ulong c);
-long2 __ovld __cnfn mad_hi(long2 a, long2 b, long2 c);
-ulong2 __ovld __cnfn mad_hi(ulong2 a, ulong2 b, ulong2 c);
-long3 __ovld __cnfn mad_hi(long3 a, long3 b, long3 c);
-ulong3 __ovld __cnfn mad_hi(ulong3 a, ulong3 b, ulong3 c);
-long4 __ovld __cnfn mad_hi(long4 a, long4 b, long4 c);
-ulong4 __ovld __cnfn mad_hi(ulong4 a, ulong4 b, ulong4 c);
-long8 __ovld __cnfn mad_hi(long8 a, long8 b, long8 c);
-ulong8 __ovld __cnfn mad_hi(ulong8 a, ulong8 b, ulong8 c);
-long16 __ovld __cnfn mad_hi(long16 a, long16 b, long16 c);
-ulong16 __ovld __cnfn mad_hi(ulong16 a, ulong16 b, ulong16 c);
+char __ovld __cnfn mad_hi(char, char, char);
+uchar __ovld __cnfn mad_hi(uchar, uchar, uchar);
+char2 __ovld __cnfn mad_hi(char2, char2, char2);
+uchar2 __ovld __cnfn mad_hi(uchar2, uchar2, uchar2);
+char3 __ovld __cnfn mad_hi(char3, char3, char3);
+uchar3 __ovld __cnfn mad_hi(uchar3, uchar3, uchar3);
+char4 __ovld __cnfn mad_hi(char4, char4, char4);
+uchar4 __ovld __cnfn mad_hi(uchar4, uchar4, uchar4);
+char8 __ovld __cnfn mad_hi(char8, char8, char8);
+uchar8 __ovld __cnfn mad_hi(uchar8, uchar8, uchar8);
+char16 __ovld __cnfn mad_hi(char16, char16, char16);
+uchar16 __ovld __cnfn mad_hi(uchar16, uchar16, uchar16);
+short __ovld __cnfn mad_hi(short, short, short);
+ushort __ovld __cnfn mad_hi(ushort, ushort, ushort);
+short2 __ovld __cnfn mad_hi(short2, short2, short2);
+ushort2 __ovld __cnfn mad_hi(ushort2, ushort2, ushort2);
+short3 __ovld __cnfn mad_hi(short3, short3, short3);
+ushort3 __ovld __cnfn mad_hi(ushort3, ushort3, ushort3);
+short4 __ovld __cnfn mad_hi(short4, short4, short4);
+ushort4 __ovld __cnfn mad_hi(ushort4, ushort4, ushort4);
+short8 __ovld __cnfn mad_hi(short8, short8, short8);
+ushort8 __ovld __cnfn mad_hi(ushort8, ushort8, ushort8);
+short16 __ovld __cnfn mad_hi(short16, short16, short16);
+ushort16 __ovld __cnfn mad_hi(ushort16, ushort16, ushort16);
+int __ovld __cnfn mad_hi(int, int, int);
+uint __ovld __cnfn mad_hi(uint, uint, uint);
+int2 __ovld __cnfn mad_hi(int2, int2, int2);
+uint2 __ovld __cnfn mad_hi(uint2, uint2, uint2);
+int3 __ovld __cnfn mad_hi(int3, int3, int3);
+uint3 __ovld __cnfn mad_hi(uint3, uint3, uint3);
+int4 __ovld __cnfn mad_hi(int4, int4, int4);
+uint4 __ovld __cnfn mad_hi(uint4, uint4, uint4);
+int8 __ovld __cnfn mad_hi(int8, int8, int8);
+uint8 __ovld __cnfn mad_hi(uint8, uint8, uint8);
+int16 __ovld __cnfn mad_hi(int16, int16, int16);
+uint16 __ovld __cnfn mad_hi(uint16, uint16, uint16);
+long __ovld __cnfn mad_hi(long, long, long);
+ulong __ovld __cnfn mad_hi(ulong, ulong, ulong);
+long2 __ovld __cnfn mad_hi(long2, long2, long2);
+ulong2 __ovld __cnfn mad_hi(ulong2, ulong2, ulong2);
+long3 __ovld __cnfn mad_hi(long3, long3, long3);
+ulong3 __ovld __cnfn mad_hi(ulong3, ulong3, ulong3);
+long4 __ovld __cnfn mad_hi(long4, long4, long4);
+ulong4 __ovld __cnfn mad_hi(ulong4, ulong4, ulong4);
+long8 __ovld __cnfn mad_hi(long8, long8, long8);
+ulong8 __ovld __cnfn mad_hi(ulong8, ulong8, ulong8);
+long16 __ovld __cnfn mad_hi(long16, long16, long16);
+ulong16 __ovld __cnfn mad_hi(ulong16, ulong16, ulong16);
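/* A minimal sketch (illustration only): mad_hi folds the high-half multiply
 * and the add into one call, i.e. it is specified as mul_hi(a, b) + c.
 *
 *   uint r = mad_hi(0x80000000u, 4u, 1u);   // mul_hi part is 2, so r == 3
 */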
/**
* Returns a * b + c and saturates the result.
*/
-char __ovld __cnfn mad_sat(char a, char b, char c);
-uchar __ovld __cnfn mad_sat(uchar a, uchar b, uchar c);
-char2 __ovld __cnfn mad_sat(char2 a, char2 b, char2 c);
-uchar2 __ovld __cnfn mad_sat(uchar2 a, uchar2 b, uchar2 c);
-char3 __ovld __cnfn mad_sat(char3 a, char3 b, char3 c);
-uchar3 __ovld __cnfn mad_sat(uchar3 a, uchar3 b, uchar3 c);
-char4 __ovld __cnfn mad_sat(char4 a, char4 b, char4 c);
-uchar4 __ovld __cnfn mad_sat(uchar4 a, uchar4 b, uchar4 c);
-char8 __ovld __cnfn mad_sat(char8 a, char8 b, char8 c);
-uchar8 __ovld __cnfn mad_sat(uchar8 a, uchar8 b, uchar8 c);
-char16 __ovld __cnfn mad_sat(char16 a, char16 b, char16 c);
-uchar16 __ovld __cnfn mad_sat(uchar16 a, uchar16 b, uchar16 c);
-short __ovld __cnfn mad_sat(short a, short b, short c);
-ushort __ovld __cnfn mad_sat(ushort a, ushort b, ushort c);
-short2 __ovld __cnfn mad_sat(short2 a, short2 b, short2 c);
-ushort2 __ovld __cnfn mad_sat(ushort2 a, ushort2 b, ushort2 c);
-short3 __ovld __cnfn mad_sat(short3 a, short3 b, short3 c);
-ushort3 __ovld __cnfn mad_sat(ushort3 a, ushort3 b, ushort3 c);
-short4 __ovld __cnfn mad_sat(short4 a, short4 b, short4 c);
-ushort4 __ovld __cnfn mad_sat(ushort4 a, ushort4 b, ushort4 c);
-short8 __ovld __cnfn mad_sat(short8 a, short8 b, short8 c);
-ushort8 __ovld __cnfn mad_sat(ushort8 a, ushort8 b, ushort8 c);
-short16 __ovld __cnfn mad_sat(short16 a, short16 b, short16 c);
-ushort16 __ovld __cnfn mad_sat(ushort16 a, ushort16 b, ushort16 c);
-int __ovld __cnfn mad_sat(int a, int b, int c);
-uint __ovld __cnfn mad_sat(uint a, uint b, uint c);
-int2 __ovld __cnfn mad_sat(int2 a, int2 b, int2 c);
-uint2 __ovld __cnfn mad_sat(uint2 a, uint2 b, uint2 c);
-int3 __ovld __cnfn mad_sat(int3 a, int3 b, int3 c);
-uint3 __ovld __cnfn mad_sat(uint3 a, uint3 b, uint3 c);
-int4 __ovld __cnfn mad_sat(int4 a, int4 b, int4 c);
-uint4 __ovld __cnfn mad_sat(uint4 a, uint4 b, uint4 c);
-int8 __ovld __cnfn mad_sat(int8 a, int8 b, int8 c);
-uint8 __ovld __cnfn mad_sat(uint8 a, uint8 b, uint8 c);
-int16 __ovld __cnfn mad_sat(int16 a, int16 b, int16 c);
-uint16 __ovld __cnfn mad_sat(uint16 a, uint16 b, uint16 c);
-long __ovld __cnfn mad_sat(long a, long b, long c);
-ulong __ovld __cnfn mad_sat(ulong a, ulong b, ulong c);
-long2 __ovld __cnfn mad_sat(long2 a, long2 b, long2 c);
-ulong2 __ovld __cnfn mad_sat(ulong2 a, ulong2 b, ulong2 c);
-long3 __ovld __cnfn mad_sat(long3 a, long3 b, long3 c);
-ulong3 __ovld __cnfn mad_sat(ulong3 a, ulong3 b, ulong3 c);
-long4 __ovld __cnfn mad_sat(long4 a, long4 b, long4 c);
-ulong4 __ovld __cnfn mad_sat(ulong4 a, ulong4 b, ulong4 c);
-long8 __ovld __cnfn mad_sat(long8 a, long8 b, long8 c);
-ulong8 __ovld __cnfn mad_sat(ulong8 a, ulong8 b, ulong8 c);
-long16 __ovld __cnfn mad_sat(long16 a, long16 b, long16 c);
-ulong16 __ovld __cnfn mad_sat(ulong16 a, ulong16 b, ulong16 c);
+char __ovld __cnfn mad_sat(char, char, char);
+uchar __ovld __cnfn mad_sat(uchar, uchar, uchar);
+char2 __ovld __cnfn mad_sat(char2, char2, char2);
+uchar2 __ovld __cnfn mad_sat(uchar2, uchar2, uchar2);
+char3 __ovld __cnfn mad_sat(char3, char3, char3);
+uchar3 __ovld __cnfn mad_sat(uchar3, uchar3, uchar3);
+char4 __ovld __cnfn mad_sat(char4, char4, char4);
+uchar4 __ovld __cnfn mad_sat(uchar4, uchar4, uchar4);
+char8 __ovld __cnfn mad_sat(char8, char8, char8);
+uchar8 __ovld __cnfn mad_sat(uchar8, uchar8, uchar8);
+char16 __ovld __cnfn mad_sat(char16, char16, char16);
+uchar16 __ovld __cnfn mad_sat(uchar16, uchar16, uchar16);
+short __ovld __cnfn mad_sat(short, short, short);
+ushort __ovld __cnfn mad_sat(ushort, ushort, ushort);
+short2 __ovld __cnfn mad_sat(short2, short2, short2);
+ushort2 __ovld __cnfn mad_sat(ushort2, ushort2, ushort2);
+short3 __ovld __cnfn mad_sat(short3, short3, short3);
+ushort3 __ovld __cnfn mad_sat(ushort3, ushort3, ushort3);
+short4 __ovld __cnfn mad_sat(short4, short4, short4);
+ushort4 __ovld __cnfn mad_sat(ushort4, ushort4, ushort4);
+short8 __ovld __cnfn mad_sat(short8, short8, short8);
+ushort8 __ovld __cnfn mad_sat(ushort8, ushort8, ushort8);
+short16 __ovld __cnfn mad_sat(short16, short16, short16);
+ushort16 __ovld __cnfn mad_sat(ushort16, ushort16, ushort16);
+int __ovld __cnfn mad_sat(int, int, int);
+uint __ovld __cnfn mad_sat(uint, uint, uint);
+int2 __ovld __cnfn mad_sat(int2, int2, int2);
+uint2 __ovld __cnfn mad_sat(uint2, uint2, uint2);
+int3 __ovld __cnfn mad_sat(int3, int3, int3);
+uint3 __ovld __cnfn mad_sat(uint3, uint3, uint3);
+int4 __ovld __cnfn mad_sat(int4, int4, int4);
+uint4 __ovld __cnfn mad_sat(uint4, uint4, uint4);
+int8 __ovld __cnfn mad_sat(int8, int8, int8);
+uint8 __ovld __cnfn mad_sat(uint8, uint8, uint8);
+int16 __ovld __cnfn mad_sat(int16, int16, int16);
+uint16 __ovld __cnfn mad_sat(uint16, uint16, uint16);
+long __ovld __cnfn mad_sat(long, long, long);
+ulong __ovld __cnfn mad_sat(ulong, ulong, ulong);
+long2 __ovld __cnfn mad_sat(long2, long2, long2);
+ulong2 __ovld __cnfn mad_sat(ulong2, ulong2, ulong2);
+long3 __ovld __cnfn mad_sat(long3, long3, long3);
+ulong3 __ovld __cnfn mad_sat(ulong3, ulong3, ulong3);
+long4 __ovld __cnfn mad_sat(long4, long4, long4);
+ulong4 __ovld __cnfn mad_sat(ulong4, ulong4, ulong4);
+long8 __ovld __cnfn mad_sat(long8, long8, long8);
+ulong8 __ovld __cnfn mad_sat(ulong8, ulong8, ulong8);
+long16 __ovld __cnfn mad_sat(long16, long16, long16);
+ulong16 __ovld __cnfn mad_sat(ulong16, ulong16, ulong16);
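/* A minimal sketch (illustration only), assuming 8-bit char semantics: the
 * whole a * b + c is saturated, not just the final add.
 *
 *   char r = mad_sat((char)100, (char)2, (char)10);   // 210 -> 127
 */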
/**
* Returns y if x < y, otherwise it returns x.
*/
-char __ovld __cnfn max(char x, char y);
-uchar __ovld __cnfn max(uchar x, uchar y);
-char2 __ovld __cnfn max(char2 x, char2 y);
-uchar2 __ovld __cnfn max(uchar2 x, uchar2 y);
-char3 __ovld __cnfn max(char3 x, char3 y);
-uchar3 __ovld __cnfn max(uchar3 x, uchar3 y);
-char4 __ovld __cnfn max(char4 x, char4 y);
-uchar4 __ovld __cnfn max(uchar4 x, uchar4 y);
-char8 __ovld __cnfn max(char8 x, char8 y);
-uchar8 __ovld __cnfn max(uchar8 x, uchar8 y);
-char16 __ovld __cnfn max(char16 x, char16 y);
-uchar16 __ovld __cnfn max(uchar16 x, uchar16 y);
-short __ovld __cnfn max(short x, short y);
-ushort __ovld __cnfn max(ushort x, ushort y);
-short2 __ovld __cnfn max(short2 x, short2 y);
-ushort2 __ovld __cnfn max(ushort2 x, ushort2 y);
-short3 __ovld __cnfn max(short3 x, short3 y);
-ushort3 __ovld __cnfn max(ushort3 x, ushort3 y);
-short4 __ovld __cnfn max(short4 x, short4 y);
-ushort4 __ovld __cnfn max(ushort4 x, ushort4 y);
-short8 __ovld __cnfn max(short8 x, short8 y);
-ushort8 __ovld __cnfn max(ushort8 x, ushort8 y);
-short16 __ovld __cnfn max(short16 x, short16 y);
-ushort16 __ovld __cnfn max(ushort16 x, ushort16 y);
-int __ovld __cnfn max(int x, int y);
-uint __ovld __cnfn max(uint x, uint y);
-int2 __ovld __cnfn max(int2 x, int2 y);
-uint2 __ovld __cnfn max(uint2 x, uint2 y);
-int3 __ovld __cnfn max(int3 x, int3 y);
-uint3 __ovld __cnfn max(uint3 x, uint3 y);
-int4 __ovld __cnfn max(int4 x, int4 y);
-uint4 __ovld __cnfn max(uint4 x, uint4 y);
-int8 __ovld __cnfn max(int8 x, int8 y);
-uint8 __ovld __cnfn max(uint8 x, uint8 y);
-int16 __ovld __cnfn max(int16 x, int16 y);
-uint16 __ovld __cnfn max(uint16 x, uint16 y);
-long __ovld __cnfn max(long x, long y);
-ulong __ovld __cnfn max(ulong x, ulong y);
-long2 __ovld __cnfn max(long2 x, long2 y);
-ulong2 __ovld __cnfn max(ulong2 x, ulong2 y);
-long3 __ovld __cnfn max(long3 x, long3 y);
-ulong3 __ovld __cnfn max(ulong3 x, ulong3 y);
-long4 __ovld __cnfn max(long4 x, long4 y);
-ulong4 __ovld __cnfn max(ulong4 x, ulong4 y);
-long8 __ovld __cnfn max(long8 x, long8 y);
-ulong8 __ovld __cnfn max(ulong8 x, ulong8 y);
-long16 __ovld __cnfn max(long16 x, long16 y);
-ulong16 __ovld __cnfn max(ulong16 x, ulong16 y);
-char2 __ovld __cnfn max(char2 x, char y);
-uchar2 __ovld __cnfn max(uchar2 x, uchar y);
-char3 __ovld __cnfn max(char3 x, char y);
-uchar3 __ovld __cnfn max(uchar3 x, uchar y);
-char4 __ovld __cnfn max(char4 x, char y);
-uchar4 __ovld __cnfn max(uchar4 x, uchar y);
-char8 __ovld __cnfn max(char8 x, char y);
-uchar8 __ovld __cnfn max(uchar8 x, uchar y);
-char16 __ovld __cnfn max(char16 x, char y);
-uchar16 __ovld __cnfn max(uchar16 x, uchar y);
-short2 __ovld __cnfn max(short2 x, short y);
-ushort2 __ovld __cnfn max(ushort2 x, ushort y);
-short3 __ovld __cnfn max(short3 x, short y);
-ushort3 __ovld __cnfn max(ushort3 x, ushort y);
-short4 __ovld __cnfn max(short4 x, short y);
-ushort4 __ovld __cnfn max(ushort4 x, ushort y);
-short8 __ovld __cnfn max(short8 x, short y);
-ushort8 __ovld __cnfn max(ushort8 x, ushort y);
-short16 __ovld __cnfn max(short16 x, short y);
-ushort16 __ovld __cnfn max(ushort16 x, ushort y);
-int2 __ovld __cnfn max(int2 x, int y);
-uint2 __ovld __cnfn max(uint2 x, uint y);
-int3 __ovld __cnfn max(int3 x, int y);
-uint3 __ovld __cnfn max(uint3 x, uint y);
-int4 __ovld __cnfn max(int4 x, int y);
-uint4 __ovld __cnfn max(uint4 x, uint y);
-int8 __ovld __cnfn max(int8 x, int y);
-uint8 __ovld __cnfn max(uint8 x, uint y);
-int16 __ovld __cnfn max(int16 x, int y);
-uint16 __ovld __cnfn max(uint16 x, uint y);
-long2 __ovld __cnfn max(long2 x, long y);
-ulong2 __ovld __cnfn max(ulong2 x, ulong y);
-long3 __ovld __cnfn max(long3 x, long y);
-ulong3 __ovld __cnfn max(ulong3 x, ulong y);
-long4 __ovld __cnfn max(long4 x, long y);
-ulong4 __ovld __cnfn max(ulong4 x, ulong y);
-long8 __ovld __cnfn max(long8 x, long y);
-ulong8 __ovld __cnfn max(ulong8 x, ulong y);
-long16 __ovld __cnfn max(long16 x, long y);
-ulong16 __ovld __cnfn max(ulong16 x, ulong y);
+char __ovld __cnfn max(char, char);
+uchar __ovld __cnfn max(uchar, uchar);
+char2 __ovld __cnfn max(char2, char2);
+uchar2 __ovld __cnfn max(uchar2, uchar2);
+char3 __ovld __cnfn max(char3, char3);
+uchar3 __ovld __cnfn max(uchar3, uchar3);
+char4 __ovld __cnfn max(char4, char4);
+uchar4 __ovld __cnfn max(uchar4, uchar4);
+char8 __ovld __cnfn max(char8, char8);
+uchar8 __ovld __cnfn max(uchar8, uchar8);
+char16 __ovld __cnfn max(char16, char16);
+uchar16 __ovld __cnfn max(uchar16, uchar16);
+short __ovld __cnfn max(short, short);
+ushort __ovld __cnfn max(ushort, ushort);
+short2 __ovld __cnfn max(short2, short2);
+ushort2 __ovld __cnfn max(ushort2, ushort2);
+short3 __ovld __cnfn max(short3, short3);
+ushort3 __ovld __cnfn max(ushort3, ushort3);
+short4 __ovld __cnfn max(short4, short4);
+ushort4 __ovld __cnfn max(ushort4, ushort4);
+short8 __ovld __cnfn max(short8, short8);
+ushort8 __ovld __cnfn max(ushort8, ushort8);
+short16 __ovld __cnfn max(short16, short16);
+ushort16 __ovld __cnfn max(ushort16, ushort16);
+int __ovld __cnfn max(int, int);
+uint __ovld __cnfn max(uint, uint);
+int2 __ovld __cnfn max(int2, int2);
+uint2 __ovld __cnfn max(uint2, uint2);
+int3 __ovld __cnfn max(int3, int3);
+uint3 __ovld __cnfn max(uint3, uint3);
+int4 __ovld __cnfn max(int4, int4);
+uint4 __ovld __cnfn max(uint4, uint4);
+int8 __ovld __cnfn max(int8, int8);
+uint8 __ovld __cnfn max(uint8, uint8);
+int16 __ovld __cnfn max(int16, int16);
+uint16 __ovld __cnfn max(uint16, uint16);
+long __ovld __cnfn max(long, long);
+ulong __ovld __cnfn max(ulong, ulong);
+long2 __ovld __cnfn max(long2, long2);
+ulong2 __ovld __cnfn max(ulong2, ulong2);
+long3 __ovld __cnfn max(long3, long3);
+ulong3 __ovld __cnfn max(ulong3, ulong3);
+long4 __ovld __cnfn max(long4, long4);
+ulong4 __ovld __cnfn max(ulong4, ulong4);
+long8 __ovld __cnfn max(long8, long8);
+ulong8 __ovld __cnfn max(ulong8, ulong8);
+long16 __ovld __cnfn max(long16, long16);
+ulong16 __ovld __cnfn max(ulong16, ulong16);
+char2 __ovld __cnfn max(char2, char);
+uchar2 __ovld __cnfn max(uchar2, uchar);
+char3 __ovld __cnfn max(char3, char);
+uchar3 __ovld __cnfn max(uchar3, uchar);
+char4 __ovld __cnfn max(char4, char);
+uchar4 __ovld __cnfn max(uchar4, uchar);
+char8 __ovld __cnfn max(char8, char);
+uchar8 __ovld __cnfn max(uchar8, uchar);
+char16 __ovld __cnfn max(char16, char);
+uchar16 __ovld __cnfn max(uchar16, uchar);
+short2 __ovld __cnfn max(short2, short);
+ushort2 __ovld __cnfn max(ushort2, ushort);
+short3 __ovld __cnfn max(short3, short);
+ushort3 __ovld __cnfn max(ushort3, ushort);
+short4 __ovld __cnfn max(short4, short);
+ushort4 __ovld __cnfn max(ushort4, ushort);
+short8 __ovld __cnfn max(short8, short);
+ushort8 __ovld __cnfn max(ushort8, ushort);
+short16 __ovld __cnfn max(short16, short);
+ushort16 __ovld __cnfn max(ushort16, ushort);
+int2 __ovld __cnfn max(int2, int);
+uint2 __ovld __cnfn max(uint2, uint);
+int3 __ovld __cnfn max(int3, int);
+uint3 __ovld __cnfn max(uint3, uint);
+int4 __ovld __cnfn max(int4, int);
+uint4 __ovld __cnfn max(uint4, uint);
+int8 __ovld __cnfn max(int8, int);
+uint8 __ovld __cnfn max(uint8, uint);
+int16 __ovld __cnfn max(int16, int);
+uint16 __ovld __cnfn max(uint16, uint);
+long2 __ovld __cnfn max(long2, long);
+ulong2 __ovld __cnfn max(ulong2, ulong);
+long3 __ovld __cnfn max(long3, long);
+ulong3 __ovld __cnfn max(ulong3, ulong);
+long4 __ovld __cnfn max(long4, long);
+ulong4 __ovld __cnfn max(ulong4, ulong);
+long8 __ovld __cnfn max(long8, long);
+ulong8 __ovld __cnfn max(ulong8, ulong);
+long16 __ovld __cnfn max(long16, long);
+ulong16 __ovld __cnfn max(ulong16, ulong);
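/* A minimal sketch (illustration only) of the vector/scalar overloads
 * above: a scalar second argument acts as a per-element floor.
 *
 *   int2 v = max((int2)(1, 5), 3);   // (3, 5)
 */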
/**
* Returns y if y < x, otherwise it returns x.
*/
-char __ovld __cnfn min(char x, char y);
-uchar __ovld __cnfn min(uchar x, uchar y);
-char2 __ovld __cnfn min(char2 x, char2 y);
-uchar2 __ovld __cnfn min(uchar2 x, uchar2 y);
-char3 __ovld __cnfn min(char3 x, char3 y);
-uchar3 __ovld __cnfn min(uchar3 x, uchar3 y);
-char4 __ovld __cnfn min(char4 x, char4 y);
-uchar4 __ovld __cnfn min(uchar4 x, uchar4 y);
-char8 __ovld __cnfn min(char8 x, char8 y);
-uchar8 __ovld __cnfn min(uchar8 x, uchar8 y);
-char16 __ovld __cnfn min(char16 x, char16 y);
-uchar16 __ovld __cnfn min(uchar16 x, uchar16 y);
-short __ovld __cnfn min(short x, short y);
-ushort __ovld __cnfn min(ushort x, ushort y);
-short2 __ovld __cnfn min(short2 x, short2 y);
-ushort2 __ovld __cnfn min(ushort2 x, ushort2 y);
-short3 __ovld __cnfn min(short3 x, short3 y);
-ushort3 __ovld __cnfn min(ushort3 x, ushort3 y);
-short4 __ovld __cnfn min(short4 x, short4 y);
-ushort4 __ovld __cnfn min(ushort4 x, ushort4 y);
-short8 __ovld __cnfn min(short8 x, short8 y);
-ushort8 __ovld __cnfn min(ushort8 x, ushort8 y);
-short16 __ovld __cnfn min(short16 x, short16 y);
-ushort16 __ovld __cnfn min(ushort16 x, ushort16 y);
-int __ovld __cnfn min(int x, int y);
-uint __ovld __cnfn min(uint x, uint y);
-int2 __ovld __cnfn min(int2 x, int2 y);
-uint2 __ovld __cnfn min(uint2 x, uint2 y);
-int3 __ovld __cnfn min(int3 x, int3 y);
-uint3 __ovld __cnfn min(uint3 x, uint3 y);
-int4 __ovld __cnfn min(int4 x, int4 y);
-uint4 __ovld __cnfn min(uint4 x, uint4 y);
-int8 __ovld __cnfn min(int8 x, int8 y);
-uint8 __ovld __cnfn min(uint8 x, uint8 y);
-int16 __ovld __cnfn min(int16 x, int16 y);
-uint16 __ovld __cnfn min(uint16 x, uint16 y);
-long __ovld __cnfn min(long x, long y);
-ulong __ovld __cnfn min(ulong x, ulong y);
-long2 __ovld __cnfn min(long2 x, long2 y);
-ulong2 __ovld __cnfn min(ulong2 x, ulong2 y);
-long3 __ovld __cnfn min(long3 x, long3 y);
-ulong3 __ovld __cnfn min(ulong3 x, ulong3 y);
-long4 __ovld __cnfn min(long4 x, long4 y);
-ulong4 __ovld __cnfn min(ulong4 x, ulong4 y);
-long8 __ovld __cnfn min(long8 x, long8 y);
-ulong8 __ovld __cnfn min(ulong8 x, ulong8 y);
-long16 __ovld __cnfn min(long16 x, long16 y);
-ulong16 __ovld __cnfn min(ulong16 x, ulong16 y);
-char2 __ovld __cnfn min(char2 x, char y);
-uchar2 __ovld __cnfn min(uchar2 x, uchar y);
-char3 __ovld __cnfn min(char3 x, char y);
-uchar3 __ovld __cnfn min(uchar3 x, uchar y);
-char4 __ovld __cnfn min(char4 x, char y);
-uchar4 __ovld __cnfn min(uchar4 x, uchar y);
-char8 __ovld __cnfn min(char8 x, char y);
-uchar8 __ovld __cnfn min(uchar8 x, uchar y);
-char16 __ovld __cnfn min(char16 x, char y);
-uchar16 __ovld __cnfn min(uchar16 x, uchar y);
-short2 __ovld __cnfn min(short2 x, short y);
-ushort2 __ovld __cnfn min(ushort2 x, ushort y);
-short3 __ovld __cnfn min(short3 x, short y);
-ushort3 __ovld __cnfn min(ushort3 x, ushort y);
-short4 __ovld __cnfn min(short4 x, short y);
-ushort4 __ovld __cnfn min(ushort4 x, ushort y);
-short8 __ovld __cnfn min(short8 x, short y);
-ushort8 __ovld __cnfn min(ushort8 x, ushort y);
-short16 __ovld __cnfn min(short16 x, short y);
-ushort16 __ovld __cnfn min(ushort16 x, ushort y);
-int2 __ovld __cnfn min(int2 x, int y);
-uint2 __ovld __cnfn min(uint2 x, uint y);
-int3 __ovld __cnfn min(int3 x, int y);
-uint3 __ovld __cnfn min(uint3 x, uint y);
-int4 __ovld __cnfn min(int4 x, int y);
-uint4 __ovld __cnfn min(uint4 x, uint y);
-int8 __ovld __cnfn min(int8 x, int y);
-uint8 __ovld __cnfn min(uint8 x, uint y);
-int16 __ovld __cnfn min(int16 x, int y);
-uint16 __ovld __cnfn min(uint16 x, uint y);
-long2 __ovld __cnfn min(long2 x, long y);
-ulong2 __ovld __cnfn min(ulong2 x, ulong y);
-long3 __ovld __cnfn min(long3 x, long y);
-ulong3 __ovld __cnfn min(ulong3 x, ulong y);
-long4 __ovld __cnfn min(long4 x, long y);
-ulong4 __ovld __cnfn min(ulong4 x, ulong y);
-long8 __ovld __cnfn min(long8 x, long y);
-ulong8 __ovld __cnfn min(ulong8 x, ulong y);
-long16 __ovld __cnfn min(long16 x, long y);
-ulong16 __ovld __cnfn min(ulong16 x, ulong y);
+char __ovld __cnfn min(char, char);
+uchar __ovld __cnfn min(uchar, uchar);
+char2 __ovld __cnfn min(char2, char2);
+uchar2 __ovld __cnfn min(uchar2, uchar2);
+char3 __ovld __cnfn min(char3, char3);
+uchar3 __ovld __cnfn min(uchar3, uchar3);
+char4 __ovld __cnfn min(char4, char4);
+uchar4 __ovld __cnfn min(uchar4, uchar4);
+char8 __ovld __cnfn min(char8, char8);
+uchar8 __ovld __cnfn min(uchar8, uchar8);
+char16 __ovld __cnfn min(char16, char16);
+uchar16 __ovld __cnfn min(uchar16, uchar16);
+short __ovld __cnfn min(short, short);
+ushort __ovld __cnfn min(ushort, ushort);
+short2 __ovld __cnfn min(short2, short2);
+ushort2 __ovld __cnfn min(ushort2, ushort2);
+short3 __ovld __cnfn min(short3, short3);
+ushort3 __ovld __cnfn min(ushort3, ushort3);
+short4 __ovld __cnfn min(short4, short4);
+ushort4 __ovld __cnfn min(ushort4, ushort4);
+short8 __ovld __cnfn min(short8, short8);
+ushort8 __ovld __cnfn min(ushort8, ushort8);
+short16 __ovld __cnfn min(short16, short16);
+ushort16 __ovld __cnfn min(ushort16, ushort16);
+int __ovld __cnfn min(int, int);
+uint __ovld __cnfn min(uint, uint);
+int2 __ovld __cnfn min(int2, int2);
+uint2 __ovld __cnfn min(uint2, uint2);
+int3 __ovld __cnfn min(int3, int3);
+uint3 __ovld __cnfn min(uint3, uint3);
+int4 __ovld __cnfn min(int4, int4);
+uint4 __ovld __cnfn min(uint4, uint4);
+int8 __ovld __cnfn min(int8, int8);
+uint8 __ovld __cnfn min(uint8, uint8);
+int16 __ovld __cnfn min(int16, int16);
+uint16 __ovld __cnfn min(uint16, uint16);
+long __ovld __cnfn min(long, long);
+ulong __ovld __cnfn min(ulong, ulong);
+long2 __ovld __cnfn min(long2, long2);
+ulong2 __ovld __cnfn min(ulong2, ulong2);
+long3 __ovld __cnfn min(long3, long3);
+ulong3 __ovld __cnfn min(ulong3, ulong3);
+long4 __ovld __cnfn min(long4, long4);
+ulong4 __ovld __cnfn min(ulong4, ulong4);
+long8 __ovld __cnfn min(long8, long8);
+ulong8 __ovld __cnfn min(ulong8, ulong8);
+long16 __ovld __cnfn min(long16, long16);
+ulong16 __ovld __cnfn min(ulong16, ulong16);
+char2 __ovld __cnfn min(char2, char);
+uchar2 __ovld __cnfn min(uchar2, uchar);
+char3 __ovld __cnfn min(char3, char);
+uchar3 __ovld __cnfn min(uchar3, uchar);
+char4 __ovld __cnfn min(char4, char);
+uchar4 __ovld __cnfn min(uchar4, uchar);
+char8 __ovld __cnfn min(char8, char);
+uchar8 __ovld __cnfn min(uchar8, uchar);
+char16 __ovld __cnfn min(char16, char);
+uchar16 __ovld __cnfn min(uchar16, uchar);
+short2 __ovld __cnfn min(short2, short);
+ushort2 __ovld __cnfn min(ushort2, ushort);
+short3 __ovld __cnfn min(short3, short);
+ushort3 __ovld __cnfn min(ushort3, ushort);
+short4 __ovld __cnfn min(short4, short);
+ushort4 __ovld __cnfn min(ushort4, ushort);
+short8 __ovld __cnfn min(short8, short);
+ushort8 __ovld __cnfn min(ushort8, ushort);
+short16 __ovld __cnfn min(short16, short);
+ushort16 __ovld __cnfn min(ushort16, ushort);
+int2 __ovld __cnfn min(int2, int);
+uint2 __ovld __cnfn min(uint2, uint);
+int3 __ovld __cnfn min(int3, int);
+uint3 __ovld __cnfn min(uint3, uint);
+int4 __ovld __cnfn min(int4, int);
+uint4 __ovld __cnfn min(uint4, uint);
+int8 __ovld __cnfn min(int8, int);
+uint8 __ovld __cnfn min(uint8, uint);
+int16 __ovld __cnfn min(int16, int);
+uint16 __ovld __cnfn min(uint16, uint);
+long2 __ovld __cnfn min(long2, long);
+ulong2 __ovld __cnfn min(ulong2, ulong);
+long3 __ovld __cnfn min(long3, long);
+ulong3 __ovld __cnfn min(ulong3, ulong);
+long4 __ovld __cnfn min(long4, long);
+ulong4 __ovld __cnfn min(ulong4, ulong);
+long8 __ovld __cnfn min(long8, long);
+ulong8 __ovld __cnfn min(ulong8, ulong);
+long16 __ovld __cnfn min(long16, long);
+ulong16 __ovld __cnfn min(ulong16, ulong);
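/* A minimal sketch (illustration only), mirroring max: a scalar second
 * argument acts as a per-element ceiling.
 *
 *   int2 v = min((int2)(1, 5), 3);   // (1, 3)
 */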
/**
* Computes x * y and returns the high half of the
* product of x and y.
*/
-char __ovld __cnfn mul_hi(char x, char y);
-uchar __ovld __cnfn mul_hi(uchar x, uchar y);
-char2 __ovld __cnfn mul_hi(char2 x, char2 y);
-uchar2 __ovld __cnfn mul_hi(uchar2 x, uchar2 y);
-char3 __ovld __cnfn mul_hi(char3 x, char3 y);
-uchar3 __ovld __cnfn mul_hi(uchar3 x, uchar3 y);
-char4 __ovld __cnfn mul_hi(char4 x, char4 y);
-uchar4 __ovld __cnfn mul_hi(uchar4 x, uchar4 y);
-char8 __ovld __cnfn mul_hi(char8 x, char8 y);
-uchar8 __ovld __cnfn mul_hi(uchar8 x, uchar8 y);
-char16 __ovld __cnfn mul_hi(char16 x, char16 y);
-uchar16 __ovld __cnfn mul_hi(uchar16 x, uchar16 y);
-short __ovld __cnfn mul_hi(short x, short y);
-ushort __ovld __cnfn mul_hi(ushort x, ushort y);
-short2 __ovld __cnfn mul_hi(short2 x, short2 y);
-ushort2 __ovld __cnfn mul_hi(ushort2 x, ushort2 y);
-short3 __ovld __cnfn mul_hi(short3 x, short3 y);
-ushort3 __ovld __cnfn mul_hi(ushort3 x, ushort3 y);
-short4 __ovld __cnfn mul_hi(short4 x, short4 y);
-ushort4 __ovld __cnfn mul_hi(ushort4 x, ushort4 y);
-short8 __ovld __cnfn mul_hi(short8 x, short8 y);
-ushort8 __ovld __cnfn mul_hi(ushort8 x, ushort8 y);
-short16 __ovld __cnfn mul_hi(short16 x, short16 y);
-ushort16 __ovld __cnfn mul_hi(ushort16 x, ushort16 y);
-int __ovld __cnfn mul_hi(int x, int y);
-uint __ovld __cnfn mul_hi(uint x, uint y);
-int2 __ovld __cnfn mul_hi(int2 x, int2 y);
-uint2 __ovld __cnfn mul_hi(uint2 x, uint2 y);
-int3 __ovld __cnfn mul_hi(int3 x, int3 y);
-uint3 __ovld __cnfn mul_hi(uint3 x, uint3 y);
-int4 __ovld __cnfn mul_hi(int4 x, int4 y);
-uint4 __ovld __cnfn mul_hi(uint4 x, uint4 y);
-int8 __ovld __cnfn mul_hi(int8 x, int8 y);
-uint8 __ovld __cnfn mul_hi(uint8 x, uint8 y);
-int16 __ovld __cnfn mul_hi(int16 x, int16 y);
-uint16 __ovld __cnfn mul_hi(uint16 x, uint16 y);
-long __ovld __cnfn mul_hi(long x, long y);
-ulong __ovld __cnfn mul_hi(ulong x, ulong y);
-long2 __ovld __cnfn mul_hi(long2 x, long2 y);
-ulong2 __ovld __cnfn mul_hi(ulong2 x, ulong2 y);
-long3 __ovld __cnfn mul_hi(long3 x, long3 y);
-ulong3 __ovld __cnfn mul_hi(ulong3 x, ulong3 y);
-long4 __ovld __cnfn mul_hi(long4 x, long4 y);
-ulong4 __ovld __cnfn mul_hi(ulong4 x, ulong4 y);
-long8 __ovld __cnfn mul_hi(long8 x, long8 y);
-ulong8 __ovld __cnfn mul_hi(ulong8 x, ulong8 y);
-long16 __ovld __cnfn mul_hi(long16 x, long16 y);
-ulong16 __ovld __cnfn mul_hi(ulong16 x, ulong16 y);
+char __ovld __cnfn mul_hi(char, char);
+uchar __ovld __cnfn mul_hi(uchar, uchar);
+char2 __ovld __cnfn mul_hi(char2, char2);
+uchar2 __ovld __cnfn mul_hi(uchar2, uchar2);
+char3 __ovld __cnfn mul_hi(char3, char3);
+uchar3 __ovld __cnfn mul_hi(uchar3, uchar3);
+char4 __ovld __cnfn mul_hi(char4, char4);
+uchar4 __ovld __cnfn mul_hi(uchar4, uchar4);
+char8 __ovld __cnfn mul_hi(char8, char8);
+uchar8 __ovld __cnfn mul_hi(uchar8, uchar8);
+char16 __ovld __cnfn mul_hi(char16, char16);
+uchar16 __ovld __cnfn mul_hi(uchar16, uchar16);
+short __ovld __cnfn mul_hi(short, short);
+ushort __ovld __cnfn mul_hi(ushort, ushort);
+short2 __ovld __cnfn mul_hi(short2, short2);
+ushort2 __ovld __cnfn mul_hi(ushort2, ushort2);
+short3 __ovld __cnfn mul_hi(short3, short3);
+ushort3 __ovld __cnfn mul_hi(ushort3, ushort3);
+short4 __ovld __cnfn mul_hi(short4, short4);
+ushort4 __ovld __cnfn mul_hi(ushort4, ushort4);
+short8 __ovld __cnfn mul_hi(short8, short8);
+ushort8 __ovld __cnfn mul_hi(ushort8, ushort8);
+short16 __ovld __cnfn mul_hi(short16, short16);
+ushort16 __ovld __cnfn mul_hi(ushort16, ushort16);
+int __ovld __cnfn mul_hi(int, int);
+uint __ovld __cnfn mul_hi(uint, uint);
+int2 __ovld __cnfn mul_hi(int2, int2);
+uint2 __ovld __cnfn mul_hi(uint2, uint2);
+int3 __ovld __cnfn mul_hi(int3, int3);
+uint3 __ovld __cnfn mul_hi(uint3, uint3);
+int4 __ovld __cnfn mul_hi(int4, int4);
+uint4 __ovld __cnfn mul_hi(uint4, uint4);
+int8 __ovld __cnfn mul_hi(int8, int8);
+uint8 __ovld __cnfn mul_hi(uint8, uint8);
+int16 __ovld __cnfn mul_hi(int16, int16);
+uint16 __ovld __cnfn mul_hi(uint16, uint16);
+long __ovld __cnfn mul_hi(long, long);
+ulong __ovld __cnfn mul_hi(ulong, ulong);
+long2 __ovld __cnfn mul_hi(long2, long2);
+ulong2 __ovld __cnfn mul_hi(ulong2, ulong2);
+long3 __ovld __cnfn mul_hi(long3, long3);
+ulong3 __ovld __cnfn mul_hi(ulong3, ulong3);
+long4 __ovld __cnfn mul_hi(long4, long4);
+ulong4 __ovld __cnfn mul_hi(ulong4, ulong4);
+long8 __ovld __cnfn mul_hi(long8, long8);
+ulong8 __ovld __cnfn mul_hi(ulong8, ulong8);
+long16 __ovld __cnfn mul_hi(long16, long16);
+ulong16 __ovld __cnfn mul_hi(ulong16, ulong16);
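Illustrative usage (a sketch for clarity, not part of the patched header):

// mul_hi returns the upper half of the full-width product:
uint hi = mul_hi(0x80000000u, 2u);   // product 0x100000000; high 32 bits, so hi == 1u
int  sh = mul_hi(-1, 1);             // signed product -1 sign-extends; sh == -1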
/**
* For each element in v, the bits are shifted left by
@@ -9767,209 +9767,209 @@ ulong16 __ovld __cnfn mul_hi(ulong16 x, ulong16 y);
* side of the element are shifted back in from the
* right.
*/
-char __ovld __cnfn rotate(char v, char i);
-uchar __ovld __cnfn rotate(uchar v, uchar i);
-char2 __ovld __cnfn rotate(char2 v, char2 i);
-uchar2 __ovld __cnfn rotate(uchar2 v, uchar2 i);
-char3 __ovld __cnfn rotate(char3 v, char3 i);
-uchar3 __ovld __cnfn rotate(uchar3 v, uchar3 i);
-char4 __ovld __cnfn rotate(char4 v, char4 i);
-uchar4 __ovld __cnfn rotate(uchar4 v, uchar4 i);
-char8 __ovld __cnfn rotate(char8 v, char8 i);
-uchar8 __ovld __cnfn rotate(uchar8 v, uchar8 i);
-char16 __ovld __cnfn rotate(char16 v, char16 i);
-uchar16 __ovld __cnfn rotate(uchar16 v, uchar16 i);
-short __ovld __cnfn rotate(short v, short i);
-ushort __ovld __cnfn rotate(ushort v, ushort i);
-short2 __ovld __cnfn rotate(short2 v, short2 i);
-ushort2 __ovld __cnfn rotate(ushort2 v, ushort2 i);
-short3 __ovld __cnfn rotate(short3 v, short3 i);
-ushort3 __ovld __cnfn rotate(ushort3 v, ushort3 i);
-short4 __ovld __cnfn rotate(short4 v, short4 i);
-ushort4 __ovld __cnfn rotate(ushort4 v, ushort4 i);
-short8 __ovld __cnfn rotate(short8 v, short8 i);
-ushort8 __ovld __cnfn rotate(ushort8 v, ushort8 i);
-short16 __ovld __cnfn rotate(short16 v, short16 i);
-ushort16 __ovld __cnfn rotate(ushort16 v, ushort16 i);
-int __ovld __cnfn rotate(int v, int i);
-uint __ovld __cnfn rotate(uint v, uint i);
-int2 __ovld __cnfn rotate(int2 v, int2 i);
-uint2 __ovld __cnfn rotate(uint2 v, uint2 i);
-int3 __ovld __cnfn rotate(int3 v, int3 i);
-uint3 __ovld __cnfn rotate(uint3 v, uint3 i);
-int4 __ovld __cnfn rotate(int4 v, int4 i);
-uint4 __ovld __cnfn rotate(uint4 v, uint4 i);
-int8 __ovld __cnfn rotate(int8 v, int8 i);
-uint8 __ovld __cnfn rotate(uint8 v, uint8 i);
-int16 __ovld __cnfn rotate(int16 v, int16 i);
-uint16 __ovld __cnfn rotate(uint16 v, uint16 i);
-long __ovld __cnfn rotate(long v, long i);
-ulong __ovld __cnfn rotate(ulong v, ulong i);
-long2 __ovld __cnfn rotate(long2 v, long2 i);
-ulong2 __ovld __cnfn rotate(ulong2 v, ulong2 i);
-long3 __ovld __cnfn rotate(long3 v, long3 i);
-ulong3 __ovld __cnfn rotate(ulong3 v, ulong3 i);
-long4 __ovld __cnfn rotate(long4 v, long4 i);
-ulong4 __ovld __cnfn rotate(ulong4 v, ulong4 i);
-long8 __ovld __cnfn rotate(long8 v, long8 i);
-ulong8 __ovld __cnfn rotate(ulong8 v, ulong8 i);
-long16 __ovld __cnfn rotate(long16 v, long16 i);
-ulong16 __ovld __cnfn rotate(ulong16 v, ulong16 i);
+char __ovld __cnfn rotate(char, char);
+uchar __ovld __cnfn rotate(uchar, uchar);
+char2 __ovld __cnfn rotate(char2, char2);
+uchar2 __ovld __cnfn rotate(uchar2, uchar2);
+char3 __ovld __cnfn rotate(char3, char3);
+uchar3 __ovld __cnfn rotate(uchar3, uchar3);
+char4 __ovld __cnfn rotate(char4, char4);
+uchar4 __ovld __cnfn rotate(uchar4, uchar4);
+char8 __ovld __cnfn rotate(char8, char8);
+uchar8 __ovld __cnfn rotate(uchar8, uchar8);
+char16 __ovld __cnfn rotate(char16, char16);
+uchar16 __ovld __cnfn rotate(uchar16, uchar16);
+short __ovld __cnfn rotate(short, short);
+ushort __ovld __cnfn rotate(ushort, ushort);
+short2 __ovld __cnfn rotate(short2, short2);
+ushort2 __ovld __cnfn rotate(ushort2, ushort2);
+short3 __ovld __cnfn rotate(short3, short3);
+ushort3 __ovld __cnfn rotate(ushort3, ushort3);
+short4 __ovld __cnfn rotate(short4, short4);
+ushort4 __ovld __cnfn rotate(ushort4, ushort4);
+short8 __ovld __cnfn rotate(short8, short8);
+ushort8 __ovld __cnfn rotate(ushort8, ushort8);
+short16 __ovld __cnfn rotate(short16, short16);
+ushort16 __ovld __cnfn rotate(ushort16, ushort16);
+int __ovld __cnfn rotate(int, int);
+uint __ovld __cnfn rotate(uint, uint);
+int2 __ovld __cnfn rotate(int2, int2);
+uint2 __ovld __cnfn rotate(uint2, uint2);
+int3 __ovld __cnfn rotate(int3, int3);
+uint3 __ovld __cnfn rotate(uint3, uint3);
+int4 __ovld __cnfn rotate(int4, int4);
+uint4 __ovld __cnfn rotate(uint4, uint4);
+int8 __ovld __cnfn rotate(int8, int8);
+uint8 __ovld __cnfn rotate(uint8, uint8);
+int16 __ovld __cnfn rotate(int16, int16);
+uint16 __ovld __cnfn rotate(uint16, uint16);
+long __ovld __cnfn rotate(long, long);
+ulong __ovld __cnfn rotate(ulong, ulong);
+long2 __ovld __cnfn rotate(long2, long2);
+ulong2 __ovld __cnfn rotate(ulong2, ulong2);
+long3 __ovld __cnfn rotate(long3, long3);
+ulong3 __ovld __cnfn rotate(ulong3, ulong3);
+long4 __ovld __cnfn rotate(long4, long4);
+ulong4 __ovld __cnfn rotate(ulong4, ulong4);
+long8 __ovld __cnfn rotate(long8, long8);
+ulong8 __ovld __cnfn rotate(ulong8, ulong8);
+long16 __ovld __cnfn rotate(long16, long16);
+ulong16 __ovld __cnfn rotate(ulong16, ulong16);
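A worked instance of the rotate rule above (illustrative only, not part of the patched header):

uchar r = rotate((uchar)0x81, (uchar)1);   // 10000001 -> 00000011, r == 0x03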
/**
* Returns x - y and saturates the result.
*/
-char __ovld __cnfn sub_sat(char x, char y);
-uchar __ovld __cnfn sub_sat(uchar x, uchar y);
-char2 __ovld __cnfn sub_sat(char2 x, char2 y);
-uchar2 __ovld __cnfn sub_sat(uchar2 x, uchar2 y);
-char3 __ovld __cnfn sub_sat(char3 x, char3 y);
-uchar3 __ovld __cnfn sub_sat(uchar3 x, uchar3 y);
-char4 __ovld __cnfn sub_sat(char4 x, char4 y);
-uchar4 __ovld __cnfn sub_sat(uchar4 x, uchar4 y);
-char8 __ovld __cnfn sub_sat(char8 x, char8 y);
-uchar8 __ovld __cnfn sub_sat(uchar8 x, uchar8 y);
-char16 __ovld __cnfn sub_sat(char16 x, char16 y);
-uchar16 __ovld __cnfn sub_sat(uchar16 x, uchar16 y);
-short __ovld __cnfn sub_sat(short x, short y);
-ushort __ovld __cnfn sub_sat(ushort x, ushort y);
-short2 __ovld __cnfn sub_sat(short2 x, short2 y);
-ushort2 __ovld __cnfn sub_sat(ushort2 x, ushort2 y);
-short3 __ovld __cnfn sub_sat(short3 x, short3 y);
-ushort3 __ovld __cnfn sub_sat(ushort3 x, ushort3 y);
-short4 __ovld __cnfn sub_sat(short4 x, short4 y);
-ushort4 __ovld __cnfn sub_sat(ushort4 x, ushort4 y);
-short8 __ovld __cnfn sub_sat(short8 x, short8 y);
-ushort8 __ovld __cnfn sub_sat(ushort8 x, ushort8 y);
-short16 __ovld __cnfn sub_sat(short16 x, short16 y);
-ushort16 __ovld __cnfn sub_sat(ushort16 x, ushort16 y);
-int __ovld __cnfn sub_sat(int x, int y);
-uint __ovld __cnfn sub_sat(uint x, uint y);
-int2 __ovld __cnfn sub_sat(int2 x, int2 y);
-uint2 __ovld __cnfn sub_sat(uint2 x, uint2 y);
-int3 __ovld __cnfn sub_sat(int3 x, int3 y);
-uint3 __ovld __cnfn sub_sat(uint3 x, uint3 y);
-int4 __ovld __cnfn sub_sat(int4 x, int4 y);
-uint4 __ovld __cnfn sub_sat(uint4 x, uint4 y);
-int8 __ovld __cnfn sub_sat(int8 x, int8 y);
-uint8 __ovld __cnfn sub_sat(uint8 x, uint8 y);
-int16 __ovld __cnfn sub_sat(int16 x, int16 y);
-uint16 __ovld __cnfn sub_sat(uint16 x, uint16 y);
-long __ovld __cnfn sub_sat(long x, long y);
-ulong __ovld __cnfn sub_sat(ulong x, ulong y);
-long2 __ovld __cnfn sub_sat(long2 x, long2 y);
-ulong2 __ovld __cnfn sub_sat(ulong2 x, ulong2 y);
-long3 __ovld __cnfn sub_sat(long3 x, long3 y);
-ulong3 __ovld __cnfn sub_sat(ulong3 x, ulong3 y);
-long4 __ovld __cnfn sub_sat(long4 x, long4 y);
-ulong4 __ovld __cnfn sub_sat(ulong4 x, ulong4 y);
-long8 __ovld __cnfn sub_sat(long8 x, long8 y);
-ulong8 __ovld __cnfn sub_sat(ulong8 x, ulong8 y);
-long16 __ovld __cnfn sub_sat(long16 x, long16 y);
-ulong16 __ovld __cnfn sub_sat(ulong16 x, ulong16 y);
+char __ovld __cnfn sub_sat(char, char);
+uchar __ovld __cnfn sub_sat(uchar, uchar);
+char2 __ovld __cnfn sub_sat(char2, char2);
+uchar2 __ovld __cnfn sub_sat(uchar2, uchar2);
+char3 __ovld __cnfn sub_sat(char3, char3);
+uchar3 __ovld __cnfn sub_sat(uchar3, uchar3);
+char4 __ovld __cnfn sub_sat(char4, char4);
+uchar4 __ovld __cnfn sub_sat(uchar4, uchar4);
+char8 __ovld __cnfn sub_sat(char8, char8);
+uchar8 __ovld __cnfn sub_sat(uchar8, uchar8);
+char16 __ovld __cnfn sub_sat(char16, char16);
+uchar16 __ovld __cnfn sub_sat(uchar16, uchar16);
+short __ovld __cnfn sub_sat(short, short);
+ushort __ovld __cnfn sub_sat(ushort, ushort);
+short2 __ovld __cnfn sub_sat(short2, short2);
+ushort2 __ovld __cnfn sub_sat(ushort2, ushort2);
+short3 __ovld __cnfn sub_sat(short3, short3);
+ushort3 __ovld __cnfn sub_sat(ushort3, ushort3);
+short4 __ovld __cnfn sub_sat(short4, short4);
+ushort4 __ovld __cnfn sub_sat(ushort4, ushort4);
+short8 __ovld __cnfn sub_sat(short8, short8);
+ushort8 __ovld __cnfn sub_sat(ushort8, ushort8);
+short16 __ovld __cnfn sub_sat(short16, short16);
+ushort16 __ovld __cnfn sub_sat(ushort16, ushort16);
+int __ovld __cnfn sub_sat(int, int);
+uint __ovld __cnfn sub_sat(uint, uint);
+int2 __ovld __cnfn sub_sat(int2, int2);
+uint2 __ovld __cnfn sub_sat(uint2, uint2);
+int3 __ovld __cnfn sub_sat(int3, int3);
+uint3 __ovld __cnfn sub_sat(uint3, uint3);
+int4 __ovld __cnfn sub_sat(int4, int4);
+uint4 __ovld __cnfn sub_sat(uint4, uint4);
+int8 __ovld __cnfn sub_sat(int8, int8);
+uint8 __ovld __cnfn sub_sat(uint8, uint8);
+int16 __ovld __cnfn sub_sat(int16, int16);
+uint16 __ovld __cnfn sub_sat(uint16, uint16);
+long __ovld __cnfn sub_sat(long, long);
+ulong __ovld __cnfn sub_sat(ulong, ulong);
+long2 __ovld __cnfn sub_sat(long2, long2);
+ulong2 __ovld __cnfn sub_sat(ulong2, ulong2);
+long3 __ovld __cnfn sub_sat(long3, long3);
+ulong3 __ovld __cnfn sub_sat(ulong3, ulong3);
+long4 __ovld __cnfn sub_sat(long4, long4);
+ulong4 __ovld __cnfn sub_sat(ulong4, ulong4);
+long8 __ovld __cnfn sub_sat(long8, long8);
+ulong8 __ovld __cnfn sub_sat(ulong8, ulong8);
+long16 __ovld __cnfn sub_sat(long16, long16);
+ulong16 __ovld __cnfn sub_sat(ulong16, ulong16);
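Saturation in action (an illustrative sketch, not part of the patched header):

uchar u = sub_sat((uchar)10, (uchar)20);    // true result -10; saturates to 0
char  s = sub_sat((char)-100, (char)100);   // true result -200; saturates to -128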
/**
* result[i] = ((short)hi[i] << 8) | lo[i]
* result[i] = ((ushort)hi[i] << 8) | lo[i]
*/
-short __ovld __cnfn upsample(char hi, uchar lo);
-ushort __ovld __cnfn upsample(uchar hi, uchar lo);
-short2 __ovld __cnfn upsample(char2 hi, uchar2 lo);
-short3 __ovld __cnfn upsample(char3 hi, uchar3 lo);
-short4 __ovld __cnfn upsample(char4 hi, uchar4 lo);
-short8 __ovld __cnfn upsample(char8 hi, uchar8 lo);
-short16 __ovld __cnfn upsample(char16 hi, uchar16 lo);
-ushort2 __ovld __cnfn upsample(uchar2 hi, uchar2 lo);
-ushort3 __ovld __cnfn upsample(uchar3 hi, uchar3 lo);
-ushort4 __ovld __cnfn upsample(uchar4 hi, uchar4 lo);
-ushort8 __ovld __cnfn upsample(uchar8 hi, uchar8 lo);
-ushort16 __ovld __cnfn upsample(uchar16 hi, uchar16 lo);
+short __ovld __cnfn upsample(char, uchar);
+ushort __ovld __cnfn upsample(uchar, uchar);
+short2 __ovld __cnfn upsample(char2, uchar2);
+short3 __ovld __cnfn upsample(char3, uchar3);
+short4 __ovld __cnfn upsample(char4, uchar4);
+short8 __ovld __cnfn upsample(char8, uchar8);
+short16 __ovld __cnfn upsample(char16, uchar16);
+ushort2 __ovld __cnfn upsample(uchar2, uchar2);
+ushort3 __ovld __cnfn upsample(uchar3, uchar3);
+ushort4 __ovld __cnfn upsample(uchar4, uchar4);
+ushort8 __ovld __cnfn upsample(uchar8, uchar8);
+ushort16 __ovld __cnfn upsample(uchar16, uchar16);
/**
* result[i] = ((int)hi[i] << 16) | lo[i]
* result[i] = ((uint)hi[i] << 16) | lo[i]
*/
-int __ovld __cnfn upsample(short hi, ushort lo);
-uint __ovld __cnfn upsample(ushort hi, ushort lo);
-int2 __ovld __cnfn upsample(short2 hi, ushort2 lo);
-int3 __ovld __cnfn upsample(short3 hi, ushort3 lo);
-int4 __ovld __cnfn upsample(short4 hi, ushort4 lo);
-int8 __ovld __cnfn upsample(short8 hi, ushort8 lo);
-int16 __ovld __cnfn upsample(short16 hi, ushort16 lo);
-uint2 __ovld __cnfn upsample(ushort2 hi, ushort2 lo);
-uint3 __ovld __cnfn upsample(ushort3 hi, ushort3 lo);
-uint4 __ovld __cnfn upsample(ushort4 hi, ushort4 lo);
-uint8 __ovld __cnfn upsample(ushort8 hi, ushort8 lo);
-uint16 __ovld __cnfn upsample(ushort16 hi, ushort16 lo);
+int __ovld __cnfn upsample(short, ushort);
+uint __ovld __cnfn upsample(ushort, ushort);
+int2 __ovld __cnfn upsample(short2, ushort2);
+int3 __ovld __cnfn upsample(short3, ushort3);
+int4 __ovld __cnfn upsample(short4, ushort4);
+int8 __ovld __cnfn upsample(short8, ushort8);
+int16 __ovld __cnfn upsample(short16, ushort16);
+uint2 __ovld __cnfn upsample(ushort2, ushort2);
+uint3 __ovld __cnfn upsample(ushort3, ushort3);
+uint4 __ovld __cnfn upsample(ushort4, ushort4);
+uint8 __ovld __cnfn upsample(ushort8, ushort8);
+uint16 __ovld __cnfn upsample(ushort16, ushort16);
/**
* result[i] = ((long)hi[i] << 32) | lo[i]
* result[i] = ((ulong)hi[i] << 32) | lo[i]
*/
-long __ovld __cnfn upsample(int hi, uint lo);
-ulong __ovld __cnfn upsample(uint hi, uint lo);
-long2 __ovld __cnfn upsample(int2 hi, uint2 lo);
-long3 __ovld __cnfn upsample(int3 hi, uint3 lo);
-long4 __ovld __cnfn upsample(int4 hi, uint4 lo);
-long8 __ovld __cnfn upsample(int8 hi, uint8 lo);
-long16 __ovld __cnfn upsample(int16 hi, uint16 lo);
-ulong2 __ovld __cnfn upsample(uint2 hi, uint2 lo);
-ulong3 __ovld __cnfn upsample(uint3 hi, uint3 lo);
-ulong4 __ovld __cnfn upsample(uint4 hi, uint4 lo);
-ulong8 __ovld __cnfn upsample(uint8 hi, uint8 lo);
-ulong16 __ovld __cnfn upsample(uint16 hi, uint16 lo);
+long __ovld __cnfn upsample(int, uint);
+ulong __ovld __cnfn upsample(uint, uint);
+long2 __ovld __cnfn upsample(int2, uint2);
+long3 __ovld __cnfn upsample(int3, uint3);
+long4 __ovld __cnfn upsample(int4, uint4);
+long8 __ovld __cnfn upsample(int8, uint8);
+long16 __ovld __cnfn upsample(int16, uint16);
+ulong2 __ovld __cnfn upsample(uint2, uint2);
+ulong3 __ovld __cnfn upsample(uint3, uint3);
+ulong4 __ovld __cnfn upsample(uint4, uint4);
+ulong8 __ovld __cnfn upsample(uint8, uint8);
+ulong16 __ovld __cnfn upsample(uint16, uint16);
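Worked instances of the upsample formulas above (illustrative only, not part of the patched header):

short s = upsample((char)0x12, (uchar)0x34);               // ((short)0x12 << 8) | 0x34 == 0x1234
uint2 v = upsample((ushort2)(1, 2), (ushort2)(0, 0xFFFF)); // (0x10000u, 0x2FFFFu)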
/*
* popcount(x): returns the number of set bits in x
*/
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-char __ovld __cnfn popcount(char x);
-uchar __ovld __cnfn popcount(uchar x);
-char2 __ovld __cnfn popcount(char2 x);
-uchar2 __ovld __cnfn popcount(uchar2 x);
-char3 __ovld __cnfn popcount(char3 x);
-uchar3 __ovld __cnfn popcount(uchar3 x);
-char4 __ovld __cnfn popcount(char4 x);
-uchar4 __ovld __cnfn popcount(uchar4 x);
-char8 __ovld __cnfn popcount(char8 x);
-uchar8 __ovld __cnfn popcount(uchar8 x);
-char16 __ovld __cnfn popcount(char16 x);
-uchar16 __ovld __cnfn popcount(uchar16 x);
-short __ovld __cnfn popcount(short x);
-ushort __ovld __cnfn popcount(ushort x);
-short2 __ovld __cnfn popcount(short2 x);
-ushort2 __ovld __cnfn popcount(ushort2 x);
-short3 __ovld __cnfn popcount(short3 x);
-ushort3 __ovld __cnfn popcount(ushort3 x);
-short4 __ovld __cnfn popcount(short4 x);
-ushort4 __ovld __cnfn popcount(ushort4 x);
-short8 __ovld __cnfn popcount(short8 x);
-ushort8 __ovld __cnfn popcount(ushort8 x);
-short16 __ovld __cnfn popcount(short16 x);
-ushort16 __ovld __cnfn popcount(ushort16 x);
-int __ovld __cnfn popcount(int x);
-uint __ovld __cnfn popcount(uint x);
-int2 __ovld __cnfn popcount(int2 x);
-uint2 __ovld __cnfn popcount(uint2 x);
-int3 __ovld __cnfn popcount(int3 x);
-uint3 __ovld __cnfn popcount(uint3 x);
-int4 __ovld __cnfn popcount(int4 x);
-uint4 __ovld __cnfn popcount(uint4 x);
-int8 __ovld __cnfn popcount(int8 x);
-uint8 __ovld __cnfn popcount(uint8 x);
-int16 __ovld __cnfn popcount(int16 x);
-uint16 __ovld __cnfn popcount(uint16 x);
-long __ovld __cnfn popcount(long x);
-ulong __ovld __cnfn popcount(ulong x);
-long2 __ovld __cnfn popcount(long2 x);
-ulong2 __ovld __cnfn popcount(ulong2 x);
-long3 __ovld __cnfn popcount(long3 x);
-ulong3 __ovld __cnfn popcount(ulong3 x);
-long4 __ovld __cnfn popcount(long4 x);
-ulong4 __ovld __cnfn popcount(ulong4 x);
-long8 __ovld __cnfn popcount(long8 x);
-ulong8 __ovld __cnfn popcount(ulong8 x);
-long16 __ovld __cnfn popcount(long16 x);
-ulong16 __ovld __cnfn popcount(ulong16 x);
+char __ovld __cnfn popcount(char);
+uchar __ovld __cnfn popcount(uchar);
+char2 __ovld __cnfn popcount(char2);
+uchar2 __ovld __cnfn popcount(uchar2);
+char3 __ovld __cnfn popcount(char3);
+uchar3 __ovld __cnfn popcount(uchar3);
+char4 __ovld __cnfn popcount(char4);
+uchar4 __ovld __cnfn popcount(uchar4);
+char8 __ovld __cnfn popcount(char8);
+uchar8 __ovld __cnfn popcount(uchar8);
+char16 __ovld __cnfn popcount(char16);
+uchar16 __ovld __cnfn popcount(uchar16);
+short __ovld __cnfn popcount(short);
+ushort __ovld __cnfn popcount(ushort);
+short2 __ovld __cnfn popcount(short2);
+ushort2 __ovld __cnfn popcount(ushort2);
+short3 __ovld __cnfn popcount(short3);
+ushort3 __ovld __cnfn popcount(ushort3);
+short4 __ovld __cnfn popcount(short4);
+ushort4 __ovld __cnfn popcount(ushort4);
+short8 __ovld __cnfn popcount(short8);
+ushort8 __ovld __cnfn popcount(ushort8);
+short16 __ovld __cnfn popcount(short16);
+ushort16 __ovld __cnfn popcount(ushort16);
+int __ovld __cnfn popcount(int);
+uint __ovld __cnfn popcount(uint);
+int2 __ovld __cnfn popcount(int2);
+uint2 __ovld __cnfn popcount(uint2);
+int3 __ovld __cnfn popcount(int3);
+uint3 __ovld __cnfn popcount(uint3);
+int4 __ovld __cnfn popcount(int4);
+uint4 __ovld __cnfn popcount(uint4);
+int8 __ovld __cnfn popcount(int8);
+uint8 __ovld __cnfn popcount(uint8);
+int16 __ovld __cnfn popcount(int16);
+uint16 __ovld __cnfn popcount(uint16);
+long __ovld __cnfn popcount(long);
+ulong __ovld __cnfn popcount(ulong);
+long2 __ovld __cnfn popcount(long2);
+ulong2 __ovld __cnfn popcount(ulong2);
+long3 __ovld __cnfn popcount(long3);
+ulong3 __ovld __cnfn popcount(ulong3);
+long4 __ovld __cnfn popcount(long4);
+ulong4 __ovld __cnfn popcount(ulong4);
+long8 __ovld __cnfn popcount(long8);
+ulong8 __ovld __cnfn popcount(ulong8);
+long16 __ovld __cnfn popcount(long16);
+ulong16 __ovld __cnfn popcount(ulong16);
#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
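Illustrative usage (a sketch, not part of the patched header; popcount requires OpenCL C 1.2 or C++ for OpenCL per the guard above):

uint n = popcount(0xF0F0F0F0u);          // 16 set bits
int4 c = popcount((int4)(0, 1, 3, 7));   // (0, 1, 2, 3)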
/**
@@ -9978,18 +9978,18 @@ ulong16 __ovld __cnfn popcount(ulong16 x);
* Refer to definition of mul24 to see how the 24-bit
* integer multiplication is performed.
*/
-int __ovld __cnfn mad24(int x, int y, int z);
-uint __ovld __cnfn mad24(uint x, uint y, uint z);
-int2 __ovld __cnfn mad24(int2 x, int2 y, int2 z);
-uint2 __ovld __cnfn mad24(uint2 x, uint2 y, uint2 z);
-int3 __ovld __cnfn mad24(int3 x, int3 y, int3 z);
-uint3 __ovld __cnfn mad24(uint3 x, uint3 y, uint3 z);
-int4 __ovld __cnfn mad24(int4 x, int4 y, int4 z);
-uint4 __ovld __cnfn mad24(uint4 x, uint4 y, uint4 z);
-int8 __ovld __cnfn mad24(int8 x, int8 y, int8 z);
-uint8 __ovld __cnfn mad24(uint8 x, uint8 y, uint8 z);
-int16 __ovld __cnfn mad24(int16 x, int16 y, int16 z);
-uint16 __ovld __cnfn mad24(uint16 x, uint16 y, uint16 z);
+int __ovld __cnfn mad24(int, int, int);
+uint __ovld __cnfn mad24(uint, uint, uint);
+int2 __ovld __cnfn mad24(int2, int2, int2);
+uint2 __ovld __cnfn mad24(uint2, uint2, uint2);
+int3 __ovld __cnfn mad24(int3, int3, int3);
+uint3 __ovld __cnfn mad24(uint3, uint3, uint3);
+int4 __ovld __cnfn mad24(int4, int4, int4);
+uint4 __ovld __cnfn mad24(uint4, uint4, uint4);
+int8 __ovld __cnfn mad24(int8, int8, int8);
+uint8 __ovld __cnfn mad24(uint8, uint8, uint8);
+int16 __ovld __cnfn mad24(int16, int16, int16);
+uint16 __ovld __cnfn mad24(uint16, uint16, uint16);
/**
* Multiply two 24-bit integer values x and y. x and y
@@ -10001,18 +10001,18 @@ uint16 __ovld __cnfn mad24(uint16 x, uint16 y, uint16 z);
* x and y are not in this range, the multiplication
* result is implementation-defined.
*/
-int __ovld __cnfn mul24(int x, int y);
-uint __ovld __cnfn mul24(uint x, uint y);
-int2 __ovld __cnfn mul24(int2 x, int2 y);
-uint2 __ovld __cnfn mul24(uint2 x, uint2 y);
-int3 __ovld __cnfn mul24(int3 x, int3 y);
-uint3 __ovld __cnfn mul24(uint3 x, uint3 y);
-int4 __ovld __cnfn mul24(int4 x, int4 y);
-uint4 __ovld __cnfn mul24(uint4 x, uint4 y);
-int8 __ovld __cnfn mul24(int8 x, int8 y);
-uint8 __ovld __cnfn mul24(uint8 x, uint8 y);
-int16 __ovld __cnfn mul24(int16 x, int16 y);
-uint16 __ovld __cnfn mul24(uint16 x, uint16 y);
+int __ovld __cnfn mul24(int, int);
+uint __ovld __cnfn mul24(uint, uint);
+int2 __ovld __cnfn mul24(int2, int2);
+uint2 __ovld __cnfn mul24(uint2, uint2);
+int3 __ovld __cnfn mul24(int3, int3);
+uint3 __ovld __cnfn mul24(uint3, uint3);
+int4 __ovld __cnfn mul24(int4, int4);
+uint4 __ovld __cnfn mul24(uint4, uint4);
+int8 __ovld __cnfn mul24(int8, int8);
+uint8 __ovld __cnfn mul24(uint8, uint8);
+int16 __ovld __cnfn mul24(int16, int16);
+uint16 __ovld __cnfn mul24(uint16, uint16);
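Illustrative usage of the 24-bit helpers (a sketch, not part of the patched header):

// Operands must fit in 24 bits (signed range [-2^23, 2^23 - 1]) for a defined result.
int p = mul24(1 << 20, 1 << 3);      // 2^23, well-defined
int q = mad24(1 << 20, 1 << 3, 5);   // mul24(x, y) + z == 2^23 + 5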
// OpenCL v1.1 s6.11.4, v1.2 s6.12.4, v2.0 s6.13.4 - Common Functions
@@ -10020,153 +10020,153 @@ uint16 __ovld __cnfn mul24(uint16 x, uint16 y);
* Returns fmin(fmax(x, minval), maxval).
* Results are undefined if minval > maxval.
*/
-float __ovld __cnfn clamp(float x, float minval, float maxval);
-float2 __ovld __cnfn clamp(float2 x, float2 minval, float2 maxval);
-float3 __ovld __cnfn clamp(float3 x, float3 minval, float3 maxval);
-float4 __ovld __cnfn clamp(float4 x, float4 minval, float4 maxval);
-float8 __ovld __cnfn clamp(float8 x, float8 minval, float8 maxval);
-float16 __ovld __cnfn clamp(float16 x, float16 minval, float16 maxval);
-float2 __ovld __cnfn clamp(float2 x, float minval, float maxval);
-float3 __ovld __cnfn clamp(float3 x, float minval, float maxval);
-float4 __ovld __cnfn clamp(float4 x, float minval, float maxval);
-float8 __ovld __cnfn clamp(float8 x, float minval, float maxval);
-float16 __ovld __cnfn clamp(float16 x, float minval, float maxval);
+float __ovld __cnfn clamp(float, float, float);
+float2 __ovld __cnfn clamp(float2, float2, float2);
+float3 __ovld __cnfn clamp(float3, float3, float3);
+float4 __ovld __cnfn clamp(float4, float4, float4);
+float8 __ovld __cnfn clamp(float8, float8, float8);
+float16 __ovld __cnfn clamp(float16, float16, float16);
+float2 __ovld __cnfn clamp(float2, float, float);
+float3 __ovld __cnfn clamp(float3, float, float);
+float4 __ovld __cnfn clamp(float4, float, float);
+float8 __ovld __cnfn clamp(float8, float, float);
+float16 __ovld __cnfn clamp(float16, float, float);
#ifdef cl_khr_fp64
-double __ovld __cnfn clamp(double x, double minval, double maxval);
-double2 __ovld __cnfn clamp(double2 x, double2 minval, double2 maxval);
-double3 __ovld __cnfn clamp(double3 x, double3 minval, double3 maxval);
-double4 __ovld __cnfn clamp(double4 x, double4 minval, double4 maxval);
-double8 __ovld __cnfn clamp(double8 x, double8 minval, double8 maxval);
-double16 __ovld __cnfn clamp(double16 x, double16 minval, double16 maxval);
-double2 __ovld __cnfn clamp(double2 x, double minval, double maxval);
-double3 __ovld __cnfn clamp(double3 x, double minval, double maxval);
-double4 __ovld __cnfn clamp(double4 x, double minval, double maxval);
-double8 __ovld __cnfn clamp(double8 x, double minval, double maxval);
-double16 __ovld __cnfn clamp(double16 x, double minval, double maxval);
+double __ovld __cnfn clamp(double, double, double);
+double2 __ovld __cnfn clamp(double2, double2, double2);
+double3 __ovld __cnfn clamp(double3, double3, double3);
+double4 __ovld __cnfn clamp(double4, double4, double4);
+double8 __ovld __cnfn clamp(double8, double8, double8);
+double16 __ovld __cnfn clamp(double16, double16, double16);
+double2 __ovld __cnfn clamp(double2, double, double);
+double3 __ovld __cnfn clamp(double3, double, double);
+double4 __ovld __cnfn clamp(double4, double, double);
+double8 __ovld __cnfn clamp(double8, double, double);
+double16 __ovld __cnfn clamp(double16, double, double);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn clamp(half x, half minval, half maxval);
-half2 __ovld __cnfn clamp(half2 x, half2 minval, half2 maxval);
-half3 __ovld __cnfn clamp(half3 x, half3 minval, half3 maxval);
-half4 __ovld __cnfn clamp(half4 x, half4 minval, half4 maxval);
-half8 __ovld __cnfn clamp(half8 x, half8 minval, half8 maxval);
-half16 __ovld __cnfn clamp(half16 x, half16 minval, half16 maxval);
-half2 __ovld __cnfn clamp(half2 x, half minval, half maxval);
-half3 __ovld __cnfn clamp(half3 x, half minval, half maxval);
-half4 __ovld __cnfn clamp(half4 x, half minval, half maxval);
-half8 __ovld __cnfn clamp(half8 x, half minval, half maxval);
-half16 __ovld __cnfn clamp(half16 x, half minval, half maxval);
+half __ovld __cnfn clamp(half, half, half);
+half2 __ovld __cnfn clamp(half2, half2, half2);
+half3 __ovld __cnfn clamp(half3, half3, half3);
+half4 __ovld __cnfn clamp(half4, half4, half4);
+half8 __ovld __cnfn clamp(half8, half8, half8);
+half16 __ovld __cnfn clamp(half16, half16, half16);
+half2 __ovld __cnfn clamp(half2, half, half);
+half3 __ovld __cnfn clamp(half3, half, half);
+half4 __ovld __cnfn clamp(half4, half, half);
+half8 __ovld __cnfn clamp(half8, half, half);
+half16 __ovld __cnfn clamp(half16, half, half);
#endif //cl_khr_fp16
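Typical usage (an illustrative sketch, not part of the patched header):

float  t = clamp(1.3f, 0.0f, 1.0f);                               // 1.0f
float4 v = clamp((float4)(-2.0f, 0.25f, 0.5f, 9.0f), 0.0f, 1.0f); // (0.0f, 0.25f, 0.5f, 1.0f)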
/**
* Converts radians to degrees, i.e. (180 / PI) *
* radians.
*/
-float __ovld __cnfn degrees(float radians);
-float2 __ovld __cnfn degrees(float2 radians);
-float3 __ovld __cnfn degrees(float3 radians);
-float4 __ovld __cnfn degrees(float4 radians);
-float8 __ovld __cnfn degrees(float8 radians);
-float16 __ovld __cnfn degrees(float16 radians);
+float __ovld __cnfn degrees(float);
+float2 __ovld __cnfn degrees(float2);
+float3 __ovld __cnfn degrees(float3);
+float4 __ovld __cnfn degrees(float4);
+float8 __ovld __cnfn degrees(float8);
+float16 __ovld __cnfn degrees(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn degrees(double radians);
-double2 __ovld __cnfn degrees(double2 radians);
-double3 __ovld __cnfn degrees(double3 radians);
-double4 __ovld __cnfn degrees(double4 radians);
-double8 __ovld __cnfn degrees(double8 radians);
-double16 __ovld __cnfn degrees(double16 radians);
+double __ovld __cnfn degrees(double);
+double2 __ovld __cnfn degrees(double2);
+double3 __ovld __cnfn degrees(double3);
+double4 __ovld __cnfn degrees(double4);
+double8 __ovld __cnfn degrees(double8);
+double16 __ovld __cnfn degrees(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn degrees(half radians);
-half2 __ovld __cnfn degrees(half2 radians);
-half3 __ovld __cnfn degrees(half3 radians);
-half4 __ovld __cnfn degrees(half4 radians);
-half8 __ovld __cnfn degrees(half8 radians);
-half16 __ovld __cnfn degrees(half16 radians);
+half __ovld __cnfn degrees(half);
+half2 __ovld __cnfn degrees(half2);
+half3 __ovld __cnfn degrees(half3);
+half4 __ovld __cnfn degrees(half4);
+half8 __ovld __cnfn degrees(half8);
+half16 __ovld __cnfn degrees(half16);
#endif //cl_khr_fp16
/**
* Returns y if x < y, otherwise it returns x. If x and y
* are infinite or NaN, the return values are undefined.
*/
-float __ovld __cnfn max(float x, float y);
-float2 __ovld __cnfn max(float2 x, float2 y);
-float3 __ovld __cnfn max(float3 x, float3 y);
-float4 __ovld __cnfn max(float4 x, float4 y);
-float8 __ovld __cnfn max(float8 x, float8 y);
-float16 __ovld __cnfn max(float16 x, float16 y);
-float2 __ovld __cnfn max(float2 x, float y);
-float3 __ovld __cnfn max(float3 x, float y);
-float4 __ovld __cnfn max(float4 x, float y);
-float8 __ovld __cnfn max(float8 x, float y);
-float16 __ovld __cnfn max(float16 x, float y);
+float __ovld __cnfn max(float, float);
+float2 __ovld __cnfn max(float2, float2);
+float3 __ovld __cnfn max(float3, float3);
+float4 __ovld __cnfn max(float4, float4);
+float8 __ovld __cnfn max(float8, float8);
+float16 __ovld __cnfn max(float16, float16);
+float2 __ovld __cnfn max(float2, float);
+float3 __ovld __cnfn max(float3, float);
+float4 __ovld __cnfn max(float4, float);
+float8 __ovld __cnfn max(float8, float);
+float16 __ovld __cnfn max(float16, float);
#ifdef cl_khr_fp64
-double __ovld __cnfn max(double x, double y);
-double2 __ovld __cnfn max(double2 x, double2 y);
-double3 __ovld __cnfn max(double3 x, double3 y);
-double4 __ovld __cnfn max(double4 x, double4 y);
-double8 __ovld __cnfn max(double8 x, double8 y);
-double16 __ovld __cnfn max(double16 x, double16 y);
-double2 __ovld __cnfn max(double2 x, double y);
-double3 __ovld __cnfn max(double3 x, double y);
-double4 __ovld __cnfn max(double4 x, double y);
-double8 __ovld __cnfn max(double8 x, double y);
-double16 __ovld __cnfn max(double16 x, double y);
+double __ovld __cnfn max(double, double);
+double2 __ovld __cnfn max(double2, double2);
+double3 __ovld __cnfn max(double3, double3);
+double4 __ovld __cnfn max(double4, double4);
+double8 __ovld __cnfn max(double8, double8);
+double16 __ovld __cnfn max(double16, double16);
+double2 __ovld __cnfn max(double2, double);
+double3 __ovld __cnfn max(double3, double);
+double4 __ovld __cnfn max(double4, double);
+double8 __ovld __cnfn max(double8, double);
+double16 __ovld __cnfn max(double16, double);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn max(half x, half y);
-half2 __ovld __cnfn max(half2 x, half2 y);
-half3 __ovld __cnfn max(half3 x, half3 y);
-half4 __ovld __cnfn max(half4 x, half4 y);
-half8 __ovld __cnfn max(half8 x, half8 y);
-half16 __ovld __cnfn max(half16 x, half16 y);
-half2 __ovld __cnfn max(half2 x, half y);
-half3 __ovld __cnfn max(half3 x, half y);
-half4 __ovld __cnfn max(half4 x, half y);
-half8 __ovld __cnfn max(half8 x, half y);
-half16 __ovld __cnfn max(half16 x, half y);
+half __ovld __cnfn max(half, half);
+half2 __ovld __cnfn max(half2, half2);
+half3 __ovld __cnfn max(half3, half3);
+half4 __ovld __cnfn max(half4, half4);
+half8 __ovld __cnfn max(half8, half8);
+half16 __ovld __cnfn max(half16, half16);
+half2 __ovld __cnfn max(half2, half);
+half3 __ovld __cnfn max(half3, half);
+half4 __ovld __cnfn max(half4, half);
+half8 __ovld __cnfn max(half8, half);
+half16 __ovld __cnfn max(half16, half);
#endif //cl_khr_fp16
/**
* Returns y if y < x, otherwise it returns x. If x and y
* are infinite or NaN, the return values are undefined.
*/
-float __ovld __cnfn min(float x, float y);
-float2 __ovld __cnfn min(float2 x, float2 y);
-float3 __ovld __cnfn min(float3 x, float3 y);
-float4 __ovld __cnfn min(float4 x, float4 y);
-float8 __ovld __cnfn min(float8 x, float8 y);
-float16 __ovld __cnfn min(float16 x, float16 y);
-float2 __ovld __cnfn min(float2 x, float y);
-float3 __ovld __cnfn min(float3 x, float y);
-float4 __ovld __cnfn min(float4 x, float y);
-float8 __ovld __cnfn min(float8 x, float y);
-float16 __ovld __cnfn min(float16 x, float y);
+float __ovld __cnfn min(float, float);
+float2 __ovld __cnfn min(float2, float2);
+float3 __ovld __cnfn min(float3, float3);
+float4 __ovld __cnfn min(float4, float4);
+float8 __ovld __cnfn min(float8, float8);
+float16 __ovld __cnfn min(float16, float16);
+float2 __ovld __cnfn min(float2, float);
+float3 __ovld __cnfn min(float3, float);
+float4 __ovld __cnfn min(float4, float);
+float8 __ovld __cnfn min(float8, float);
+float16 __ovld __cnfn min(float16, float);
#ifdef cl_khr_fp64
-double __ovld __cnfn min(double x, double y);
-double2 __ovld __cnfn min(double2 x, double2 y);
-double3 __ovld __cnfn min(double3 x, double3 y);
-double4 __ovld __cnfn min(double4 x, double4 y);
-double8 __ovld __cnfn min(double8 x, double8 y);
-double16 __ovld __cnfn min(double16 x, double16 y);
-double2 __ovld __cnfn min(double2 x, double y);
-double3 __ovld __cnfn min(double3 x, double y);
-double4 __ovld __cnfn min(double4 x, double y);
-double8 __ovld __cnfn min(double8 x, double y);
-double16 __ovld __cnfn min(double16 x, double y);
+double __ovld __cnfn min(double, double);
+double2 __ovld __cnfn min(double2, double2);
+double3 __ovld __cnfn min(double3, double3);
+double4 __ovld __cnfn min(double4, double4);
+double8 __ovld __cnfn min(double8, double8);
+double16 __ovld __cnfn min(double16, double16);
+double2 __ovld __cnfn min(double2, double);
+double3 __ovld __cnfn min(double3, double);
+double4 __ovld __cnfn min(double4, double);
+double8 __ovld __cnfn min(double8, double);
+double16 __ovld __cnfn min(double16, double);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn min(half x, half y);
-half2 __ovld __cnfn min(half2 x, half2 y);
-half3 __ovld __cnfn min(half3 x, half3 y);
-half4 __ovld __cnfn min(half4 x, half4 y);
-half8 __ovld __cnfn min(half8 x, half8 y);
-half16 __ovld __cnfn min(half16 x, half16 y);
-half2 __ovld __cnfn min(half2 x, half y);
-half3 __ovld __cnfn min(half3 x, half y);
-half4 __ovld __cnfn min(half4 x, half y);
-half8 __ovld __cnfn min(half8 x, half y);
-half16 __ovld __cnfn min(half16 x, half y);
+half __ovld __cnfn min(half, half);
+half2 __ovld __cnfn min(half2, half2);
+half3 __ovld __cnfn min(half3, half3);
+half4 __ovld __cnfn min(half4, half4);
+half8 __ovld __cnfn min(half8, half8);
+half16 __ovld __cnfn min(half16, half16);
+half2 __ovld __cnfn min(half2, half);
+half3 __ovld __cnfn min(half3, half);
+half4 __ovld __cnfn min(half4, half);
+half8 __ovld __cnfn min(half8, half);
+half16 __ovld __cnfn min(half16, half);
#endif //cl_khr_fp16
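Illustrative usage of the floating-point min/max pair (a sketch, not part of the patched header):

float lo = min(3.0f, 1.5f);   // 1.5f
float hi = max(3.0f, 1.5f);   // 3.0f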
/**
@@ -10176,110 +10176,110 @@ half16 __ovld __cnfn min(half16 x, half y);
* in the range 0.0 ... 1.0, the return values are
* undefined.
*/
-float __ovld __cnfn mix(float x, float y, float a);
-float2 __ovld __cnfn mix(float2 x, float2 y, float2 a);
-float3 __ovld __cnfn mix(float3 x, float3 y, float3 a);
-float4 __ovld __cnfn mix(float4 x, float4 y, float4 a);
-float8 __ovld __cnfn mix(float8 x, float8 y, float8 a);
-float16 __ovld __cnfn mix(float16 x, float16 y, float16 a);
-float2 __ovld __cnfn mix(float2 x, float2 y, float a);
-float3 __ovld __cnfn mix(float3 x, float3 y, float a);
-float4 __ovld __cnfn mix(float4 x, float4 y, float a);
-float8 __ovld __cnfn mix(float8 x, float8 y, float a);
-float16 __ovld __cnfn mix(float16 x, float16 y, float a);
+float __ovld __cnfn mix(float, float, float);
+float2 __ovld __cnfn mix(float2, float2, float2);
+float3 __ovld __cnfn mix(float3, float3, float3);
+float4 __ovld __cnfn mix(float4, float4, float4);
+float8 __ovld __cnfn mix(float8, float8, float8);
+float16 __ovld __cnfn mix(float16, float16, float16);
+float2 __ovld __cnfn mix(float2, float2, float);
+float3 __ovld __cnfn mix(float3, float3, float);
+float4 __ovld __cnfn mix(float4, float4, float);
+float8 __ovld __cnfn mix(float8, float8, float);
+float16 __ovld __cnfn mix(float16, float16, float);
#ifdef cl_khr_fp64
-double __ovld __cnfn mix(double x, double y, double a);
-double2 __ovld __cnfn mix(double2 x, double2 y, double2 a);
-double3 __ovld __cnfn mix(double3 x, double3 y, double3 a);
-double4 __ovld __cnfn mix(double4 x, double4 y, double4 a);
-double8 __ovld __cnfn mix(double8 x, double8 y, double8 a);
-double16 __ovld __cnfn mix(double16 x, double16 y, double16 a);
-double2 __ovld __cnfn mix(double2 x, double2 y, double a);
-double3 __ovld __cnfn mix(double3 x, double3 y, double a);
-double4 __ovld __cnfn mix(double4 x, double4 y, double a);
-double8 __ovld __cnfn mix(double8 x, double8 y, double a);
-double16 __ovld __cnfn mix(double16 x, double16 y, double a);
+double __ovld __cnfn mix(double, double, double);
+double2 __ovld __cnfn mix(double2, double2, double2);
+double3 __ovld __cnfn mix(double3, double3, double3);
+double4 __ovld __cnfn mix(double4, double4, double4);
+double8 __ovld __cnfn mix(double8, double8, double8);
+double16 __ovld __cnfn mix(double16, double16, double16);
+double2 __ovld __cnfn mix(double2, double2, double);
+double3 __ovld __cnfn mix(double3, double3, double);
+double4 __ovld __cnfn mix(double4, double4, double);
+double8 __ovld __cnfn mix(double8, double8, double);
+double16 __ovld __cnfn mix(double16, double16, double);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn mix(half x, half y, half a);
-half2 __ovld __cnfn mix(half2 x, half2 y, half2 a);
-half3 __ovld __cnfn mix(half3 x, half3 y, half3 a);
-half4 __ovld __cnfn mix(half4 x, half4 y, half4 a);
-half8 __ovld __cnfn mix(half8 x, half8 y, half8 a);
-half16 __ovld __cnfn mix(half16 x, half16 y, half16 a);
-half2 __ovld __cnfn mix(half2 x, half2 y, half a);
-half3 __ovld __cnfn mix(half3 x, half3 y, half a);
-half4 __ovld __cnfn mix(half4 x, half4 y, half a);
-half8 __ovld __cnfn mix(half8 x, half8 y, half a);
-half16 __ovld __cnfn mix(half16 x, half16 y, half a);
+half __ovld __cnfn mix(half, half, half);
+half2 __ovld __cnfn mix(half2, half2, half2);
+half3 __ovld __cnfn mix(half3, half3, half3);
+half4 __ovld __cnfn mix(half4, half4, half4);
+half8 __ovld __cnfn mix(half8, half8, half8);
+half16 __ovld __cnfn mix(half16, half16, half16);
+half2 __ovld __cnfn mix(half2, half2, half);
+half3 __ovld __cnfn mix(half3, half3, half);
+half4 __ovld __cnfn mix(half4, half4, half);
+half8 __ovld __cnfn mix(half8, half8, half);
+half16 __ovld __cnfn mix(half16, half16, half);
#endif //cl_khr_fp16
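Illustrative usage (a sketch, not part of the patched header; per the OpenCL specification, mix(x, y, a) computes x + (y - x) * a):

float m = mix(10.0f, 20.0f, 0.25f);   // 10 + 10 * 0.25 == 12.5f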
/**
* Converts degrees to radians, i.e. (PI / 180) *
* degrees.
*/
-float __ovld __cnfn radians(float degrees);
-float2 __ovld __cnfn radians(float2 degrees);
-float3 __ovld __cnfn radians(float3 degrees);
-float4 __ovld __cnfn radians(float4 degrees);
-float8 __ovld __cnfn radians(float8 degrees);
-float16 __ovld __cnfn radians(float16 degrees);
+float __ovld __cnfn radians(float);
+float2 __ovld __cnfn radians(float2);
+float3 __ovld __cnfn radians(float3);
+float4 __ovld __cnfn radians(float4);
+float8 __ovld __cnfn radians(float8);
+float16 __ovld __cnfn radians(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn radians(double degrees);
-double2 __ovld __cnfn radians(double2 degrees);
-double3 __ovld __cnfn radians(double3 degrees);
-double4 __ovld __cnfn radians(double4 degrees);
-double8 __ovld __cnfn radians(double8 degrees);
-double16 __ovld __cnfn radians(double16 degrees);
+double __ovld __cnfn radians(double);
+double2 __ovld __cnfn radians(double2);
+double3 __ovld __cnfn radians(double3);
+double4 __ovld __cnfn radians(double4);
+double8 __ovld __cnfn radians(double8);
+double16 __ovld __cnfn radians(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn radians(half degrees);
-half2 __ovld __cnfn radians(half2 degrees);
-half3 __ovld __cnfn radians(half3 degrees);
-half4 __ovld __cnfn radians(half4 degrees);
-half8 __ovld __cnfn radians(half8 degrees);
-half16 __ovld __cnfn radians(half16 degrees);
+half __ovld __cnfn radians(half);
+half2 __ovld __cnfn radians(half2);
+half3 __ovld __cnfn radians(half3);
+half4 __ovld __cnfn radians(half4);
+half8 __ovld __cnfn radians(half8);
+half16 __ovld __cnfn radians(half16);
#endif //cl_khr_fp16
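The two conversions above are inverses of each other (an illustrative sketch, not part of the patched header):

float d = degrees(M_PI_F);   // 180.0f, up to rounding
float r = radians(180.0f);   // M_PI_F, up to rounding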
/**
* Returns 0.0 if x < edge, otherwise it returns 1.0.
*/
-float __ovld __cnfn step(float edge, float x);
-float2 __ovld __cnfn step(float2 edge, float2 x);
-float3 __ovld __cnfn step(float3 edge, float3 x);
-float4 __ovld __cnfn step(float4 edge, float4 x);
-float8 __ovld __cnfn step(float8 edge, float8 x);
-float16 __ovld __cnfn step(float16 edge, float16 x);
-float2 __ovld __cnfn step(float edge, float2 x);
-float3 __ovld __cnfn step(float edge, float3 x);
-float4 __ovld __cnfn step(float edge, float4 x);
-float8 __ovld __cnfn step(float edge, float8 x);
-float16 __ovld __cnfn step(float edge, float16 x);
+float __ovld __cnfn step(float, float);
+float2 __ovld __cnfn step(float2, float2);
+float3 __ovld __cnfn step(float3, float3);
+float4 __ovld __cnfn step(float4, float4);
+float8 __ovld __cnfn step(float8, float8);
+float16 __ovld __cnfn step(float16, float16);
+float2 __ovld __cnfn step(float, float2);
+float3 __ovld __cnfn step(float, float3);
+float4 __ovld __cnfn step(float, float4);
+float8 __ovld __cnfn step(float, float8);
+float16 __ovld __cnfn step(float, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn step(double edge, double x);
-double2 __ovld __cnfn step(double2 edge, double2 x);
-double3 __ovld __cnfn step(double3 edge, double3 x);
-double4 __ovld __cnfn step(double4 edge, double4 x);
-double8 __ovld __cnfn step(double8 edge, double8 x);
-double16 __ovld __cnfn step(double16 edge, double16 x);
-double2 __ovld __cnfn step(double edge, double2 x);
-double3 __ovld __cnfn step(double edge, double3 x);
-double4 __ovld __cnfn step(double edge, double4 x);
-double8 __ovld __cnfn step(double edge, double8 x);
-double16 __ovld __cnfn step(double edge, double16 x);
+double __ovld __cnfn step(double, double);
+double2 __ovld __cnfn step(double2, double2);
+double3 __ovld __cnfn step(double3, double3);
+double4 __ovld __cnfn step(double4, double4);
+double8 __ovld __cnfn step(double8, double8);
+double16 __ovld __cnfn step(double16, double16);
+double2 __ovld __cnfn step(double, double2);
+double3 __ovld __cnfn step(double, double3);
+double4 __ovld __cnfn step(double, double4);
+double8 __ovld __cnfn step(double, double8);
+double16 __ovld __cnfn step(double, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn step(half edge, half x);
-half2 __ovld __cnfn step(half2 edge, half2 x);
-half3 __ovld __cnfn step(half3 edge, half3 x);
-half4 __ovld __cnfn step(half4 edge, half4 x);
-half8 __ovld __cnfn step(half8 edge, half8 x);
-half16 __ovld __cnfn step(half16 edge, half16 x);
-half2 __ovld __cnfn step(half edge, half2 x);
-half3 __ovld __cnfn step(half edge, half3 x);
-half4 __ovld __cnfn step(half edge, half4 x);
-half8 __ovld __cnfn step(half edge, half8 x);
-half16 __ovld __cnfn step(half edge, half16 x);
+half __ovld __cnfn step(half, half);
+half2 __ovld __cnfn step(half2, half2);
+half3 __ovld __cnfn step(half3, half3);
+half4 __ovld __cnfn step(half4, half4);
+half8 __ovld __cnfn step(half8, half8);
+half16 __ovld __cnfn step(half16, half16);
+half2 __ovld __cnfn step(half, half2);
+half3 __ovld __cnfn step(half, half3);
+half4 __ovld __cnfn step(half, half4);
+half8 __ovld __cnfn step(half, half8);
+half16 __ovld __cnfn step(half, half16);
#endif //cl_khr_fp16
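Illustrative usage (a sketch, not part of the patched header):

float  s = step(0.5f, 0.3f);                              // 0.0f, since x < edge
float4 v = step(0.5f, (float4)(0.0f, 0.5f, 0.7f, 1.0f));  // (0.0f, 1.0f, 1.0f, 1.0f)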
/**
@@ -10295,69 +10295,69 @@ half16 __ovld __cnfn step(half edge, half16 x);
* Results are undefined if edge0 >= edge1 or if x,
* edge0 or edge1 is a NaN.
*/
-float __ovld __cnfn smoothstep(float edge0, float edge1, float x);
-float2 __ovld __cnfn smoothstep(float2 edge0, float2 edge1, float2 x);
-float3 __ovld __cnfn smoothstep(float3 edge0, float3 edge1, float3 x);
-float4 __ovld __cnfn smoothstep(float4 edge0, float4 edge1, float4 x);
-float8 __ovld __cnfn smoothstep(float8 edge0, float8 edge1, float8 x);
-float16 __ovld __cnfn smoothstep(float16 edge0, float16 edge1, float16 x);
-float2 __ovld __cnfn smoothstep(float edge0, float edge1, float2 x);
-float3 __ovld __cnfn smoothstep(float edge0, float edge1, float3 x);
-float4 __ovld __cnfn smoothstep(float edge0, float edge1, float4 x);
-float8 __ovld __cnfn smoothstep(float edge0, float edge1, float8 x);
-float16 __ovld __cnfn smoothstep(float edge0, float edge1, float16 x);
+float __ovld __cnfn smoothstep(float, float, float);
+float2 __ovld __cnfn smoothstep(float2, float2, float2);
+float3 __ovld __cnfn smoothstep(float3, float3, float3);
+float4 __ovld __cnfn smoothstep(float4, float4, float4);
+float8 __ovld __cnfn smoothstep(float8, float8, float8);
+float16 __ovld __cnfn smoothstep(float16, float16, float16);
+float2 __ovld __cnfn smoothstep(float, float, float2);
+float3 __ovld __cnfn smoothstep(float, float, float3);
+float4 __ovld __cnfn smoothstep(float, float, float4);
+float8 __ovld __cnfn smoothstep(float, float, float8);
+float16 __ovld __cnfn smoothstep(float, float, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn smoothstep(double edge0, double edge1, double x);
-double2 __ovld __cnfn smoothstep(double2 edge0, double2 edge1, double2 x);
-double3 __ovld __cnfn smoothstep(double3 edge0, double3 edge1, double3 x);
-double4 __ovld __cnfn smoothstep(double4 edge0, double4 edge1, double4 x);
-double8 __ovld __cnfn smoothstep(double8 edge0, double8 edge1, double8 x);
-double16 __ovld __cnfn smoothstep(double16 edge0, double16 edge1, double16 x);
-double2 __ovld __cnfn smoothstep(double edge0, double edge1, double2 x);
-double3 __ovld __cnfn smoothstep(double edge0, double edge1, double3 x);
-double4 __ovld __cnfn smoothstep(double edge0, double edge1, double4 x);
-double8 __ovld __cnfn smoothstep(double edge0, double edge1, double8 x);
-double16 __ovld __cnfn smoothstep(double edge0, double edge1, double16 x);
+double __ovld __cnfn smoothstep(double, double, double);
+double2 __ovld __cnfn smoothstep(double2, double2, double2);
+double3 __ovld __cnfn smoothstep(double3, double3, double3);
+double4 __ovld __cnfn smoothstep(double4, double4, double4);
+double8 __ovld __cnfn smoothstep(double8, double8, double8);
+double16 __ovld __cnfn smoothstep(double16, double16, double16);
+double2 __ovld __cnfn smoothstep(double, double, double2);
+double3 __ovld __cnfn smoothstep(double, double, double3);
+double4 __ovld __cnfn smoothstep(double, double, double4);
+double8 __ovld __cnfn smoothstep(double, double, double8);
+double16 __ovld __cnfn smoothstep(double, double, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn smoothstep(half edge0, half edge1, half x);
-half2 __ovld __cnfn smoothstep(half2 edge0, half2 edge1, half2 x);
-half3 __ovld __cnfn smoothstep(half3 edge0, half3 edge1, half3 x);
-half4 __ovld __cnfn smoothstep(half4 edge0, half4 edge1, half4 x);
-half8 __ovld __cnfn smoothstep(half8 edge0, half8 edge1, half8 x);
-half16 __ovld __cnfn smoothstep(half16 edge0, half16 edge1, half16 x);
-half2 __ovld __cnfn smoothstep(half edge0, half edge1, half2 x);
-half3 __ovld __cnfn smoothstep(half edge0, half edge1, half3 x);
-half4 __ovld __cnfn smoothstep(half edge0, half edge1, half4 x);
-half8 __ovld __cnfn smoothstep(half edge0, half edge1, half8 x);
-half16 __ovld __cnfn smoothstep(half edge0, half edge1, half16 x);
+half __ovld __cnfn smoothstep(half, half, half);
+half2 __ovld __cnfn smoothstep(half2, half2, half2);
+half3 __ovld __cnfn smoothstep(half3, half3, half3);
+half4 __ovld __cnfn smoothstep(half4, half4, half4);
+half8 __ovld __cnfn smoothstep(half8, half8, half8);
+half16 __ovld __cnfn smoothstep(half16, half16, half16);
+half2 __ovld __cnfn smoothstep(half, half, half2);
+half3 __ovld __cnfn smoothstep(half, half, half3);
+half4 __ovld __cnfn smoothstep(half, half, half4);
+half8 __ovld __cnfn smoothstep(half, half, half8);
+half16 __ovld __cnfn smoothstep(half, half, half16);
#endif //cl_khr_fp16
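A worked example (illustrative only, not part of the patched header; per the OpenCL specification the result is t * t * (3 - 2 * t) with t = clamp((x - edge0) / (edge1 - edge0), 0, 1)):

float s = smoothstep(0.0f, 1.0f, 0.5f);   // t == 0.5, so 0.25f * 2.0f == 0.5f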
/**
* Returns 1.0 if x > 0, -0.0 if x = -0.0, +0.0 if x =
* +0.0, or -1.0 if x < 0. Returns 0.0 if x is a NaN.
*/
-float __ovld __cnfn sign(float x);
-float2 __ovld __cnfn sign(float2 x);
-float3 __ovld __cnfn sign(float3 x);
-float4 __ovld __cnfn sign(float4 x);
-float8 __ovld __cnfn sign(float8 x);
-float16 __ovld __cnfn sign(float16 x);
+float __ovld __cnfn sign(float);
+float2 __ovld __cnfn sign(float2);
+float3 __ovld __cnfn sign(float3);
+float4 __ovld __cnfn sign(float4);
+float8 __ovld __cnfn sign(float8);
+float16 __ovld __cnfn sign(float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn sign(double x);
-double2 __ovld __cnfn sign(double2 x);
-double3 __ovld __cnfn sign(double3 x);
-double4 __ovld __cnfn sign(double4 x);
-double8 __ovld __cnfn sign(double8 x);
-double16 __ovld __cnfn sign(double16 x);
+double __ovld __cnfn sign(double);
+double2 __ovld __cnfn sign(double2);
+double3 __ovld __cnfn sign(double3);
+double4 __ovld __cnfn sign(double4);
+double8 __ovld __cnfn sign(double8);
+double16 __ovld __cnfn sign(double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn sign(half x);
-half2 __ovld __cnfn sign(half2 x);
-half3 __ovld __cnfn sign(half3 x);
-half4 __ovld __cnfn sign(half4 x);
-half8 __ovld __cnfn sign(half8 x);
-half16 __ovld __cnfn sign(half16 x);
+half __ovld __cnfn sign(half);
+half2 __ovld __cnfn sign(half2);
+half3 __ovld __cnfn sign(half3);
+half4 __ovld __cnfn sign(half4);
+half8 __ovld __cnfn sign(half8);
+half16 __ovld __cnfn sign(half16);
#endif //cl_khr_fp16
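The special cases above, exercised (an illustrative sketch, not part of the patched header):

float4 s = sign((float4)(-3.0f, -0.0f, 0.0f, NAN));   // (-1.0f, -0.0f, 0.0f, 0.0f)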
// OpenCL v1.1 s6.11.5, v1.2 s6.12.5, v2.0 s6.13.5 - Geometric Functions
@@ -10366,127 +10366,127 @@ half16 __ovld __cnfn sign(half16 x);
* Returns the cross product of p0.xyz and p1.xyz. The
* w component of the float4 result will be 0.0.
*/
-float4 __ovld __cnfn cross(float4 p0, float4 p1);
-float3 __ovld __cnfn cross(float3 p0, float3 p1);
+float4 __ovld __cnfn cross(float4, float4);
+float3 __ovld __cnfn cross(float3, float3);
#ifdef cl_khr_fp64
-double4 __ovld __cnfn cross(double4 p0, double4 p1);
-double3 __ovld __cnfn cross(double3 p0, double3 p1);
+double4 __ovld __cnfn cross(double4, double4);
+double3 __ovld __cnfn cross(double3, double3);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half4 __ovld __cnfn cross(half4 p0, half4 p1);
-half3 __ovld __cnfn cross(half3 p0, half3 p1);
+half4 __ovld __cnfn cross(half4, half4);
+half3 __ovld __cnfn cross(half3, half3);
#endif //cl_khr_fp16
/**
* Compute dot product.
*/
-float __ovld __cnfn dot(float p0, float p1);
-float __ovld __cnfn dot(float2 p0, float2 p1);
-float __ovld __cnfn dot(float3 p0, float3 p1);
-float __ovld __cnfn dot(float4 p0, float4 p1);
+float __ovld __cnfn dot(float, float);
+float __ovld __cnfn dot(float2, float2);
+float __ovld __cnfn dot(float3, float3);
+float __ovld __cnfn dot(float4, float4);
#ifdef cl_khr_fp64
-double __ovld __cnfn dot(double p0, double p1);
-double __ovld __cnfn dot(double2 p0, double2 p1);
-double __ovld __cnfn dot(double3 p0, double3 p1);
-double __ovld __cnfn dot(double4 p0, double4 p1);
+double __ovld __cnfn dot(double, double);
+double __ovld __cnfn dot(double2, double2);
+double __ovld __cnfn dot(double3, double3);
+double __ovld __cnfn dot(double4, double4);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn dot(half p0, half p1);
-half __ovld __cnfn dot(half2 p0, half2 p1);
-half __ovld __cnfn dot(half3 p0, half3 p1);
-half __ovld __cnfn dot(half4 p0, half4 p1);
+half __ovld __cnfn dot(half, half);
+half __ovld __cnfn dot(half2, half2);
+half __ovld __cnfn dot(half3, half3);
+half __ovld __cnfn dot(half4, half4);
#endif //cl_khr_fp16
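Illustrative usage of the two products above (a sketch, not part of the patched header):

float3 n = cross((float3)(1.0f, 0.0f, 0.0f), (float3)(0.0f, 1.0f, 0.0f));   // (0.0f, 0.0f, 1.0f)
float  d = dot((float4)(1.0f, 2.0f, 3.0f, 4.0f), (float4)(1.0f));           // 10.0f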
/**
* Returns the distance between p0 and p1. This is
* calculated as length(p0 - p1).
*/
-float __ovld __cnfn distance(float p0, float p1);
-float __ovld __cnfn distance(float2 p0, float2 p1);
-float __ovld __cnfn distance(float3 p0, float3 p1);
-float __ovld __cnfn distance(float4 p0, float4 p1);
+float __ovld __cnfn distance(float, float);
+float __ovld __cnfn distance(float2, float2);
+float __ovld __cnfn distance(float3, float3);
+float __ovld __cnfn distance(float4, float4);
#ifdef cl_khr_fp64
-double __ovld __cnfn distance(double p0, double p1);
-double __ovld __cnfn distance(double2 p0, double2 p1);
-double __ovld __cnfn distance(double3 p0, double3 p1);
-double __ovld __cnfn distance(double4 p0, double4 p1);
+double __ovld __cnfn distance(double, double);
+double __ovld __cnfn distance(double2, double2);
+double __ovld __cnfn distance(double3, double3);
+double __ovld __cnfn distance(double4, double4);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn distance(half p0, half p1);
-half __ovld __cnfn distance(half2 p0, half2 p1);
-half __ovld __cnfn distance(half3 p0, half3 p1);
-half __ovld __cnfn distance(half4 p0, half4 p1);
+half __ovld __cnfn distance(half, half);
+half __ovld __cnfn distance(half2, half2);
+half __ovld __cnfn distance(half3, half3);
+half __ovld __cnfn distance(half4, half4);
#endif //cl_khr_fp16
/**
* Returns the length of vector p, i.e.,
* sqrt(p.x^2 + p.y^2 + ...)
*/
-float __ovld __cnfn length(float p);
-float __ovld __cnfn length(float2 p);
-float __ovld __cnfn length(float3 p);
-float __ovld __cnfn length(float4 p);
+float __ovld __cnfn length(float);
+float __ovld __cnfn length(float2);
+float __ovld __cnfn length(float3);
+float __ovld __cnfn length(float4);
#ifdef cl_khr_fp64
-double __ovld __cnfn length(double p);
-double __ovld __cnfn length(double2 p);
-double __ovld __cnfn length(double3 p);
-double __ovld __cnfn length(double4 p);
+double __ovld __cnfn length(double);
+double __ovld __cnfn length(double2);
+double __ovld __cnfn length(double3);
+double __ovld __cnfn length(double4);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn length(half p);
-half __ovld __cnfn length(half2 p);
-half __ovld __cnfn length(half3 p);
-half __ovld __cnfn length(half4 p);
+half __ovld __cnfn length(half);
+half __ovld __cnfn length(half2);
+half __ovld __cnfn length(half3);
+half __ovld __cnfn length(half4);
#endif //cl_khr_fp16
/**
* Returns a vector in the same direction as p but with a
* length of 1.
*/
-float __ovld __cnfn normalize(float p);
-float2 __ovld __cnfn normalize(float2 p);
-float3 __ovld __cnfn normalize(float3 p);
-float4 __ovld __cnfn normalize(float4 p);
+float __ovld __cnfn normalize(float);
+float2 __ovld __cnfn normalize(float2);
+float3 __ovld __cnfn normalize(float3);
+float4 __ovld __cnfn normalize(float4);
#ifdef cl_khr_fp64
-double __ovld __cnfn normalize(double p);
-double2 __ovld __cnfn normalize(double2 p);
-double3 __ovld __cnfn normalize(double3 p);
-double4 __ovld __cnfn normalize(double4 p);
+double __ovld __cnfn normalize(double);
+double2 __ovld __cnfn normalize(double2);
+double3 __ovld __cnfn normalize(double3);
+double4 __ovld __cnfn normalize(double4);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn normalize(half p);
-half2 __ovld __cnfn normalize(half2 p);
-half3 __ovld __cnfn normalize(half3 p);
-half4 __ovld __cnfn normalize(half4 p);
+half __ovld __cnfn normalize(half);
+half2 __ovld __cnfn normalize(half2);
+half3 __ovld __cnfn normalize(half3);
+half4 __ovld __cnfn normalize(half4);
#endif //cl_khr_fp16
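The three geometric helpers above, on a 3-4-5 triangle (an illustrative sketch, not part of the patched header):

float3 p = (float3)(3.0f, 4.0f, 0.0f);
float  l = length(p);                     // sqrt(9 + 16) == 5.0f
float3 u = normalize(p);                  // (0.6f, 0.8f, 0.0f), unit length
float  d = distance(p, (float3)(0.0f));   // length(p - 0) == 5.0f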
/**
* Returns fast_length(p0 - p1).
*/
-float __ovld __cnfn fast_distance(float p0, float p1);
-float __ovld __cnfn fast_distance(float2 p0, float2 p1);
-float __ovld __cnfn fast_distance(float3 p0, float3 p1);
-float __ovld __cnfn fast_distance(float4 p0, float4 p1);
+float __ovld __cnfn fast_distance(float, float);
+float __ovld __cnfn fast_distance(float2, float2);
+float __ovld __cnfn fast_distance(float3, float3);
+float __ovld __cnfn fast_distance(float4, float4);
#ifdef cl_khr_fp16
-half __ovld __cnfn fast_distance(half p0, half p1);
-half __ovld __cnfn fast_distance(half2 p0, half2 p1);
-half __ovld __cnfn fast_distance(half3 p0, half3 p1);
-half __ovld __cnfn fast_distance(half4 p0, half4 p1);
+half __ovld __cnfn fast_distance(half, half);
+half __ovld __cnfn fast_distance(half2, half2);
+half __ovld __cnfn fast_distance(half3, half3);
+half __ovld __cnfn fast_distance(half4, half4);
#endif //cl_khr_fp16
/**
* Returns the length of vector p computed as:
* half_sqrt(p.x^2 + p.y^2 + ...)
*/
-float __ovld __cnfn fast_length(float p);
-float __ovld __cnfn fast_length(float2 p);
-float __ovld __cnfn fast_length(float3 p);
-float __ovld __cnfn fast_length(float4 p);
+float __ovld __cnfn fast_length(float);
+float __ovld __cnfn fast_length(float2);
+float __ovld __cnfn fast_length(float3);
+float __ovld __cnfn fast_length(float4);
#ifdef cl_khr_fp16
-half __ovld __cnfn fast_length(half p);
-half __ovld __cnfn fast_length(half2 p);
-half __ovld __cnfn fast_length(half3 p);
-half __ovld __cnfn fast_length(half4 p);
+half __ovld __cnfn fast_length(half);
+half __ovld __cnfn fast_length(half2);
+half __ovld __cnfn fast_length(half3);
+half __ovld __cnfn fast_length(half4);
#endif //cl_khr_fp16
/**
@@ -10510,15 +10510,15 @@ half __ovld __cnfn fast_length(half4 p);
* less than sqrt(FLT_MIN) may be flushed to zero
* before proceeding with the calculation.
*/
-float __ovld __cnfn fast_normalize(float p);
-float2 __ovld __cnfn fast_normalize(float2 p);
-float3 __ovld __cnfn fast_normalize(float3 p);
-float4 __ovld __cnfn fast_normalize(float4 p);
+float __ovld __cnfn fast_normalize(float);
+float2 __ovld __cnfn fast_normalize(float2);
+float3 __ovld __cnfn fast_normalize(float3);
+float4 __ovld __cnfn fast_normalize(float4);
#ifdef cl_khr_fp16
-half __ovld __cnfn fast_normalize(half p);
-half2 __ovld __cnfn fast_normalize(half2 p);
-half3 __ovld __cnfn fast_normalize(half3 p);
-half4 __ovld __cnfn fast_normalize(half4 p);
+half __ovld __cnfn fast_normalize(half);
+half2 __ovld __cnfn fast_normalize(half2);
+half3 __ovld __cnfn fast_normalize(half3);
+half4 __ovld __cnfn fast_normalize(half4);
#endif //cl_khr_fp16
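/*
 * Usage sketch (a minimal illustration, not part of this diff): the fast_*
 * variants trade accuracy for speed via half-precision square-root steps,
 * so results may differ from length()/normalize() in the low bits, and
 * tiny inputs may flush to zero as noted above:
 *
 *   float3 v = (float3)(1.0f, 2.0f, 2.0f);
 *   float3 n = fast_normalize(v); // approximately (1/3, 2/3, 2/3)
 */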
// OpenCL v1.1 s6.11.6, v1.2 s6.12.6, v2.0 s6.13.6 - Relational Functions
@@ -10527,184 +10527,184 @@ half4 __ovld __cnfn fast_normalize(half4 p);
* intn isequal (floatn x, floatn y)
* Returns the component-wise compare of x == y.
*/
-int __ovld __cnfn isequal(float x, float y);
-int2 __ovld __cnfn isequal(float2 x, float2 y);
-int3 __ovld __cnfn isequal(float3 x, float3 y);
-int4 __ovld __cnfn isequal(float4 x, float4 y);
-int8 __ovld __cnfn isequal(float8 x, float8 y);
-int16 __ovld __cnfn isequal(float16 x, float16 y);
+int __ovld __cnfn isequal(float, float);
+int2 __ovld __cnfn isequal(float2, float2);
+int3 __ovld __cnfn isequal(float3, float3);
+int4 __ovld __cnfn isequal(float4, float4);
+int8 __ovld __cnfn isequal(float8, float8);
+int16 __ovld __cnfn isequal(float16, float16);
#ifdef cl_khr_fp64
-int __ovld __cnfn isequal(double x, double y);
-long2 __ovld __cnfn isequal(double2 x, double2 y);
-long3 __ovld __cnfn isequal(double3 x, double3 y);
-long4 __ovld __cnfn isequal(double4 x, double4 y);
-long8 __ovld __cnfn isequal(double8 x, double8 y);
-long16 __ovld __cnfn isequal(double16 x, double16 y);
+int __ovld __cnfn isequal(double, double);
+long2 __ovld __cnfn isequal(double2, double2);
+long3 __ovld __cnfn isequal(double3, double3);
+long4 __ovld __cnfn isequal(double4, double4);
+long8 __ovld __cnfn isequal(double8, double8);
+long16 __ovld __cnfn isequal(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-int __ovld __cnfn isequal(half x, half y);
-short2 __ovld __cnfn isequal(half2 x, half2 y);
-short3 __ovld __cnfn isequal(half3 x, half3 y);
-short4 __ovld __cnfn isequal(half4 x, half4 y);
-short8 __ovld __cnfn isequal(half8 x, half8 y);
-short16 __ovld __cnfn isequal(half16 x, half16 y);
+int __ovld __cnfn isequal(half, half);
+short2 __ovld __cnfn isequal(half2, half2);
+short3 __ovld __cnfn isequal(half3, half3);
+short4 __ovld __cnfn isequal(half4, half4);
+short8 __ovld __cnfn isequal(half8, half8);
+short16 __ovld __cnfn isequal(half16, half16);
#endif //cl_khr_fp16
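/*
 * Usage sketch (a minimal illustration, not part of this diff): the scalar
 * relational forms return 1/0, while the vector forms return a
 * component-wise -1 (all bits set) for true and 0 for false, which is the
 * mask convention select() and bitselect() consume:
 *
 *   int  s = isequal(1.0f, 1.0f);                           // 1
 *   int4 m = isequal((float4)(1, 2, 3, 4),
 *                    (float4)(1, 0, 3, 0));                 // (-1, 0, -1, 0)
 */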
/**
* Returns the component-wise compare of x != y.
*/
-int __ovld __cnfn isnotequal(float x, float y);
-int2 __ovld __cnfn isnotequal(float2 x, float2 y);
-int3 __ovld __cnfn isnotequal(float3 x, float3 y);
-int4 __ovld __cnfn isnotequal(float4 x, float4 y);
-int8 __ovld __cnfn isnotequal(float8 x, float8 y);
-int16 __ovld __cnfn isnotequal(float16 x, float16 y);
+int __ovld __cnfn isnotequal(float, float);
+int2 __ovld __cnfn isnotequal(float2, float2);
+int3 __ovld __cnfn isnotequal(float3, float3);
+int4 __ovld __cnfn isnotequal(float4, float4);
+int8 __ovld __cnfn isnotequal(float8, float8);
+int16 __ovld __cnfn isnotequal(float16, float16);
#ifdef cl_khr_fp64
-int __ovld __cnfn isnotequal(double x, double y);
-long2 __ovld __cnfn isnotequal(double2 x, double2 y);
-long3 __ovld __cnfn isnotequal(double3 x, double3 y);
-long4 __ovld __cnfn isnotequal(double4 x, double4 y);
-long8 __ovld __cnfn isnotequal(double8 x, double8 y);
-long16 __ovld __cnfn isnotequal(double16 x, double16 y);
+int __ovld __cnfn isnotequal(double, double);
+long2 __ovld __cnfn isnotequal(double2, double2);
+long3 __ovld __cnfn isnotequal(double3, double3);
+long4 __ovld __cnfn isnotequal(double4, double4);
+long8 __ovld __cnfn isnotequal(double8, double8);
+long16 __ovld __cnfn isnotequal(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-int __ovld __cnfn isnotequal(half x, half y);
-short2 __ovld __cnfn isnotequal(half2 x, half2 y);
-short3 __ovld __cnfn isnotequal(half3 x, half3 y);
-short4 __ovld __cnfn isnotequal(half4 x, half4 y);
-short8 __ovld __cnfn isnotequal(half8 x, half8 y);
-short16 __ovld __cnfn isnotequal(half16 x, half16 y);
+int __ovld __cnfn isnotequal(half, half);
+short2 __ovld __cnfn isnotequal(half2, half2);
+short3 __ovld __cnfn isnotequal(half3, half3);
+short4 __ovld __cnfn isnotequal(half4, half4);
+short8 __ovld __cnfn isnotequal(half8, half8);
+short16 __ovld __cnfn isnotequal(half16, half16);
#endif //cl_khr_fp16
/**
* Returns the component-wise compare of x > y.
*/
-int __ovld __cnfn isgreater(float x, float y);
-int2 __ovld __cnfn isgreater(float2 x, float2 y);
-int3 __ovld __cnfn isgreater(float3 x, float3 y);
-int4 __ovld __cnfn isgreater(float4 x, float4 y);
-int8 __ovld __cnfn isgreater(float8 x, float8 y);
-int16 __ovld __cnfn isgreater(float16 x, float16 y);
+int __ovld __cnfn isgreater(float, float);
+int2 __ovld __cnfn isgreater(float2, float2);
+int3 __ovld __cnfn isgreater(float3, float3);
+int4 __ovld __cnfn isgreater(float4, float4);
+int8 __ovld __cnfn isgreater(float8, float8);
+int16 __ovld __cnfn isgreater(float16, float16);
#ifdef cl_khr_fp64
-int __ovld __cnfn isgreater(double x, double y);
-long2 __ovld __cnfn isgreater(double2 x, double2 y);
-long3 __ovld __cnfn isgreater(double3 x, double3 y);
-long4 __ovld __cnfn isgreater(double4 x, double4 y);
-long8 __ovld __cnfn isgreater(double8 x, double8 y);
-long16 __ovld __cnfn isgreater(double16 x, double16 y);
+int __ovld __cnfn isgreater(double, double);
+long2 __ovld __cnfn isgreater(double2, double2);
+long3 __ovld __cnfn isgreater(double3, double3);
+long4 __ovld __cnfn isgreater(double4, double4);
+long8 __ovld __cnfn isgreater(double8, double8);
+long16 __ovld __cnfn isgreater(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-int __ovld __cnfn isgreater(half x, half y);
-short2 __ovld __cnfn isgreater(half2 x, half2 y);
-short3 __ovld __cnfn isgreater(half3 x, half3 y);
-short4 __ovld __cnfn isgreater(half4 x, half4 y);
-short8 __ovld __cnfn isgreater(half8 x, half8 y);
-short16 __ovld __cnfn isgreater(half16 x, half16 y);
+int __ovld __cnfn isgreater(half, half);
+short2 __ovld __cnfn isgreater(half2, half2);
+short3 __ovld __cnfn isgreater(half3, half3);
+short4 __ovld __cnfn isgreater(half4, half4);
+short8 __ovld __cnfn isgreater(half8, half8);
+short16 __ovld __cnfn isgreater(half16, half16);
#endif //cl_khr_fp16
/**
* Returns the component-wise compare of x >= y.
*/
-int __ovld __cnfn isgreaterequal(float x, float y);
-int2 __ovld __cnfn isgreaterequal(float2 x, float2 y);
-int3 __ovld __cnfn isgreaterequal(float3 x, float3 y);
-int4 __ovld __cnfn isgreaterequal(float4 x, float4 y);
-int8 __ovld __cnfn isgreaterequal(float8 x, float8 y);
-int16 __ovld __cnfn isgreaterequal(float16 x, float16 y);
+int __ovld __cnfn isgreaterequal(float, float);
+int2 __ovld __cnfn isgreaterequal(float2, float2);
+int3 __ovld __cnfn isgreaterequal(float3, float3);
+int4 __ovld __cnfn isgreaterequal(float4, float4);
+int8 __ovld __cnfn isgreaterequal(float8, float8);
+int16 __ovld __cnfn isgreaterequal(float16, float16);
#ifdef cl_khr_fp64
-int __ovld __cnfn isgreaterequal(double x, double y);
-long2 __ovld __cnfn isgreaterequal(double2 x, double2 y);
-long3 __ovld __cnfn isgreaterequal(double3 x, double3 y);
-long4 __ovld __cnfn isgreaterequal(double4 x, double4 y);
-long8 __ovld __cnfn isgreaterequal(double8 x, double8 y);
-long16 __ovld __cnfn isgreaterequal(double16 x, double16 y);
+int __ovld __cnfn isgreaterequal(double, double);
+long2 __ovld __cnfn isgreaterequal(double2, double2);
+long3 __ovld __cnfn isgreaterequal(double3, double3);
+long4 __ovld __cnfn isgreaterequal(double4, double4);
+long8 __ovld __cnfn isgreaterequal(double8, double8);
+long16 __ovld __cnfn isgreaterequal(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-int __ovld __cnfn isgreaterequal(half x, half y);
-short2 __ovld __cnfn isgreaterequal(half2 x, half2 y);
-short3 __ovld __cnfn isgreaterequal(half3 x, half3 y);
-short4 __ovld __cnfn isgreaterequal(half4 x, half4 y);
-short8 __ovld __cnfn isgreaterequal(half8 x, half8 y);
-short16 __ovld __cnfn isgreaterequal(half16 x, half16 y);
+int __ovld __cnfn isgreaterequal(half, half);
+short2 __ovld __cnfn isgreaterequal(half2, half2);
+short3 __ovld __cnfn isgreaterequal(half3, half3);
+short4 __ovld __cnfn isgreaterequal(half4, half4);
+short8 __ovld __cnfn isgreaterequal(half8, half8);
+short16 __ovld __cnfn isgreaterequal(half16, half16);
#endif //cl_khr_fp16
/**
* Returns the component-wise compare of x < y.
*/
-int __ovld __cnfn isless(float x, float y);
-int2 __ovld __cnfn isless(float2 x, float2 y);
-int3 __ovld __cnfn isless(float3 x, float3 y);
-int4 __ovld __cnfn isless(float4 x, float4 y);
-int8 __ovld __cnfn isless(float8 x, float8 y);
-int16 __ovld __cnfn isless(float16 x, float16 y);
+int __ovld __cnfn isless(float, float);
+int2 __ovld __cnfn isless(float2, float2);
+int3 __ovld __cnfn isless(float3, float3);
+int4 __ovld __cnfn isless(float4, float4);
+int8 __ovld __cnfn isless(float8, float8);
+int16 __ovld __cnfn isless(float16, float16);
#ifdef cl_khr_fp64
-int __ovld __cnfn isless(double x, double y);
-long2 __ovld __cnfn isless(double2 x, double2 y);
-long3 __ovld __cnfn isless(double3 x, double3 y);
-long4 __ovld __cnfn isless(double4 x, double4 y);
-long8 __ovld __cnfn isless(double8 x, double8 y);
-long16 __ovld __cnfn isless(double16 x, double16 y);
+int __ovld __cnfn isless(double, double);
+long2 __ovld __cnfn isless(double2, double2);
+long3 __ovld __cnfn isless(double3, double3);
+long4 __ovld __cnfn isless(double4, double4);
+long8 __ovld __cnfn isless(double8, double8);
+long16 __ovld __cnfn isless(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-int __ovld __cnfn isless(half x, half y);
-short2 __ovld __cnfn isless(half2 x, half2 y);
-short3 __ovld __cnfn isless(half3 x, half3 y);
-short4 __ovld __cnfn isless(half4 x, half4 y);
-short8 __ovld __cnfn isless(half8 x, half8 y);
-short16 __ovld __cnfn isless(half16 x, half16 y);
+int __ovld __cnfn isless(half, half);
+short2 __ovld __cnfn isless(half2, half2);
+short3 __ovld __cnfn isless(half3, half3);
+short4 __ovld __cnfn isless(half4, half4);
+short8 __ovld __cnfn isless(half8, half8);
+short16 __ovld __cnfn isless(half16, half16);
#endif //cl_khr_fp16
/**
* Returns the component-wise compare of x <= y.
*/
-int __ovld __cnfn islessequal(float x, float y);
-int2 __ovld __cnfn islessequal(float2 x, float2 y);
-int3 __ovld __cnfn islessequal(float3 x, float3 y);
-int4 __ovld __cnfn islessequal(float4 x, float4 y);
-int8 __ovld __cnfn islessequal(float8 x, float8 y);
-int16 __ovld __cnfn islessequal(float16 x, float16 y);
+int __ovld __cnfn islessequal(float, float);
+int2 __ovld __cnfn islessequal(float2, float2);
+int3 __ovld __cnfn islessequal(float3, float3);
+int4 __ovld __cnfn islessequal(float4, float4);
+int8 __ovld __cnfn islessequal(float8, float8);
+int16 __ovld __cnfn islessequal(float16, float16);
#ifdef cl_khr_fp64
-int __ovld __cnfn islessequal(double x, double y);
-long2 __ovld __cnfn islessequal(double2 x, double2 y);
-long3 __ovld __cnfn islessequal(double3 x, double3 y);
-long4 __ovld __cnfn islessequal(double4 x, double4 y);
-long8 __ovld __cnfn islessequal(double8 x, double8 y);
-long16 __ovld __cnfn islessequal(double16 x, double16 y);
+int __ovld __cnfn islessequal(double, double);
+long2 __ovld __cnfn islessequal(double2, double2);
+long3 __ovld __cnfn islessequal(double3, double3);
+long4 __ovld __cnfn islessequal(double4, double4);
+long8 __ovld __cnfn islessequal(double8, double8);
+long16 __ovld __cnfn islessequal(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-int __ovld __cnfn islessequal(half x, half y);
-short2 __ovld __cnfn islessequal(half2 x, half2 y);
-short3 __ovld __cnfn islessequal(half3 x, half3 y);
-short4 __ovld __cnfn islessequal(half4 x, half4 y);
-short8 __ovld __cnfn islessequal(half8 x, half8 y);
-short16 __ovld __cnfn islessequal(half16 x, half16 y);
+int __ovld __cnfn islessequal(half, half);
+short2 __ovld __cnfn islessequal(half2, half2);
+short3 __ovld __cnfn islessequal(half3, half3);
+short4 __ovld __cnfn islessequal(half4, half4);
+short8 __ovld __cnfn islessequal(half8, half8);
+short16 __ovld __cnfn islessequal(half16, half16);
#endif //cl_khr_fp16
/**
* Returns the component-wise compare of
* (x < y) || (x > y).
*/
-int __ovld __cnfn islessgreater(float x, float y);
-int2 __ovld __cnfn islessgreater(float2 x, float2 y);
-int3 __ovld __cnfn islessgreater(float3 x, float3 y);
-int4 __ovld __cnfn islessgreater(float4 x, float4 y);
-int8 __ovld __cnfn islessgreater(float8 x, float8 y);
-int16 __ovld __cnfn islessgreater(float16 x, float16 y);
+int __ovld __cnfn islessgreater(float, float);
+int2 __ovld __cnfn islessgreater(float2, float2);
+int3 __ovld __cnfn islessgreater(float3, float3);
+int4 __ovld __cnfn islessgreater(float4, float4);
+int8 __ovld __cnfn islessgreater(float8, float8);
+int16 __ovld __cnfn islessgreater(float16, float16);
#ifdef cl_khr_fp64
-int __ovld __cnfn islessgreater(double x, double y);
-long2 __ovld __cnfn islessgreater(double2 x, double2 y);
-long3 __ovld __cnfn islessgreater(double3 x, double3 y);
-long4 __ovld __cnfn islessgreater(double4 x, double4 y);
-long8 __ovld __cnfn islessgreater(double8 x, double8 y);
-long16 __ovld __cnfn islessgreater(double16 x, double16 y);
+int __ovld __cnfn islessgreater(double, double);
+long2 __ovld __cnfn islessgreater(double2, double2);
+long3 __ovld __cnfn islessgreater(double3, double3);
+long4 __ovld __cnfn islessgreater(double4, double4);
+long8 __ovld __cnfn islessgreater(double8, double8);
+long16 __ovld __cnfn islessgreater(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-int __ovld __cnfn islessgreater(half x, half y);
-short2 __ovld __cnfn islessgreater(half2 x, half2 y);
-short3 __ovld __cnfn islessgreater(half3 x, half3 y);
-short4 __ovld __cnfn islessgreater(half4 x, half4 y);
-short8 __ovld __cnfn islessgreater(half8 x, half8 y);
-short16 __ovld __cnfn islessgreater(half16 x, half16 y);
+int __ovld __cnfn islessgreater(half, half);
+short2 __ovld __cnfn islessgreater(half2, half2);
+short3 __ovld __cnfn islessgreater(half3, half3);
+short4 __ovld __cnfn islessgreater(half4, half4);
+short8 __ovld __cnfn islessgreater(half8, half8);
+short16 __ovld __cnfn islessgreater(half16, half16);
#endif //cl_khr_fp16
/**
@@ -10816,27 +10816,27 @@ short16 __ovld __cnfn isnormal(half16);
* arguments x and y, and returns the result
* isequal(x, x) && isequal(y, y).
*/
-int __ovld __cnfn isordered(float x, float y);
-int2 __ovld __cnfn isordered(float2 x, float2 y);
-int3 __ovld __cnfn isordered(float3 x, float3 y);
-int4 __ovld __cnfn isordered(float4 x, float4 y);
-int8 __ovld __cnfn isordered(float8 x, float8 y);
-int16 __ovld __cnfn isordered(float16 x, float16 y);
+int __ovld __cnfn isordered(float, float);
+int2 __ovld __cnfn isordered(float2, float2);
+int3 __ovld __cnfn isordered(float3, float3);
+int4 __ovld __cnfn isordered(float4, float4);
+int8 __ovld __cnfn isordered(float8, float8);
+int16 __ovld __cnfn isordered(float16, float16);
#ifdef cl_khr_fp64
-int __ovld __cnfn isordered(double x, double y);
-long2 __ovld __cnfn isordered(double2 x, double2 y);
-long3 __ovld __cnfn isordered(double3 x, double3 y);
-long4 __ovld __cnfn isordered(double4 x, double4 y);
-long8 __ovld __cnfn isordered(double8 x, double8 y);
-long16 __ovld __cnfn isordered(double16 x, double16 y);
+int __ovld __cnfn isordered(double, double);
+long2 __ovld __cnfn isordered(double2, double2);
+long3 __ovld __cnfn isordered(double3, double3);
+long4 __ovld __cnfn isordered(double4, double4);
+long8 __ovld __cnfn isordered(double8, double8);
+long16 __ovld __cnfn isordered(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-int __ovld __cnfn isordered(half x, half y);
-short2 __ovld __cnfn isordered(half2 x, half2 y);
-short3 __ovld __cnfn isordered(half3 x, half3 y);
-short4 __ovld __cnfn isordered(half4 x, half4 y);
-short8 __ovld __cnfn isordered(half8 x, half8 y);
-short16 __ovld __cnfn isordered(half16 x, half16 y);
+int __ovld __cnfn isordered(half, half);
+short2 __ovld __cnfn isordered(half2, half2);
+short3 __ovld __cnfn isordered(half3, half3);
+short4 __ovld __cnfn isordered(half4, half4);
+short8 __ovld __cnfn isordered(half8, half8);
+short16 __ovld __cnfn isordered(half16, half16);
#endif //cl_khr_fp16
/**
@@ -10844,27 +10844,27 @@ short16 __ovld __cnfn isordered(half16 x, half16 y);
* takes arguments x and y, returning non-zero if x or y
* is NaN, and zero otherwise.
*/
-int __ovld __cnfn isunordered(float x, float y);
-int2 __ovld __cnfn isunordered(float2 x, float2 y);
-int3 __ovld __cnfn isunordered(float3 x, float3 y);
-int4 __ovld __cnfn isunordered(float4 x, float4 y);
-int8 __ovld __cnfn isunordered(float8 x, float8 y);
-int16 __ovld __cnfn isunordered(float16 x, float16 y);
+int __ovld __cnfn isunordered(float, float);
+int2 __ovld __cnfn isunordered(float2, float2);
+int3 __ovld __cnfn isunordered(float3, float3);
+int4 __ovld __cnfn isunordered(float4, float4);
+int8 __ovld __cnfn isunordered(float8, float8);
+int16 __ovld __cnfn isunordered(float16, float16);
#ifdef cl_khr_fp64
-int __ovld __cnfn isunordered(double x, double y);
-long2 __ovld __cnfn isunordered(double2 x, double2 y);
-long3 __ovld __cnfn isunordered(double3 x, double3 y);
-long4 __ovld __cnfn isunordered(double4 x, double4 y);
-long8 __ovld __cnfn isunordered(double8 x, double8 y);
-long16 __ovld __cnfn isunordered(double16 x, double16 y);
+int __ovld __cnfn isunordered(double, double);
+long2 __ovld __cnfn isunordered(double2, double2);
+long3 __ovld __cnfn isunordered(double3, double3);
+long4 __ovld __cnfn isunordered(double4, double4);
+long8 __ovld __cnfn isunordered(double8, double8);
+long16 __ovld __cnfn isunordered(double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-int __ovld __cnfn isunordered(half x, half y);
-short2 __ovld __cnfn isunordered(half2 x, half2 y);
-short3 __ovld __cnfn isunordered(half3 x, half3 y);
-short4 __ovld __cnfn isunordered(half4 x, half4 y);
-short8 __ovld __cnfn isunordered(half8 x, half8 y);
-short16 __ovld __cnfn isunordered(half16 x, half16 y);
+int __ovld __cnfn isunordered(half, half);
+short2 __ovld __cnfn isunordered(half2, half2);
+short3 __ovld __cnfn isunordered(half3, half3);
+short4 __ovld __cnfn isunordered(half4, half4);
+short8 __ovld __cnfn isunordered(half8, half8);
+short16 __ovld __cnfn isunordered(half16, half16);
#endif //cl_khr_fp16
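/*
 * Usage sketch (a minimal illustration, not part of this diff): NaN
 * compares unordered with everything, including itself, so for scalar
 * arguments:
 *
 *   int o = isordered(NAN, 1.0f);   // 0: NaN is never ordered
 *   int u = isunordered(NAN, 1.0f); // 1: at least one argument is NaN
 */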
/**
@@ -10901,134 +10901,134 @@ short16 __ovld __cnfn signbit(half16);
* Returns 1 if the most significant bit in any component
* of x is set; otherwise returns 0.
*/
-int __ovld __cnfn any(char x);
-int __ovld __cnfn any(char2 x);
-int __ovld __cnfn any(char3 x);
-int __ovld __cnfn any(char4 x);
-int __ovld __cnfn any(char8 x);
-int __ovld __cnfn any(char16 x);
-int __ovld __cnfn any(short x);
-int __ovld __cnfn any(short2 x);
-int __ovld __cnfn any(short3 x);
-int __ovld __cnfn any(short4 x);
-int __ovld __cnfn any(short8 x);
-int __ovld __cnfn any(short16 x);
-int __ovld __cnfn any(int x);
-int __ovld __cnfn any(int2 x);
-int __ovld __cnfn any(int3 x);
-int __ovld __cnfn any(int4 x);
-int __ovld __cnfn any(int8 x);
-int __ovld __cnfn any(int16 x);
-int __ovld __cnfn any(long x);
-int __ovld __cnfn any(long2 x);
-int __ovld __cnfn any(long3 x);
-int __ovld __cnfn any(long4 x);
-int __ovld __cnfn any(long8 x);
-int __ovld __cnfn any(long16 x);
+int __ovld __cnfn any(char);
+int __ovld __cnfn any(char2);
+int __ovld __cnfn any(char3);
+int __ovld __cnfn any(char4);
+int __ovld __cnfn any(char8);
+int __ovld __cnfn any(char16);
+int __ovld __cnfn any(short);
+int __ovld __cnfn any(short2);
+int __ovld __cnfn any(short3);
+int __ovld __cnfn any(short4);
+int __ovld __cnfn any(short8);
+int __ovld __cnfn any(short16);
+int __ovld __cnfn any(int);
+int __ovld __cnfn any(int2);
+int __ovld __cnfn any(int3);
+int __ovld __cnfn any(int4);
+int __ovld __cnfn any(int8);
+int __ovld __cnfn any(int16);
+int __ovld __cnfn any(long);
+int __ovld __cnfn any(long2);
+int __ovld __cnfn any(long3);
+int __ovld __cnfn any(long4);
+int __ovld __cnfn any(long8);
+int __ovld __cnfn any(long16);
/**
* Returns 1 if the most significant bit in all components
* of x is set; otherwise returns 0.
*/
-int __ovld __cnfn all(char x);
-int __ovld __cnfn all(char2 x);
-int __ovld __cnfn all(char3 x);
-int __ovld __cnfn all(char4 x);
-int __ovld __cnfn all(char8 x);
-int __ovld __cnfn all(char16 x);
-int __ovld __cnfn all(short x);
-int __ovld __cnfn all(short2 x);
-int __ovld __cnfn all(short3 x);
-int __ovld __cnfn all(short4 x);
-int __ovld __cnfn all(short8 x);
-int __ovld __cnfn all(short16 x);
-int __ovld __cnfn all(int x);
-int __ovld __cnfn all(int2 x);
-int __ovld __cnfn all(int3 x);
-int __ovld __cnfn all(int4 x);
-int __ovld __cnfn all(int8 x);
-int __ovld __cnfn all(int16 x);
-int __ovld __cnfn all(long x);
-int __ovld __cnfn all(long2 x);
-int __ovld __cnfn all(long3 x);
-int __ovld __cnfn all(long4 x);
-int __ovld __cnfn all(long8 x);
-int __ovld __cnfn all(long16 x);
+int __ovld __cnfn all(char);
+int __ovld __cnfn all(char2);
+int __ovld __cnfn all(char3);
+int __ovld __cnfn all(char4);
+int __ovld __cnfn all(char8);
+int __ovld __cnfn all(char16);
+int __ovld __cnfn all(short);
+int __ovld __cnfn all(short2);
+int __ovld __cnfn all(short3);
+int __ovld __cnfn all(short4);
+int __ovld __cnfn all(short8);
+int __ovld __cnfn all(short16);
+int __ovld __cnfn all(int);
+int __ovld __cnfn all(int2);
+int __ovld __cnfn all(int3);
+int __ovld __cnfn all(int4);
+int __ovld __cnfn all(int8);
+int __ovld __cnfn all(int16);
+int __ovld __cnfn all(long);
+int __ovld __cnfn all(long2);
+int __ovld __cnfn all(long3);
+int __ovld __cnfn all(long4);
+int __ovld __cnfn all(long8);
+int __ovld __cnfn all(long16);
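/*
 * Usage sketch (a minimal illustration, not part of this diff): any() and
 * all() inspect only the most significant (sign) bit of each component,
 * matching the -1/0 masks the vector relational functions produce:
 *
 *   int4 m = (int4)(-1, 0, -1, 0); // e.g. a result of isless()
 *   int a  = any(m);               // 1: some sign bit is set
 *   int b  = all(m);               // 0: not every sign bit is set
 */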
/**
* Each bit of the result is the corresponding bit of a if
* the corresponding bit of c is 0. Otherwise it is the
* corresponding bit of b.
*/
-char __ovld __cnfn bitselect(char a, char b, char c);
-uchar __ovld __cnfn bitselect(uchar a, uchar b, uchar c);
-char2 __ovld __cnfn bitselect(char2 a, char2 b, char2 c);
-uchar2 __ovld __cnfn bitselect(uchar2 a, uchar2 b, uchar2 c);
-char3 __ovld __cnfn bitselect(char3 a, char3 b, char3 c);
-uchar3 __ovld __cnfn bitselect(uchar3 a, uchar3 b, uchar3 c);
-char4 __ovld __cnfn bitselect(char4 a, char4 b, char4 c);
-uchar4 __ovld __cnfn bitselect(uchar4 a, uchar4 b, uchar4 c);
-char8 __ovld __cnfn bitselect(char8 a, char8 b, char8 c);
-uchar8 __ovld __cnfn bitselect(uchar8 a, uchar8 b, uchar8 c);
-char16 __ovld __cnfn bitselect(char16 a, char16 b, char16 c);
-uchar16 __ovld __cnfn bitselect(uchar16 a, uchar16 b, uchar16 c);
-short __ovld __cnfn bitselect(short a, short b, short c);
-ushort __ovld __cnfn bitselect(ushort a, ushort b, ushort c);
-short2 __ovld __cnfn bitselect(short2 a, short2 b, short2 c);
-ushort2 __ovld __cnfn bitselect(ushort2 a, ushort2 b, ushort2 c);
-short3 __ovld __cnfn bitselect(short3 a, short3 b, short3 c);
-ushort3 __ovld __cnfn bitselect(ushort3 a, ushort3 b, ushort3 c);
-short4 __ovld __cnfn bitselect(short4 a, short4 b, short4 c);
-ushort4 __ovld __cnfn bitselect(ushort4 a, ushort4 b, ushort4 c);
-short8 __ovld __cnfn bitselect(short8 a, short8 b, short8 c);
-ushort8 __ovld __cnfn bitselect(ushort8 a, ushort8 b, ushort8 c);
-short16 __ovld __cnfn bitselect(short16 a, short16 b, short16 c);
-ushort16 __ovld __cnfn bitselect(ushort16 a, ushort16 b, ushort16 c);
-int __ovld __cnfn bitselect(int a, int b, int c);
-uint __ovld __cnfn bitselect(uint a, uint b, uint c);
-int2 __ovld __cnfn bitselect(int2 a, int2 b, int2 c);
-uint2 __ovld __cnfn bitselect(uint2 a, uint2 b, uint2 c);
-int3 __ovld __cnfn bitselect(int3 a, int3 b, int3 c);
-uint3 __ovld __cnfn bitselect(uint3 a, uint3 b, uint3 c);
-int4 __ovld __cnfn bitselect(int4 a, int4 b, int4 c);
-uint4 __ovld __cnfn bitselect(uint4 a, uint4 b, uint4 c);
-int8 __ovld __cnfn bitselect(int8 a, int8 b, int8 c);
-uint8 __ovld __cnfn bitselect(uint8 a, uint8 b, uint8 c);
-int16 __ovld __cnfn bitselect(int16 a, int16 b, int16 c);
-uint16 __ovld __cnfn bitselect(uint16 a, uint16 b, uint16 c);
-long __ovld __cnfn bitselect(long a, long b, long c);
-ulong __ovld __cnfn bitselect(ulong a, ulong b, ulong c);
-long2 __ovld __cnfn bitselect(long2 a, long2 b, long2 c);
-ulong2 __ovld __cnfn bitselect(ulong2 a, ulong2 b, ulong2 c);
-long3 __ovld __cnfn bitselect(long3 a, long3 b, long3 c);
-ulong3 __ovld __cnfn bitselect(ulong3 a, ulong3 b, ulong3 c);
-long4 __ovld __cnfn bitselect(long4 a, long4 b, long4 c);
-ulong4 __ovld __cnfn bitselect(ulong4 a, ulong4 b, ulong4 c);
-long8 __ovld __cnfn bitselect(long8 a, long8 b, long8 c);
-ulong8 __ovld __cnfn bitselect(ulong8 a, ulong8 b, ulong8 c);
-long16 __ovld __cnfn bitselect(long16 a, long16 b, long16 c);
-ulong16 __ovld __cnfn bitselect(ulong16 a, ulong16 b, ulong16 c);
-float __ovld __cnfn bitselect(float a, float b, float c);
-float2 __ovld __cnfn bitselect(float2 a, float2 b, float2 c);
-float3 __ovld __cnfn bitselect(float3 a, float3 b, float3 c);
-float4 __ovld __cnfn bitselect(float4 a, float4 b, float4 c);
-float8 __ovld __cnfn bitselect(float8 a, float8 b, float8 c);
-float16 __ovld __cnfn bitselect(float16 a, float16 b, float16 c);
+char __ovld __cnfn bitselect(char, char, char);
+uchar __ovld __cnfn bitselect(uchar, uchar, uchar);
+char2 __ovld __cnfn bitselect(char2, char2, char2);
+uchar2 __ovld __cnfn bitselect(uchar2, uchar2, uchar2);
+char3 __ovld __cnfn bitselect(char3, char3, char3);
+uchar3 __ovld __cnfn bitselect(uchar3, uchar3, uchar3);
+char4 __ovld __cnfn bitselect(char4, char4, char4);
+uchar4 __ovld __cnfn bitselect(uchar4, uchar4, uchar4);
+char8 __ovld __cnfn bitselect(char8, char8, char8);
+uchar8 __ovld __cnfn bitselect(uchar8, uchar8, uchar8);
+char16 __ovld __cnfn bitselect(char16, char16, char16);
+uchar16 __ovld __cnfn bitselect(uchar16, uchar16, uchar16);
+short __ovld __cnfn bitselect(short, short, short);
+ushort __ovld __cnfn bitselect(ushort, ushort, ushort);
+short2 __ovld __cnfn bitselect(short2, short2, short2);
+ushort2 __ovld __cnfn bitselect(ushort2, ushort2, ushort2);
+short3 __ovld __cnfn bitselect(short3, short3, short3);
+ushort3 __ovld __cnfn bitselect(ushort3, ushort3, ushort3);
+short4 __ovld __cnfn bitselect(short4, short4, short4);
+ushort4 __ovld __cnfn bitselect(ushort4, ushort4, ushort4);
+short8 __ovld __cnfn bitselect(short8, short8, short8);
+ushort8 __ovld __cnfn bitselect(ushort8, ushort8, ushort8);
+short16 __ovld __cnfn bitselect(short16, short16, short16);
+ushort16 __ovld __cnfn bitselect(ushort16, ushort16, ushort16);
+int __ovld __cnfn bitselect(int, int, int);
+uint __ovld __cnfn bitselect(uint, uint, uint);
+int2 __ovld __cnfn bitselect(int2, int2, int2);
+uint2 __ovld __cnfn bitselect(uint2, uint2, uint2);
+int3 __ovld __cnfn bitselect(int3, int3, int3);
+uint3 __ovld __cnfn bitselect(uint3, uint3, uint3);
+int4 __ovld __cnfn bitselect(int4, int4, int4);
+uint4 __ovld __cnfn bitselect(uint4, uint4, uint4);
+int8 __ovld __cnfn bitselect(int8, int8, int8);
+uint8 __ovld __cnfn bitselect(uint8, uint8, uint8);
+int16 __ovld __cnfn bitselect(int16, int16, int16);
+uint16 __ovld __cnfn bitselect(uint16, uint16, uint16);
+long __ovld __cnfn bitselect(long, long, long);
+ulong __ovld __cnfn bitselect(ulong, ulong, ulong);
+long2 __ovld __cnfn bitselect(long2, long2, long2);
+ulong2 __ovld __cnfn bitselect(ulong2, ulong2, ulong2);
+long3 __ovld __cnfn bitselect(long3, long3, long3);
+ulong3 __ovld __cnfn bitselect(ulong3, ulong3, ulong3);
+long4 __ovld __cnfn bitselect(long4, long4, long4);
+ulong4 __ovld __cnfn bitselect(ulong4, ulong4, ulong4);
+long8 __ovld __cnfn bitselect(long8, long8, long8);
+ulong8 __ovld __cnfn bitselect(ulong8, ulong8, ulong8);
+long16 __ovld __cnfn bitselect(long16, long16, long16);
+ulong16 __ovld __cnfn bitselect(ulong16, ulong16, ulong16);
+float __ovld __cnfn bitselect(float, float, float);
+float2 __ovld __cnfn bitselect(float2, float2, float2);
+float3 __ovld __cnfn bitselect(float3, float3, float3);
+float4 __ovld __cnfn bitselect(float4, float4, float4);
+float8 __ovld __cnfn bitselect(float8, float8, float8);
+float16 __ovld __cnfn bitselect(float16, float16, float16);
#ifdef cl_khr_fp64
-double __ovld __cnfn bitselect(double a, double b, double c);
-double2 __ovld __cnfn bitselect(double2 a, double2 b, double2 c);
-double3 __ovld __cnfn bitselect(double3 a, double3 b, double3 c);
-double4 __ovld __cnfn bitselect(double4 a, double4 b, double4 c);
-double8 __ovld __cnfn bitselect(double8 a, double8 b, double8 c);
-double16 __ovld __cnfn bitselect(double16 a, double16 b, double16 c);
+double __ovld __cnfn bitselect(double, double, double);
+double2 __ovld __cnfn bitselect(double2, double2, double2);
+double3 __ovld __cnfn bitselect(double3, double3, double3);
+double4 __ovld __cnfn bitselect(double4, double4, double4);
+double8 __ovld __cnfn bitselect(double8, double8, double8);
+double16 __ovld __cnfn bitselect(double16, double16, double16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn bitselect(half a, half b, half c);
-half2 __ovld __cnfn bitselect(half2 a, half2 b, half2 c);
-half3 __ovld __cnfn bitselect(half3 a, half3 b, half3 c);
-half4 __ovld __cnfn bitselect(half4 a, half4 b, half4 c);
-half8 __ovld __cnfn bitselect(half8 a, half8 b, half8 c);
-half16 __ovld __cnfn bitselect(half16 a, half16 b, half16 c);
+half __ovld __cnfn bitselect(half, half, half);
+half2 __ovld __cnfn bitselect(half2, half2, half2);
+half3 __ovld __cnfn bitselect(half3, half3, half3);
+half4 __ovld __cnfn bitselect(half4, half4, half4);
+half8 __ovld __cnfn bitselect(half8, half8, half8);
+half16 __ovld __cnfn bitselect(half16, half16, half16);
#endif //cl_khr_fp16
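/*
 * Usage sketch (a minimal illustration, not part of this diff):
 * bitselect() merges two values bit by bit, taking each bit from b where
 * the corresponding mask bit is 1 and from a where it is 0:
 *
 *   uint r = bitselect(0xAAAAAAAAu, 0x55555555u, 0x0000FFFFu);
 *   // r == 0xAAAA5555u: high half from a, low half from b
 */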
/**
@@ -11038,149 +11038,149 @@ half16 __ovld __cnfn bitselect(half16 a, half16 b, half16 c);
* b and a must have the same type.
* c must have the same number of elements and bits as a.
*/
-char __ovld __cnfn select(char a, char b, char c);
-uchar __ovld __cnfn select(uchar a, uchar b, char c);
-char2 __ovld __cnfn select(char2 a, char2 b, char2 c);
-uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, char2 c);
-char3 __ovld __cnfn select(char3 a, char3 b, char3 c);
-uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, char3 c);
-char4 __ovld __cnfn select(char4 a, char4 b, char4 c);
-uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, char4 c);
-char8 __ovld __cnfn select(char8 a, char8 b, char8 c);
-uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, char8 c);
-char16 __ovld __cnfn select(char16 a, char16 b, char16 c);
-uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, char16 c);
-
-short __ovld __cnfn select(short a, short b, short c);
-ushort __ovld __cnfn select(ushort a, ushort b, short c);
-short2 __ovld __cnfn select(short2 a, short2 b, short2 c);
-ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, short2 c);
-short3 __ovld __cnfn select(short3 a, short3 b, short3 c);
-ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, short3 c);
-short4 __ovld __cnfn select(short4 a, short4 b, short4 c);
-ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, short4 c);
-short8 __ovld __cnfn select(short8 a, short8 b, short8 c);
-ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, short8 c);
-short16 __ovld __cnfn select(short16 a, short16 b, short16 c);
-ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, short16 c);
-
-int __ovld __cnfn select(int a, int b, int c);
-uint __ovld __cnfn select(uint a, uint b, int c);
-int2 __ovld __cnfn select(int2 a, int2 b, int2 c);
-uint2 __ovld __cnfn select(uint2 a, uint2 b, int2 c);
-int3 __ovld __cnfn select(int3 a, int3 b, int3 c);
-uint3 __ovld __cnfn select(uint3 a, uint3 b, int3 c);
-int4 __ovld __cnfn select(int4 a, int4 b, int4 c);
-uint4 __ovld __cnfn select(uint4 a, uint4 b, int4 c);
-int8 __ovld __cnfn select(int8 a, int8 b, int8 c);
-uint8 __ovld __cnfn select(uint8 a, uint8 b, int8 c);
-int16 __ovld __cnfn select(int16 a, int16 b, int16 c);
-uint16 __ovld __cnfn select(uint16 a, uint16 b, int16 c);
-float __ovld __cnfn select(float a, float b, int c);
-float2 __ovld __cnfn select(float2 a, float2 b, int2 c);
-float3 __ovld __cnfn select(float3 a, float3 b, int3 c);
-float4 __ovld __cnfn select(float4 a, float4 b, int4 c);
-float8 __ovld __cnfn select(float8 a, float8 b, int8 c);
-float16 __ovld __cnfn select(float16 a, float16 b, int16 c);
-
-long __ovld __cnfn select(long a, long b, long c);
-ulong __ovld __cnfn select(ulong a, ulong b, long c);
-long2 __ovld __cnfn select(long2 a, long2 b, long2 c);
-ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, long2 c);
-long3 __ovld __cnfn select(long3 a, long3 b, long3 c);
-ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, long3 c);
-long4 __ovld __cnfn select(long4 a, long4 b, long4 c);
-ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, long4 c);
-long8 __ovld __cnfn select(long8 a, long8 b, long8 c);
-ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, long8 c);
-long16 __ovld __cnfn select(long16 a, long16 b, long16 c);
-ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, long16 c);
-
-char __ovld __cnfn select(char a, char b, uchar c);
-uchar __ovld __cnfn select(uchar a, uchar b, uchar c);
-char2 __ovld __cnfn select(char2 a, char2 b, uchar2 c);
-uchar2 __ovld __cnfn select(uchar2 a, uchar2 b, uchar2 c);
-char3 __ovld __cnfn select(char3 a, char3 b, uchar3 c);
-uchar3 __ovld __cnfn select(uchar3 a, uchar3 b, uchar3 c);
-char4 __ovld __cnfn select(char4 a, char4 b, uchar4 c);
-uchar4 __ovld __cnfn select(uchar4 a, uchar4 b, uchar4 c);
-char8 __ovld __cnfn select(char8 a, char8 b, uchar8 c);
-uchar8 __ovld __cnfn select(uchar8 a, uchar8 b, uchar8 c);
-char16 __ovld __cnfn select(char16 a, char16 b, uchar16 c);
-uchar16 __ovld __cnfn select(uchar16 a, uchar16 b, uchar16 c);
-
-short __ovld __cnfn select(short a, short b, ushort c);
-ushort __ovld __cnfn select(ushort a, ushort b, ushort c);
-short2 __ovld __cnfn select(short2 a, short2 b, ushort2 c);
-ushort2 __ovld __cnfn select(ushort2 a, ushort2 b, ushort2 c);
-short3 __ovld __cnfn select(short3 a, short3 b, ushort3 c);
-ushort3 __ovld __cnfn select(ushort3 a, ushort3 b, ushort3 c);
-short4 __ovld __cnfn select(short4 a, short4 b, ushort4 c);
-ushort4 __ovld __cnfn select(ushort4 a, ushort4 b, ushort4 c);
-short8 __ovld __cnfn select(short8 a, short8 b, ushort8 c);
-ushort8 __ovld __cnfn select(ushort8 a, ushort8 b, ushort8 c);
-short16 __ovld __cnfn select(short16 a, short16 b, ushort16 c);
-ushort16 __ovld __cnfn select(ushort16 a, ushort16 b, ushort16 c);
-
-int __ovld __cnfn select(int a, int b, uint c);
-uint __ovld __cnfn select(uint a, uint b, uint c);
-int2 __ovld __cnfn select(int2 a, int2 b, uint2 c);
-uint2 __ovld __cnfn select(uint2 a, uint2 b, uint2 c);
-int3 __ovld __cnfn select(int3 a, int3 b, uint3 c);
-uint3 __ovld __cnfn select(uint3 a, uint3 b, uint3 c);
-int4 __ovld __cnfn select(int4 a, int4 b, uint4 c);
-uint4 __ovld __cnfn select(uint4 a, uint4 b, uint4 c);
-int8 __ovld __cnfn select(int8 a, int8 b, uint8 c);
-uint8 __ovld __cnfn select(uint8 a, uint8 b, uint8 c);
-int16 __ovld __cnfn select(int16 a, int16 b, uint16 c);
-uint16 __ovld __cnfn select(uint16 a, uint16 b, uint16 c);
-float __ovld __cnfn select(float a, float b, uint c);
-float2 __ovld __cnfn select(float2 a, float2 b, uint2 c);
-float3 __ovld __cnfn select(float3 a, float3 b, uint3 c);
-float4 __ovld __cnfn select(float4 a, float4 b, uint4 c);
-float8 __ovld __cnfn select(float8 a, float8 b, uint8 c);
-float16 __ovld __cnfn select(float16 a, float16 b, uint16 c);
-
-long __ovld __cnfn select(long a, long b, ulong c);
-ulong __ovld __cnfn select(ulong a, ulong b, ulong c);
-long2 __ovld __cnfn select(long2 a, long2 b, ulong2 c);
-ulong2 __ovld __cnfn select(ulong2 a, ulong2 b, ulong2 c);
-long3 __ovld __cnfn select(long3 a, long3 b, ulong3 c);
-ulong3 __ovld __cnfn select(ulong3 a, ulong3 b, ulong3 c);
-long4 __ovld __cnfn select(long4 a, long4 b, ulong4 c);
-ulong4 __ovld __cnfn select(ulong4 a, ulong4 b, ulong4 c);
-long8 __ovld __cnfn select(long8 a, long8 b, ulong8 c);
-ulong8 __ovld __cnfn select(ulong8 a, ulong8 b, ulong8 c);
-long16 __ovld __cnfn select(long16 a, long16 b, ulong16 c);
-ulong16 __ovld __cnfn select(ulong16 a, ulong16 b, ulong16 c);
+char __ovld __cnfn select(char, char, char);
+uchar __ovld __cnfn select(uchar, uchar, char);
+char2 __ovld __cnfn select(char2, char2, char2);
+uchar2 __ovld __cnfn select(uchar2, uchar2, char2);
+char3 __ovld __cnfn select(char3, char3, char3);
+uchar3 __ovld __cnfn select(uchar3, uchar3, char3);
+char4 __ovld __cnfn select(char4, char4, char4);
+uchar4 __ovld __cnfn select(uchar4, uchar4, char4);
+char8 __ovld __cnfn select(char8, char8, char8);
+uchar8 __ovld __cnfn select(uchar8, uchar8, char8);
+char16 __ovld __cnfn select(char16, char16, char16);
+uchar16 __ovld __cnfn select(uchar16, uchar16, char16);
+
+short __ovld __cnfn select(short, short, short);
+ushort __ovld __cnfn select(ushort, ushort, short);
+short2 __ovld __cnfn select(short2, short2, short2);
+ushort2 __ovld __cnfn select(ushort2, ushort2, short2);
+short3 __ovld __cnfn select(short3, short3, short3);
+ushort3 __ovld __cnfn select(ushort3, ushort3, short3);
+short4 __ovld __cnfn select(short4, short4, short4);
+ushort4 __ovld __cnfn select(ushort4, ushort4, short4);
+short8 __ovld __cnfn select(short8, short8, short8);
+ushort8 __ovld __cnfn select(ushort8, ushort8, short8);
+short16 __ovld __cnfn select(short16, short16, short16);
+ushort16 __ovld __cnfn select(ushort16, ushort16, short16);
+
+int __ovld __cnfn select(int, int, int);
+uint __ovld __cnfn select(uint, uint, int);
+int2 __ovld __cnfn select(int2, int2, int2);
+uint2 __ovld __cnfn select(uint2, uint2, int2);
+int3 __ovld __cnfn select(int3, int3, int3);
+uint3 __ovld __cnfn select(uint3, uint3, int3);
+int4 __ovld __cnfn select(int4, int4, int4);
+uint4 __ovld __cnfn select(uint4, uint4, int4);
+int8 __ovld __cnfn select(int8, int8, int8);
+uint8 __ovld __cnfn select(uint8, uint8, int8);
+int16 __ovld __cnfn select(int16, int16, int16);
+uint16 __ovld __cnfn select(uint16, uint16, int16);
+float __ovld __cnfn select(float, float, int);
+float2 __ovld __cnfn select(float2, float2, int2);
+float3 __ovld __cnfn select(float3, float3, int3);
+float4 __ovld __cnfn select(float4, float4, int4);
+float8 __ovld __cnfn select(float8, float8, int8);
+float16 __ovld __cnfn select(float16, float16, int16);
+
+long __ovld __cnfn select(long, long, long);
+ulong __ovld __cnfn select(ulong, ulong, long);
+long2 __ovld __cnfn select(long2, long2, long2);
+ulong2 __ovld __cnfn select(ulong2, ulong2, long2);
+long3 __ovld __cnfn select(long3, long3, long3);
+ulong3 __ovld __cnfn select(ulong3, ulong3, long3);
+long4 __ovld __cnfn select(long4, long4, long4);
+ulong4 __ovld __cnfn select(ulong4, ulong4, long4);
+long8 __ovld __cnfn select(long8, long8, long8);
+ulong8 __ovld __cnfn select(ulong8, ulong8, long8);
+long16 __ovld __cnfn select(long16, long16, long16);
+ulong16 __ovld __cnfn select(ulong16, ulong16, long16);
+
+char __ovld __cnfn select(char, char, uchar);
+uchar __ovld __cnfn select(uchar, uchar, uchar);
+char2 __ovld __cnfn select(char2, char2, uchar2);
+uchar2 __ovld __cnfn select(uchar2, uchar2, uchar2);
+char3 __ovld __cnfn select(char3, char3, uchar3);
+uchar3 __ovld __cnfn select(uchar3, uchar3, uchar3);
+char4 __ovld __cnfn select(char4, char4, uchar4);
+uchar4 __ovld __cnfn select(uchar4, uchar4, uchar4);
+char8 __ovld __cnfn select(char8, char8, uchar8);
+uchar8 __ovld __cnfn select(uchar8, uchar8, uchar8);
+char16 __ovld __cnfn select(char16, char16, uchar16);
+uchar16 __ovld __cnfn select(uchar16, uchar16, uchar16);
+
+short __ovld __cnfn select(short, short, ushort);
+ushort __ovld __cnfn select(ushort, ushort, ushort);
+short2 __ovld __cnfn select(short2, short2, ushort2);
+ushort2 __ovld __cnfn select(ushort2, ushort2, ushort2);
+short3 __ovld __cnfn select(short3, short3, ushort3);
+ushort3 __ovld __cnfn select(ushort3, ushort3, ushort3);
+short4 __ovld __cnfn select(short4, short4, ushort4);
+ushort4 __ovld __cnfn select(ushort4, ushort4, ushort4);
+short8 __ovld __cnfn select(short8, short8, ushort8);
+ushort8 __ovld __cnfn select(ushort8, ushort8, ushort8);
+short16 __ovld __cnfn select(short16, short16, ushort16);
+ushort16 __ovld __cnfn select(ushort16, ushort16, ushort16);
+
+int __ovld __cnfn select(int, int, uint);
+uint __ovld __cnfn select(uint, uint, uint);
+int2 __ovld __cnfn select(int2, int2, uint2);
+uint2 __ovld __cnfn select(uint2, uint2, uint2);
+int3 __ovld __cnfn select(int3, int3, uint3);
+uint3 __ovld __cnfn select(uint3, uint3, uint3);
+int4 __ovld __cnfn select(int4, int4, uint4);
+uint4 __ovld __cnfn select(uint4, uint4, uint4);
+int8 __ovld __cnfn select(int8, int8, uint8);
+uint8 __ovld __cnfn select(uint8, uint8, uint8);
+int16 __ovld __cnfn select(int16, int16, uint16);
+uint16 __ovld __cnfn select(uint16, uint16, uint16);
+float __ovld __cnfn select(float, float, uint);
+float2 __ovld __cnfn select(float2, float2, uint2);
+float3 __ovld __cnfn select(float3, float3, uint3);
+float4 __ovld __cnfn select(float4, float4, uint4);
+float8 __ovld __cnfn select(float8, float8, uint8);
+float16 __ovld __cnfn select(float16, float16, uint16);
+
+long __ovld __cnfn select(long, long, ulong);
+ulong __ovld __cnfn select(ulong, ulong, ulong);
+long2 __ovld __cnfn select(long2, long2, ulong2);
+ulong2 __ovld __cnfn select(ulong2, ulong2, ulong2);
+long3 __ovld __cnfn select(long3, long3, ulong3);
+ulong3 __ovld __cnfn select(ulong3, ulong3, ulong3);
+long4 __ovld __cnfn select(long4, long4, ulong4);
+ulong4 __ovld __cnfn select(ulong4, ulong4, ulong4);
+long8 __ovld __cnfn select(long8, long8, ulong8);
+ulong8 __ovld __cnfn select(ulong8, ulong8, ulong8);
+long16 __ovld __cnfn select(long16, long16, ulong16);
+ulong16 __ovld __cnfn select(ulong16, ulong16, ulong16);
#ifdef cl_khr_fp64
-double __ovld __cnfn select(double a, double b, long c);
-double2 __ovld __cnfn select(double2 a, double2 b, long2 c);
-double3 __ovld __cnfn select(double3 a, double3 b, long3 c);
-double4 __ovld __cnfn select(double4 a, double4 b, long4 c);
-double8 __ovld __cnfn select(double8 a, double8 b, long8 c);
-double16 __ovld __cnfn select(double16 a, double16 b, long16 c);
-double __ovld __cnfn select(double a, double b, ulong c);
-double2 __ovld __cnfn select(double2 a, double2 b, ulong2 c);
-double3 __ovld __cnfn select(double3 a, double3 b, ulong3 c);
-double4 __ovld __cnfn select(double4 a, double4 b, ulong4 c);
-double8 __ovld __cnfn select(double8 a, double8 b, ulong8 c);
-double16 __ovld __cnfn select(double16 a, double16 b, ulong16 c);
+double __ovld __cnfn select(double, double, long);
+double2 __ovld __cnfn select(double2, double2, long2);
+double3 __ovld __cnfn select(double3, double3, long3);
+double4 __ovld __cnfn select(double4, double4, long4);
+double8 __ovld __cnfn select(double8, double8, long8);
+double16 __ovld __cnfn select(double16, double16, long16);
+double __ovld __cnfn select(double, double, ulong);
+double2 __ovld __cnfn select(double2, double2, ulong2);
+double3 __ovld __cnfn select(double3, double3, ulong3);
+double4 __ovld __cnfn select(double4, double4, ulong4);
+double8 __ovld __cnfn select(double8, double8, ulong8);
+double16 __ovld __cnfn select(double16, double16, ulong16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __cnfn select(half a, half b, short c);
-half2 __ovld __cnfn select(half2 a, half2 b, short2 c);
-half3 __ovld __cnfn select(half3 a, half3 b, short3 c);
-half4 __ovld __cnfn select(half4 a, half4 b, short4 c);
-half8 __ovld __cnfn select(half8 a, half8 b, short8 c);
-half16 __ovld __cnfn select(half16 a, half16 b, short16 c);
-half __ovld __cnfn select(half a, half b, ushort c);
-half2 __ovld __cnfn select(half2 a, half2 b, ushort2 c);
-half3 __ovld __cnfn select(half3 a, half3 b, ushort3 c);
-half4 __ovld __cnfn select(half4 a, half4 b, ushort4 c);
-half8 __ovld __cnfn select(half8 a, half8 b, ushort8 c);
-half16 __ovld __cnfn select(half16 a, half16 b, ushort16 c);
+half __ovld __cnfn select(half, half, short);
+half2 __ovld __cnfn select(half2, half2, short2);
+half3 __ovld __cnfn select(half3, half3, short3);
+half4 __ovld __cnfn select(half4, half4, short4);
+half8 __ovld __cnfn select(half8, half8, short8);
+half16 __ovld __cnfn select(half16, half16, short16);
+half __ovld __cnfn select(half, half, ushort);
+half2 __ovld __cnfn select(half2, half2, ushort2);
+half3 __ovld __cnfn select(half3, half3, ushort3);
+half4 __ovld __cnfn select(half4, half4, ushort4);
+half8 __ovld __cnfn select(half8, half8, ushort8);
+half16 __ovld __cnfn select(half16, half16, ushort16);
#endif //cl_khr_fp16
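/*
 * Usage sketch (a minimal illustration, not part of this diff): for vector
 * arguments, select() picks b[i] where the most significant bit of c[i] is
 * set and a[i] otherwise, so a relational mask plugs in directly:
 *
 *   int4   m = isgreater((float4)(1, 5, 2, 8), (float4)(4, 4, 4, 4));
 *   float4 r = select((float4)(0.0f), (float4)(1.0f), m); // (0, 1, 0, 1)
 */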
// OpenCL v1.1 s6.11.7, v1.2 s6.12.7, v2.0 s6.13.7 - Vector Data Load and Store Functions
@@ -11201,545 +11201,536 @@ half16 __ovld __cnfn select(half16 a, half16 b, ushort16 c);
* 64-bit aligned if gentype is long, ulong, double.
*/
-char2 __ovld __purefn vload2(size_t offset, const __constant char *p);
-uchar2 __ovld __purefn vload2(size_t offset, const __constant uchar *p);
-short2 __ovld __purefn vload2(size_t offset, const __constant short *p);
-ushort2 __ovld __purefn vload2(size_t offset, const __constant ushort *p);
-int2 __ovld __purefn vload2(size_t offset, const __constant int *p);
-uint2 __ovld __purefn vload2(size_t offset, const __constant uint *p);
-long2 __ovld __purefn vload2(size_t offset, const __constant long *p);
-ulong2 __ovld __purefn vload2(size_t offset, const __constant ulong *p);
-float2 __ovld __purefn vload2(size_t offset, const __constant float *p);
-char3 __ovld __purefn vload3(size_t offset, const __constant char *p);
-uchar3 __ovld __purefn vload3(size_t offset, const __constant uchar *p);
-short3 __ovld __purefn vload3(size_t offset, const __constant short *p);
-ushort3 __ovld __purefn vload3(size_t offset, const __constant ushort *p);
-int3 __ovld __purefn vload3(size_t offset, const __constant int *p);
-uint3 __ovld __purefn vload3(size_t offset, const __constant uint *p);
-long3 __ovld __purefn vload3(size_t offset, const __constant long *p);
-ulong3 __ovld __purefn vload3(size_t offset, const __constant ulong *p);
-float3 __ovld __purefn vload3(size_t offset, const __constant float *p);
-char4 __ovld __purefn vload4(size_t offset, const __constant char *p);
-uchar4 __ovld __purefn vload4(size_t offset, const __constant uchar *p);
-short4 __ovld __purefn vload4(size_t offset, const __constant short *p);
-ushort4 __ovld __purefn vload4(size_t offset, const __constant ushort *p);
-int4 __ovld __purefn vload4(size_t offset, const __constant int *p);
-uint4 __ovld __purefn vload4(size_t offset, const __constant uint *p);
-long4 __ovld __purefn vload4(size_t offset, const __constant long *p);
-ulong4 __ovld __purefn vload4(size_t offset, const __constant ulong *p);
-float4 __ovld __purefn vload4(size_t offset, const __constant float *p);
-char8 __ovld __purefn vload8(size_t offset, const __constant char *p);
-uchar8 __ovld __purefn vload8(size_t offset, const __constant uchar *p);
-short8 __ovld __purefn vload8(size_t offset, const __constant short *p);
-ushort8 __ovld __purefn vload8(size_t offset, const __constant ushort *p);
-int8 __ovld __purefn vload8(size_t offset, const __constant int *p);
-uint8 __ovld __purefn vload8(size_t offset, const __constant uint *p);
-long8 __ovld __purefn vload8(size_t offset, const __constant long *p);
-ulong8 __ovld __purefn vload8(size_t offset, const __constant ulong *p);
-float8 __ovld __purefn vload8(size_t offset, const __constant float *p);
-char16 __ovld __purefn vload16(size_t offset, const __constant char *p);
-uchar16 __ovld __purefn vload16(size_t offset, const __constant uchar *p);
-short16 __ovld __purefn vload16(size_t offset, const __constant short *p);
-ushort16 __ovld __purefn vload16(size_t offset, const __constant ushort *p);
-int16 __ovld __purefn vload16(size_t offset, const __constant int *p);
-uint16 __ovld __purefn vload16(size_t offset, const __constant uint *p);
-long16 __ovld __purefn vload16(size_t offset, const __constant long *p);
-ulong16 __ovld __purefn vload16(size_t offset, const __constant ulong *p);
-float16 __ovld __purefn vload16(size_t offset, const __constant float *p);
+char2 __ovld __purefn vload2(size_t, const __constant char *);
+uchar2 __ovld __purefn vload2(size_t, const __constant uchar *);
+short2 __ovld __purefn vload2(size_t, const __constant short *);
+ushort2 __ovld __purefn vload2(size_t, const __constant ushort *);
+int2 __ovld __purefn vload2(size_t, const __constant int *);
+uint2 __ovld __purefn vload2(size_t, const __constant uint *);
+long2 __ovld __purefn vload2(size_t, const __constant long *);
+ulong2 __ovld __purefn vload2(size_t, const __constant ulong *);
+float2 __ovld __purefn vload2(size_t, const __constant float *);
+char3 __ovld __purefn vload3(size_t, const __constant char *);
+uchar3 __ovld __purefn vload3(size_t, const __constant uchar *);
+short3 __ovld __purefn vload3(size_t, const __constant short *);
+ushort3 __ovld __purefn vload3(size_t, const __constant ushort *);
+int3 __ovld __purefn vload3(size_t, const __constant int *);
+uint3 __ovld __purefn vload3(size_t, const __constant uint *);
+long3 __ovld __purefn vload3(size_t, const __constant long *);
+ulong3 __ovld __purefn vload3(size_t, const __constant ulong *);
+float3 __ovld __purefn vload3(size_t, const __constant float *);
+char4 __ovld __purefn vload4(size_t, const __constant char *);
+uchar4 __ovld __purefn vload4(size_t, const __constant uchar *);
+short4 __ovld __purefn vload4(size_t, const __constant short *);
+ushort4 __ovld __purefn vload4(size_t, const __constant ushort *);
+int4 __ovld __purefn vload4(size_t, const __constant int *);
+uint4 __ovld __purefn vload4(size_t, const __constant uint *);
+long4 __ovld __purefn vload4(size_t, const __constant long *);
+ulong4 __ovld __purefn vload4(size_t, const __constant ulong *);
+float4 __ovld __purefn vload4(size_t, const __constant float *);
+char8 __ovld __purefn vload8(size_t, const __constant char *);
+uchar8 __ovld __purefn vload8(size_t, const __constant uchar *);
+short8 __ovld __purefn vload8(size_t, const __constant short *);
+ushort8 __ovld __purefn vload8(size_t, const __constant ushort *);
+int8 __ovld __purefn vload8(size_t, const __constant int *);
+uint8 __ovld __purefn vload8(size_t, const __constant uint *);
+long8 __ovld __purefn vload8(size_t, const __constant long *);
+ulong8 __ovld __purefn vload8(size_t, const __constant ulong *);
+float8 __ovld __purefn vload8(size_t, const __constant float *);
+char16 __ovld __purefn vload16(size_t, const __constant char *);
+uchar16 __ovld __purefn vload16(size_t, const __constant uchar *);
+short16 __ovld __purefn vload16(size_t, const __constant short *);
+ushort16 __ovld __purefn vload16(size_t, const __constant ushort *);
+int16 __ovld __purefn vload16(size_t, const __constant int *);
+uint16 __ovld __purefn vload16(size_t, const __constant uint *);
+long16 __ovld __purefn vload16(size_t, const __constant long *);
+ulong16 __ovld __purefn vload16(size_t, const __constant ulong *);
+float16 __ovld __purefn vload16(size_t, const __constant float *);
#ifdef cl_khr_fp64
-double2 __ovld __purefn vload2(size_t offset, const __constant double *p);
-double3 __ovld __purefn vload3(size_t offset, const __constant double *p);
-double4 __ovld __purefn vload4(size_t offset, const __constant double *p);
-double8 __ovld __purefn vload8(size_t offset, const __constant double *p);
-double16 __ovld __purefn vload16(size_t offset, const __constant double *p);
+double2 __ovld __purefn vload2(size_t, const __constant double *);
+double3 __ovld __purefn vload3(size_t, const __constant double *);
+double4 __ovld __purefn vload4(size_t, const __constant double *);
+double8 __ovld __purefn vload8(size_t, const __constant double *);
+double16 __ovld __purefn vload16(size_t, const __constant double *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __purefn vload(size_t offset, const __constant half *p);
-half2 __ovld __purefn vload2(size_t offset, const __constant half *p);
-half3 __ovld __purefn vload3(size_t offset, const __constant half *p);
-half4 __ovld __purefn vload4(size_t offset, const __constant half *p);
-half8 __ovld __purefn vload8(size_t offset, const __constant half *p);
-half16 __ovld __purefn vload16(size_t offset, const __constant half *p);
+half2 __ovld __purefn vload2(size_t, const __constant half *);
+half3 __ovld __purefn vload3(size_t, const __constant half *);
+half4 __ovld __purefn vload4(size_t, const __constant half *);
+half8 __ovld __purefn vload8(size_t, const __constant half *);
+half16 __ovld __purefn vload16(size_t, const __constant half *);
#endif //cl_khr_fp16
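/*
 * Usage sketch (a minimal illustration, not part of this diff; the kernel
 * and buffer names are hypothetical): vloadn(offset, p) reads n consecutive
 * elements starting at p + n * offset, which is how packed float triples
 * come out of a __constant buffer:
 *
 *   __kernel void gather(__constant float *coeffs, __global float4 *out) {
 *     size_t i = get_global_id(0);
 *     float3 c = vload3(i, coeffs); // coeffs[3*i], coeffs[3*i+1], coeffs[3*i+2]
 *     out[i] = (float4)(c, 0.0f);
 *   }
 */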
#if defined(__opencl_c_generic_address_space)
-char2 __ovld __purefn vload2(size_t offset, const char *p);
-uchar2 __ovld __purefn vload2(size_t offset, const uchar *p);
-short2 __ovld __purefn vload2(size_t offset, const short *p);
-ushort2 __ovld __purefn vload2(size_t offset, const ushort *p);
-int2 __ovld __purefn vload2(size_t offset, const int *p);
-uint2 __ovld __purefn vload2(size_t offset, const uint *p);
-long2 __ovld __purefn vload2(size_t offset, const long *p);
-ulong2 __ovld __purefn vload2(size_t offset, const ulong *p);
-float2 __ovld __purefn vload2(size_t offset, const float *p);
-char3 __ovld __purefn vload3(size_t offset, const char *p);
-uchar3 __ovld __purefn vload3(size_t offset, const uchar *p);
-short3 __ovld __purefn vload3(size_t offset, const short *p);
-ushort3 __ovld __purefn vload3(size_t offset, const ushort *p);
-int3 __ovld __purefn vload3(size_t offset, const int *p);
-uint3 __ovld __purefn vload3(size_t offset, const uint *p);
-long3 __ovld __purefn vload3(size_t offset, const long *p);
-ulong3 __ovld __purefn vload3(size_t offset, const ulong *p);
-float3 __ovld __purefn vload3(size_t offset, const float *p);
-char4 __ovld __purefn vload4(size_t offset, const char *p);
-uchar4 __ovld __purefn vload4(size_t offset, const uchar *p);
-short4 __ovld __purefn vload4(size_t offset, const short *p);
-ushort4 __ovld __purefn vload4(size_t offset, const ushort *p);
-int4 __ovld __purefn vload4(size_t offset, const int *p);
-uint4 __ovld __purefn vload4(size_t offset, const uint *p);
-long4 __ovld __purefn vload4(size_t offset, const long *p);
-ulong4 __ovld __purefn vload4(size_t offset, const ulong *p);
-float4 __ovld __purefn vload4(size_t offset, const float *p);
-char8 __ovld __purefn vload8(size_t offset, const char *p);
-uchar8 __ovld __purefn vload8(size_t offset, const uchar *p);
-short8 __ovld __purefn vload8(size_t offset, const short *p);
-ushort8 __ovld __purefn vload8(size_t offset, const ushort *p);
-int8 __ovld __purefn vload8(size_t offset, const int *p);
-uint8 __ovld __purefn vload8(size_t offset, const uint *p);
-long8 __ovld __purefn vload8(size_t offset, const long *p);
-ulong8 __ovld __purefn vload8(size_t offset, const ulong *p);
-float8 __ovld __purefn vload8(size_t offset, const float *p);
-char16 __ovld __purefn vload16(size_t offset, const char *p);
-uchar16 __ovld __purefn vload16(size_t offset, const uchar *p);
-short16 __ovld __purefn vload16(size_t offset, const short *p);
-ushort16 __ovld __purefn vload16(size_t offset, const ushort *p);
-int16 __ovld __purefn vload16(size_t offset, const int *p);
-uint16 __ovld __purefn vload16(size_t offset, const uint *p);
-long16 __ovld __purefn vload16(size_t offset, const long *p);
-ulong16 __ovld __purefn vload16(size_t offset, const ulong *p);
-float16 __ovld __purefn vload16(size_t offset, const float *p);
+char2 __ovld __purefn vload2(size_t, const char *);
+uchar2 __ovld __purefn vload2(size_t, const uchar *);
+short2 __ovld __purefn vload2(size_t, const short *);
+ushort2 __ovld __purefn vload2(size_t, const ushort *);
+int2 __ovld __purefn vload2(size_t, const int *);
+uint2 __ovld __purefn vload2(size_t, const uint *);
+long2 __ovld __purefn vload2(size_t, const long *);
+ulong2 __ovld __purefn vload2(size_t, const ulong *);
+float2 __ovld __purefn vload2(size_t, const float *);
+char3 __ovld __purefn vload3(size_t, const char *);
+uchar3 __ovld __purefn vload3(size_t, const uchar *);
+short3 __ovld __purefn vload3(size_t, const short *);
+ushort3 __ovld __purefn vload3(size_t, const ushort *);
+int3 __ovld __purefn vload3(size_t, const int *);
+uint3 __ovld __purefn vload3(size_t, const uint *);
+long3 __ovld __purefn vload3(size_t, const long *);
+ulong3 __ovld __purefn vload3(size_t, const ulong *);
+float3 __ovld __purefn vload3(size_t, const float *);
+char4 __ovld __purefn vload4(size_t, const char *);
+uchar4 __ovld __purefn vload4(size_t, const uchar *);
+short4 __ovld __purefn vload4(size_t, const short *);
+ushort4 __ovld __purefn vload4(size_t, const ushort *);
+int4 __ovld __purefn vload4(size_t, const int *);
+uint4 __ovld __purefn vload4(size_t, const uint *);
+long4 __ovld __purefn vload4(size_t, const long *);
+ulong4 __ovld __purefn vload4(size_t, const ulong *);
+float4 __ovld __purefn vload4(size_t, const float *);
+char8 __ovld __purefn vload8(size_t, const char *);
+uchar8 __ovld __purefn vload8(size_t, const uchar *);
+short8 __ovld __purefn vload8(size_t, const short *);
+ushort8 __ovld __purefn vload8(size_t, const ushort *);
+int8 __ovld __purefn vload8(size_t, const int *);
+uint8 __ovld __purefn vload8(size_t, const uint *);
+long8 __ovld __purefn vload8(size_t, const long *);
+ulong8 __ovld __purefn vload8(size_t, const ulong *);
+float8 __ovld __purefn vload8(size_t, const float *);
+char16 __ovld __purefn vload16(size_t, const char *);
+uchar16 __ovld __purefn vload16(size_t, const uchar *);
+short16 __ovld __purefn vload16(size_t, const short *);
+ushort16 __ovld __purefn vload16(size_t, const ushort *);
+int16 __ovld __purefn vload16(size_t, const int *);
+uint16 __ovld __purefn vload16(size_t, const uint *);
+long16 __ovld __purefn vload16(size_t, const long *);
+ulong16 __ovld __purefn vload16(size_t, const ulong *);
+float16 __ovld __purefn vload16(size_t, const float *);
#ifdef cl_khr_fp64
-double2 __ovld __purefn vload2(size_t offset, const double *p);
-double3 __ovld __purefn vload3(size_t offset, const double *p);
-double4 __ovld __purefn vload4(size_t offset, const double *p);
-double8 __ovld __purefn vload8(size_t offset, const double *p);
-double16 __ovld __purefn vload16(size_t offset, const double *p);
+double2 __ovld __purefn vload2(size_t, const double *);
+double3 __ovld __purefn vload3(size_t, const double *);
+double4 __ovld __purefn vload4(size_t, const double *);
+double8 __ovld __purefn vload8(size_t, const double *);
+double16 __ovld __purefn vload16(size_t, const double *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __purefn vload(size_t offset, const half *p);
-half2 __ovld __purefn vload2(size_t offset, const half *p);
-half3 __ovld __purefn vload3(size_t offset, const half *p);
-half4 __ovld __purefn vload4(size_t offset, const half *p);
-half8 __ovld __purefn vload8(size_t offset, const half *p);
-half16 __ovld __purefn vload16(size_t offset, const half *p);
+half2 __ovld __purefn vload2(size_t, const half *);
+half3 __ovld __purefn vload3(size_t, const half *);
+half4 __ovld __purefn vload4(size_t, const half *);
+half8 __ovld __purefn vload8(size_t, const half *);
+half16 __ovld __purefn vload16(size_t, const half *);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
-char2 __ovld __purefn vload2(size_t offset, const __global char *p);
-uchar2 __ovld __purefn vload2(size_t offset, const __global uchar *p);
-short2 __ovld __purefn vload2(size_t offset, const __global short *p);
-ushort2 __ovld __purefn vload2(size_t offset, const __global ushort *p);
-int2 __ovld __purefn vload2(size_t offset, const __global int *p);
-uint2 __ovld __purefn vload2(size_t offset, const __global uint *p);
-long2 __ovld __purefn vload2(size_t offset, const __global long *p);
-ulong2 __ovld __purefn vload2(size_t offset, const __global ulong *p);
-float2 __ovld __purefn vload2(size_t offset, const __global float *p);
-char3 __ovld __purefn vload3(size_t offset, const __global char *p);
-uchar3 __ovld __purefn vload3(size_t offset, const __global uchar *p);
-short3 __ovld __purefn vload3(size_t offset, const __global short *p);
-ushort3 __ovld __purefn vload3(size_t offset, const __global ushort *p);
-int3 __ovld __purefn vload3(size_t offset, const __global int *p);
-uint3 __ovld __purefn vload3(size_t offset, const __global uint *p);
-long3 __ovld __purefn vload3(size_t offset, const __global long *p);
-ulong3 __ovld __purefn vload3(size_t offset, const __global ulong *p);
-float3 __ovld __purefn vload3(size_t offset, const __global float *p);
-char4 __ovld __purefn vload4(size_t offset, const __global char *p);
-uchar4 __ovld __purefn vload4(size_t offset, const __global uchar *p);
-short4 __ovld __purefn vload4(size_t offset, const __global short *p);
-ushort4 __ovld __purefn vload4(size_t offset, const __global ushort *p);
-int4 __ovld __purefn vload4(size_t offset, const __global int *p);
-uint4 __ovld __purefn vload4(size_t offset, const __global uint *p);
-long4 __ovld __purefn vload4(size_t offset, const __global long *p);
-ulong4 __ovld __purefn vload4(size_t offset, const __global ulong *p);
-float4 __ovld __purefn vload4(size_t offset, const __global float *p);
-char8 __ovld __purefn vload8(size_t offset, const __global char *p);
-uchar8 __ovld __purefn vload8(size_t offset, const __global uchar *p);
-short8 __ovld __purefn vload8(size_t offset, const __global short *p);
-ushort8 __ovld __purefn vload8(size_t offset, const __global ushort *p);
-int8 __ovld __purefn vload8(size_t offset, const __global int *p);
-uint8 __ovld __purefn vload8(size_t offset, const __global uint *p);
-long8 __ovld __purefn vload8(size_t offset, const __global long *p);
-ulong8 __ovld __purefn vload8(size_t offset, const __global ulong *p);
-float8 __ovld __purefn vload8(size_t offset, const __global float *p);
-char16 __ovld __purefn vload16(size_t offset, const __global char *p);
-uchar16 __ovld __purefn vload16(size_t offset, const __global uchar *p);
-short16 __ovld __purefn vload16(size_t offset, const __global short *p);
-ushort16 __ovld __purefn vload16(size_t offset, const __global ushort *p);
-int16 __ovld __purefn vload16(size_t offset, const __global int *p);
-uint16 __ovld __purefn vload16(size_t offset, const __global uint *p);
-long16 __ovld __purefn vload16(size_t offset, const __global long *p);
-ulong16 __ovld __purefn vload16(size_t offset, const __global ulong *p);
-float16 __ovld __purefn vload16(size_t offset, const __global float *p);
-char2 __ovld __purefn vload2(size_t offset, const __local char *p);
-uchar2 __ovld __purefn vload2(size_t offset, const __local uchar *p);
-short2 __ovld __purefn vload2(size_t offset, const __local short *p);
-ushort2 __ovld __purefn vload2(size_t offset, const __local ushort *p);
-int2 __ovld __purefn vload2(size_t offset, const __local int *p);
-uint2 __ovld __purefn vload2(size_t offset, const __local uint *p);
-long2 __ovld __purefn vload2(size_t offset, const __local long *p);
-ulong2 __ovld __purefn vload2(size_t offset, const __local ulong *p);
-float2 __ovld __purefn vload2(size_t offset, const __local float *p);
-char3 __ovld __purefn vload3(size_t offset, const __local char *p);
-uchar3 __ovld __purefn vload3(size_t offset, const __local uchar *p);
-short3 __ovld __purefn vload3(size_t offset, const __local short *p);
-ushort3 __ovld __purefn vload3(size_t offset, const __local ushort *p);
-int3 __ovld __purefn vload3(size_t offset, const __local int *p);
-uint3 __ovld __purefn vload3(size_t offset, const __local uint *p);
-long3 __ovld __purefn vload3(size_t offset, const __local long *p);
-ulong3 __ovld __purefn vload3(size_t offset, const __local ulong *p);
-float3 __ovld __purefn vload3(size_t offset, const __local float *p);
-char4 __ovld __purefn vload4(size_t offset, const __local char *p);
-uchar4 __ovld __purefn vload4(size_t offset, const __local uchar *p);
-short4 __ovld __purefn vload4(size_t offset, const __local short *p);
-ushort4 __ovld __purefn vload4(size_t offset, const __local ushort *p);
-int4 __ovld __purefn vload4(size_t offset, const __local int *p);
-uint4 __ovld __purefn vload4(size_t offset, const __local uint *p);
-long4 __ovld __purefn vload4(size_t offset, const __local long *p);
-ulong4 __ovld __purefn vload4(size_t offset, const __local ulong *p);
-float4 __ovld __purefn vload4(size_t offset, const __local float *p);
-char8 __ovld __purefn vload8(size_t offset, const __local char *p);
-uchar8 __ovld __purefn vload8(size_t offset, const __local uchar *p);
-short8 __ovld __purefn vload8(size_t offset, const __local short *p);
-ushort8 __ovld __purefn vload8(size_t offset, const __local ushort *p);
-int8 __ovld __purefn vload8(size_t offset, const __local int *p);
-uint8 __ovld __purefn vload8(size_t offset, const __local uint *p);
-long8 __ovld __purefn vload8(size_t offset, const __local long *p);
-ulong8 __ovld __purefn vload8(size_t offset, const __local ulong *p);
-float8 __ovld __purefn vload8(size_t offset, const __local float *p);
-char16 __ovld __purefn vload16(size_t offset, const __local char *p);
-uchar16 __ovld __purefn vload16(size_t offset, const __local uchar *p);
-short16 __ovld __purefn vload16(size_t offset, const __local short *p);
-ushort16 __ovld __purefn vload16(size_t offset, const __local ushort *p);
-int16 __ovld __purefn vload16(size_t offset, const __local int *p);
-uint16 __ovld __purefn vload16(size_t offset, const __local uint *p);
-long16 __ovld __purefn vload16(size_t offset, const __local long *p);
-ulong16 __ovld __purefn vload16(size_t offset, const __local ulong *p);
-float16 __ovld __purefn vload16(size_t offset, const __local float *p);
-char2 __ovld __purefn vload2(size_t offset, const __private char *p);
-uchar2 __ovld __purefn vload2(size_t offset, const __private uchar *p);
-short2 __ovld __purefn vload2(size_t offset, const __private short *p);
-ushort2 __ovld __purefn vload2(size_t offset, const __private ushort *p);
-int2 __ovld __purefn vload2(size_t offset, const __private int *p);
-uint2 __ovld __purefn vload2(size_t offset, const __private uint *p);
-long2 __ovld __purefn vload2(size_t offset, const __private long *p);
-ulong2 __ovld __purefn vload2(size_t offset, const __private ulong *p);
-float2 __ovld __purefn vload2(size_t offset, const __private float *p);
-char3 __ovld __purefn vload3(size_t offset, const __private char *p);
-uchar3 __ovld __purefn vload3(size_t offset, const __private uchar *p);
-short3 __ovld __purefn vload3(size_t offset, const __private short *p);
-ushort3 __ovld __purefn vload3(size_t offset, const __private ushort *p);
-int3 __ovld __purefn vload3(size_t offset, const __private int *p);
-uint3 __ovld __purefn vload3(size_t offset, const __private uint *p);
-long3 __ovld __purefn vload3(size_t offset, const __private long *p);
-ulong3 __ovld __purefn vload3(size_t offset, const __private ulong *p);
-float3 __ovld __purefn vload3(size_t offset, const __private float *p);
-char4 __ovld __purefn vload4(size_t offset, const __private char *p);
-uchar4 __ovld __purefn vload4(size_t offset, const __private uchar *p);
-short4 __ovld __purefn vload4(size_t offset, const __private short *p);
-ushort4 __ovld __purefn vload4(size_t offset, const __private ushort *p);
-int4 __ovld __purefn vload4(size_t offset, const __private int *p);
-uint4 __ovld __purefn vload4(size_t offset, const __private uint *p);
-long4 __ovld __purefn vload4(size_t offset, const __private long *p);
-ulong4 __ovld __purefn vload4(size_t offset, const __private ulong *p);
-float4 __ovld __purefn vload4(size_t offset, const __private float *p);
-char8 __ovld __purefn vload8(size_t offset, const __private char *p);
-uchar8 __ovld __purefn vload8(size_t offset, const __private uchar *p);
-short8 __ovld __purefn vload8(size_t offset, const __private short *p);
-ushort8 __ovld __purefn vload8(size_t offset, const __private ushort *p);
-int8 __ovld __purefn vload8(size_t offset, const __private int *p);
-uint8 __ovld __purefn vload8(size_t offset, const __private uint *p);
-long8 __ovld __purefn vload8(size_t offset, const __private long *p);
-ulong8 __ovld __purefn vload8(size_t offset, const __private ulong *p);
-float8 __ovld __purefn vload8(size_t offset, const __private float *p);
-char16 __ovld __purefn vload16(size_t offset, const __private char *p);
-uchar16 __ovld __purefn vload16(size_t offset, const __private uchar *p);
-short16 __ovld __purefn vload16(size_t offset, const __private short *p);
-ushort16 __ovld __purefn vload16(size_t offset, const __private ushort *p);
-int16 __ovld __purefn vload16(size_t offset, const __private int *p);
-uint16 __ovld __purefn vload16(size_t offset, const __private uint *p);
-long16 __ovld __purefn vload16(size_t offset, const __private long *p);
-ulong16 __ovld __purefn vload16(size_t offset, const __private ulong *p);
-float16 __ovld __purefn vload16(size_t offset, const __private float *p);
+char2 __ovld __purefn vload2(size_t, const __global char *);
+uchar2 __ovld __purefn vload2(size_t, const __global uchar *);
+short2 __ovld __purefn vload2(size_t, const __global short *);
+ushort2 __ovld __purefn vload2(size_t, const __global ushort *);
+int2 __ovld __purefn vload2(size_t, const __global int *);
+uint2 __ovld __purefn vload2(size_t, const __global uint *);
+long2 __ovld __purefn vload2(size_t, const __global long *);
+ulong2 __ovld __purefn vload2(size_t, const __global ulong *);
+float2 __ovld __purefn vload2(size_t, const __global float *);
+char3 __ovld __purefn vload3(size_t, const __global char *);
+uchar3 __ovld __purefn vload3(size_t, const __global uchar *);
+short3 __ovld __purefn vload3(size_t, const __global short *);
+ushort3 __ovld __purefn vload3(size_t, const __global ushort *);
+int3 __ovld __purefn vload3(size_t, const __global int *);
+uint3 __ovld __purefn vload3(size_t, const __global uint *);
+long3 __ovld __purefn vload3(size_t, const __global long *);
+ulong3 __ovld __purefn vload3(size_t, const __global ulong *);
+float3 __ovld __purefn vload3(size_t, const __global float *);
+char4 __ovld __purefn vload4(size_t, const __global char *);
+uchar4 __ovld __purefn vload4(size_t, const __global uchar *);
+short4 __ovld __purefn vload4(size_t, const __global short *);
+ushort4 __ovld __purefn vload4(size_t, const __global ushort *);
+int4 __ovld __purefn vload4(size_t, const __global int *);
+uint4 __ovld __purefn vload4(size_t, const __global uint *);
+long4 __ovld __purefn vload4(size_t, const __global long *);
+ulong4 __ovld __purefn vload4(size_t, const __global ulong *);
+float4 __ovld __purefn vload4(size_t, const __global float *);
+char8 __ovld __purefn vload8(size_t, const __global char *);
+uchar8 __ovld __purefn vload8(size_t, const __global uchar *);
+short8 __ovld __purefn vload8(size_t, const __global short *);
+ushort8 __ovld __purefn vload8(size_t, const __global ushort *);
+int8 __ovld __purefn vload8(size_t, const __global int *);
+uint8 __ovld __purefn vload8(size_t, const __global uint *);
+long8 __ovld __purefn vload8(size_t, const __global long *);
+ulong8 __ovld __purefn vload8(size_t, const __global ulong *);
+float8 __ovld __purefn vload8(size_t, const __global float *);
+char16 __ovld __purefn vload16(size_t, const __global char *);
+uchar16 __ovld __purefn vload16(size_t, const __global uchar *);
+short16 __ovld __purefn vload16(size_t, const __global short *);
+ushort16 __ovld __purefn vload16(size_t, const __global ushort *);
+int16 __ovld __purefn vload16(size_t, const __global int *);
+uint16 __ovld __purefn vload16(size_t, const __global uint *);
+long16 __ovld __purefn vload16(size_t, const __global long *);
+ulong16 __ovld __purefn vload16(size_t, const __global ulong *);
+float16 __ovld __purefn vload16(size_t, const __global float *);
+char2 __ovld __purefn vload2(size_t, const __local char *);
+uchar2 __ovld __purefn vload2(size_t, const __local uchar *);
+short2 __ovld __purefn vload2(size_t, const __local short *);
+ushort2 __ovld __purefn vload2(size_t, const __local ushort *);
+int2 __ovld __purefn vload2(size_t, const __local int *);
+uint2 __ovld __purefn vload2(size_t, const __local uint *);
+long2 __ovld __purefn vload2(size_t, const __local long *);
+ulong2 __ovld __purefn vload2(size_t, const __local ulong *);
+float2 __ovld __purefn vload2(size_t, const __local float *);
+char3 __ovld __purefn vload3(size_t, const __local char *);
+uchar3 __ovld __purefn vload3(size_t, const __local uchar *);
+short3 __ovld __purefn vload3(size_t, const __local short *);
+ushort3 __ovld __purefn vload3(size_t, const __local ushort *);
+int3 __ovld __purefn vload3(size_t, const __local int *);
+uint3 __ovld __purefn vload3(size_t, const __local uint *);
+long3 __ovld __purefn vload3(size_t, const __local long *);
+ulong3 __ovld __purefn vload3(size_t, const __local ulong *);
+float3 __ovld __purefn vload3(size_t, const __local float *);
+char4 __ovld __purefn vload4(size_t, const __local char *);
+uchar4 __ovld __purefn vload4(size_t, const __local uchar *);
+short4 __ovld __purefn vload4(size_t, const __local short *);
+ushort4 __ovld __purefn vload4(size_t, const __local ushort *);
+int4 __ovld __purefn vload4(size_t, const __local int *);
+uint4 __ovld __purefn vload4(size_t, const __local uint *);
+long4 __ovld __purefn vload4(size_t, const __local long *);
+ulong4 __ovld __purefn vload4(size_t, const __local ulong *);
+float4 __ovld __purefn vload4(size_t, const __local float *);
+char8 __ovld __purefn vload8(size_t, const __local char *);
+uchar8 __ovld __purefn vload8(size_t, const __local uchar *);
+short8 __ovld __purefn vload8(size_t, const __local short *);
+ushort8 __ovld __purefn vload8(size_t, const __local ushort *);
+int8 __ovld __purefn vload8(size_t, const __local int *);
+uint8 __ovld __purefn vload8(size_t, const __local uint *);
+long8 __ovld __purefn vload8(size_t, const __local long *);
+ulong8 __ovld __purefn vload8(size_t, const __local ulong *);
+float8 __ovld __purefn vload8(size_t, const __local float *);
+char16 __ovld __purefn vload16(size_t, const __local char *);
+uchar16 __ovld __purefn vload16(size_t, const __local uchar *);
+short16 __ovld __purefn vload16(size_t, const __local short *);
+ushort16 __ovld __purefn vload16(size_t, const __local ushort *);
+int16 __ovld __purefn vload16(size_t, const __local int *);
+uint16 __ovld __purefn vload16(size_t, const __local uint *);
+long16 __ovld __purefn vload16(size_t, const __local long *);
+ulong16 __ovld __purefn vload16(size_t, const __local ulong *);
+float16 __ovld __purefn vload16(size_t, const __local float *);
+char2 __ovld __purefn vload2(size_t, const __private char *);
+uchar2 __ovld __purefn vload2(size_t, const __private uchar *);
+short2 __ovld __purefn vload2(size_t, const __private short *);
+ushort2 __ovld __purefn vload2(size_t, const __private ushort *);
+int2 __ovld __purefn vload2(size_t, const __private int *);
+uint2 __ovld __purefn vload2(size_t, const __private uint *);
+long2 __ovld __purefn vload2(size_t, const __private long *);
+ulong2 __ovld __purefn vload2(size_t, const __private ulong *);
+float2 __ovld __purefn vload2(size_t, const __private float *);
+char3 __ovld __purefn vload3(size_t, const __private char *);
+uchar3 __ovld __purefn vload3(size_t, const __private uchar *);
+short3 __ovld __purefn vload3(size_t, const __private short *);
+ushort3 __ovld __purefn vload3(size_t, const __private ushort *);
+int3 __ovld __purefn vload3(size_t, const __private int *);
+uint3 __ovld __purefn vload3(size_t, const __private uint *);
+long3 __ovld __purefn vload3(size_t, const __private long *);
+ulong3 __ovld __purefn vload3(size_t, const __private ulong *);
+float3 __ovld __purefn vload3(size_t, const __private float *);
+char4 __ovld __purefn vload4(size_t, const __private char *);
+uchar4 __ovld __purefn vload4(size_t, const __private uchar *);
+short4 __ovld __purefn vload4(size_t, const __private short *);
+ushort4 __ovld __purefn vload4(size_t, const __private ushort *);
+int4 __ovld __purefn vload4(size_t, const __private int *);
+uint4 __ovld __purefn vload4(size_t, const __private uint *);
+long4 __ovld __purefn vload4(size_t, const __private long *);
+ulong4 __ovld __purefn vload4(size_t, const __private ulong *);
+float4 __ovld __purefn vload4(size_t, const __private float *);
+char8 __ovld __purefn vload8(size_t, const __private char *);
+uchar8 __ovld __purefn vload8(size_t, const __private uchar *);
+short8 __ovld __purefn vload8(size_t, const __private short *);
+ushort8 __ovld __purefn vload8(size_t, const __private ushort *);
+int8 __ovld __purefn vload8(size_t, const __private int *);
+uint8 __ovld __purefn vload8(size_t, const __private uint *);
+long8 __ovld __purefn vload8(size_t, const __private long *);
+ulong8 __ovld __purefn vload8(size_t, const __private ulong *);
+float8 __ovld __purefn vload8(size_t, const __private float *);
+char16 __ovld __purefn vload16(size_t, const __private char *);
+uchar16 __ovld __purefn vload16(size_t, const __private uchar *);
+short16 __ovld __purefn vload16(size_t, const __private short *);
+ushort16 __ovld __purefn vload16(size_t, const __private ushort *);
+int16 __ovld __purefn vload16(size_t, const __private int *);
+uint16 __ovld __purefn vload16(size_t, const __private uint *);
+long16 __ovld __purefn vload16(size_t, const __private long *);
+ulong16 __ovld __purefn vload16(size_t, const __private ulong *);
+float16 __ovld __purefn vload16(size_t, const __private float *);
#ifdef cl_khr_fp64
-double2 __ovld __purefn vload2(size_t offset, const __global double *p);
-double3 __ovld __purefn vload3(size_t offset, const __global double *p);
-double4 __ovld __purefn vload4(size_t offset, const __global double *p);
-double8 __ovld __purefn vload8(size_t offset, const __global double *p);
-double16 __ovld __purefn vload16(size_t offset, const __global double *p);
-double2 __ovld __purefn vload2(size_t offset, const __local double *p);
-double3 __ovld __purefn vload3(size_t offset, const __local double *p);
-double4 __ovld __purefn vload4(size_t offset, const __local double *p);
-double8 __ovld __purefn vload8(size_t offset, const __local double *p);
-double16 __ovld __purefn vload16(size_t offset, const __local double *p);
-double2 __ovld __purefn vload2(size_t offset, const __private double *p);
-double3 __ovld __purefn vload3(size_t offset, const __private double *p);
-double4 __ovld __purefn vload4(size_t offset, const __private double *p);
-double8 __ovld __purefn vload8(size_t offset, const __private double *p);
-double16 __ovld __purefn vload16(size_t offset, const __private double *p);
+double2 __ovld __purefn vload2(size_t, const __global double *);
+double3 __ovld __purefn vload3(size_t, const __global double *);
+double4 __ovld __purefn vload4(size_t, const __global double *);
+double8 __ovld __purefn vload8(size_t, const __global double *);
+double16 __ovld __purefn vload16(size_t, const __global double *);
+double2 __ovld __purefn vload2(size_t, const __local double *);
+double3 __ovld __purefn vload3(size_t, const __local double *);
+double4 __ovld __purefn vload4(size_t, const __local double *);
+double8 __ovld __purefn vload8(size_t, const __local double *);
+double16 __ovld __purefn vload16(size_t, const __local double *);
+double2 __ovld __purefn vload2(size_t, const __private double *);
+double3 __ovld __purefn vload3(size_t, const __private double *);
+double4 __ovld __purefn vload4(size_t, const __private double *);
+double8 __ovld __purefn vload8(size_t, const __private double *);
+double16 __ovld __purefn vload16(size_t, const __private double *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __purefn vload(size_t offset, const __global half *p);
-half2 __ovld __purefn vload2(size_t offset, const __global half *p);
-half3 __ovld __purefn vload3(size_t offset, const __global half *p);
-half4 __ovld __purefn vload4(size_t offset, const __global half *p);
-half8 __ovld __purefn vload8(size_t offset, const __global half *p);
-half16 __ovld __purefn vload16(size_t offset, const __global half *p);
-half __ovld __purefn vload(size_t offset, const __local half *p);
-half2 __ovld __purefn vload2(size_t offset, const __local half *p);
-half3 __ovld __purefn vload3(size_t offset, const __local half *p);
-half4 __ovld __purefn vload4(size_t offset, const __local half *p);
-half8 __ovld __purefn vload8(size_t offset, const __local half *p);
-half16 __ovld __purefn vload16(size_t offset, const __local half *p);
-half __ovld __purefn vload(size_t offset, const __private half *p);
-half2 __ovld __purefn vload2(size_t offset, const __private half *p);
-half3 __ovld __purefn vload3(size_t offset, const __private half *p);
-half4 __ovld __purefn vload4(size_t offset, const __private half *p);
-half8 __ovld __purefn vload8(size_t offset, const __private half *p);
-half16 __ovld __purefn vload16(size_t offset, const __private half *p);
+half2 __ovld __purefn vload2(size_t, const __global half *);
+half3 __ovld __purefn vload3(size_t, const __global half *);
+half4 __ovld __purefn vload4(size_t, const __global half *);
+half8 __ovld __purefn vload8(size_t, const __global half *);
+half16 __ovld __purefn vload16(size_t, const __global half *);
+half2 __ovld __purefn vload2(size_t, const __local half *);
+half3 __ovld __purefn vload3(size_t, const __local half *);
+half4 __ovld __purefn vload4(size_t, const __local half *);
+half8 __ovld __purefn vload8(size_t, const __local half *);
+half16 __ovld __purefn vload16(size_t, const __local half *);
+half2 __ovld __purefn vload2(size_t, const __private half *);
+half3 __ovld __purefn vload3(size_t, const __private half *);
+half4 __ovld __purefn vload4(size_t, const __private half *);
+half8 __ovld __purefn vload8(size_t, const __private half *);
+half16 __ovld __purefn vload16(size_t, const __private half *);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_named_address_space_builtins)
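/*
 * Illustrative sketch, not part of the header or of the change above: the
 * vloadn overloads read n consecutive elements starting at (p + n * offset)
 * and behave identically in every address space listed. The kernel name and
 * buffer layout below are hypothetical.
 */
__kernel void sum4(__global const float *src, __global float *dst) {
    size_t gid = get_global_id(0);
    float4 v = vload4(gid, src); /* reads src[4*gid] .. src[4*gid + 3] */
    dst[gid] = v.x + v.y + v.z + v.w;
}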
#if defined(__opencl_c_generic_address_space)
-void __ovld vstore2(char2 data, size_t offset, char *p);
-void __ovld vstore2(uchar2 data, size_t offset, uchar *p);
-void __ovld vstore2(short2 data, size_t offset, short *p);
-void __ovld vstore2(ushort2 data, size_t offset, ushort *p);
-void __ovld vstore2(int2 data, size_t offset, int *p);
-void __ovld vstore2(uint2 data, size_t offset, uint *p);
-void __ovld vstore2(long2 data, size_t offset, long *p);
-void __ovld vstore2(ulong2 data, size_t offset, ulong *p);
-void __ovld vstore2(float2 data, size_t offset, float *p);
-void __ovld vstore3(char3 data, size_t offset, char *p);
-void __ovld vstore3(uchar3 data, size_t offset, uchar *p);
-void __ovld vstore3(short3 data, size_t offset, short *p);
-void __ovld vstore3(ushort3 data, size_t offset, ushort *p);
-void __ovld vstore3(int3 data, size_t offset, int *p);
-void __ovld vstore3(uint3 data, size_t offset, uint *p);
-void __ovld vstore3(long3 data, size_t offset, long *p);
-void __ovld vstore3(ulong3 data, size_t offset, ulong *p);
-void __ovld vstore3(float3 data, size_t offset, float *p);
-void __ovld vstore4(char4 data, size_t offset, char *p);
-void __ovld vstore4(uchar4 data, size_t offset, uchar *p);
-void __ovld vstore4(short4 data, size_t offset, short *p);
-void __ovld vstore4(ushort4 data, size_t offset, ushort *p);
-void __ovld vstore4(int4 data, size_t offset, int *p);
-void __ovld vstore4(uint4 data, size_t offset, uint *p);
-void __ovld vstore4(long4 data, size_t offset, long *p);
-void __ovld vstore4(ulong4 data, size_t offset, ulong *p);
-void __ovld vstore4(float4 data, size_t offset, float *p);
-void __ovld vstore8(char8 data, size_t offset, char *p);
-void __ovld vstore8(uchar8 data, size_t offset, uchar *p);
-void __ovld vstore8(short8 data, size_t offset, short *p);
-void __ovld vstore8(ushort8 data, size_t offset, ushort *p);
-void __ovld vstore8(int8 data, size_t offset, int *p);
-void __ovld vstore8(uint8 data, size_t offset, uint *p);
-void __ovld vstore8(long8 data, size_t offset, long *p);
-void __ovld vstore8(ulong8 data, size_t offset, ulong *p);
-void __ovld vstore8(float8 data, size_t offset, float *p);
-void __ovld vstore16(char16 data, size_t offset, char *p);
-void __ovld vstore16(uchar16 data, size_t offset, uchar *p);
-void __ovld vstore16(short16 data, size_t offset, short *p);
-void __ovld vstore16(ushort16 data, size_t offset, ushort *p);
-void __ovld vstore16(int16 data, size_t offset, int *p);
-void __ovld vstore16(uint16 data, size_t offset, uint *p);
-void __ovld vstore16(long16 data, size_t offset, long *p);
-void __ovld vstore16(ulong16 data, size_t offset, ulong *p);
-void __ovld vstore16(float16 data, size_t offset, float *p);
+void __ovld vstore2(char2, size_t, char *);
+void __ovld vstore2(uchar2, size_t, uchar *);
+void __ovld vstore2(short2, size_t, short *);
+void __ovld vstore2(ushort2, size_t, ushort *);
+void __ovld vstore2(int2, size_t, int *);
+void __ovld vstore2(uint2, size_t, uint *);
+void __ovld vstore2(long2, size_t, long *);
+void __ovld vstore2(ulong2, size_t, ulong *);
+void __ovld vstore2(float2, size_t, float *);
+void __ovld vstore3(char3, size_t, char *);
+void __ovld vstore3(uchar3, size_t, uchar *);
+void __ovld vstore3(short3, size_t, short *);
+void __ovld vstore3(ushort3, size_t, ushort *);
+void __ovld vstore3(int3, size_t, int *);
+void __ovld vstore3(uint3, size_t, uint *);
+void __ovld vstore3(long3, size_t, long *);
+void __ovld vstore3(ulong3, size_t, ulong *);
+void __ovld vstore3(float3, size_t, float *);
+void __ovld vstore4(char4, size_t, char *);
+void __ovld vstore4(uchar4, size_t, uchar *);
+void __ovld vstore4(short4, size_t, short *);
+void __ovld vstore4(ushort4, size_t, ushort *);
+void __ovld vstore4(int4, size_t, int *);
+void __ovld vstore4(uint4, size_t, uint *);
+void __ovld vstore4(long4, size_t, long *);
+void __ovld vstore4(ulong4, size_t, ulong *);
+void __ovld vstore4(float4, size_t, float *);
+void __ovld vstore8(char8, size_t, char *);
+void __ovld vstore8(uchar8, size_t, uchar *);
+void __ovld vstore8(short8, size_t, short *);
+void __ovld vstore8(ushort8, size_t, ushort *);
+void __ovld vstore8(int8, size_t, int *);
+void __ovld vstore8(uint8, size_t, uint *);
+void __ovld vstore8(long8, size_t, long *);
+void __ovld vstore8(ulong8, size_t, ulong *);
+void __ovld vstore8(float8, size_t, float *);
+void __ovld vstore16(char16, size_t, char *);
+void __ovld vstore16(uchar16, size_t, uchar *);
+void __ovld vstore16(short16, size_t, short *);
+void __ovld vstore16(ushort16, size_t, ushort *);
+void __ovld vstore16(int16, size_t, int *);
+void __ovld vstore16(uint16, size_t, uint *);
+void __ovld vstore16(long16, size_t, long *);
+void __ovld vstore16(ulong16, size_t, ulong *);
+void __ovld vstore16(float16, size_t, float *);
#ifdef cl_khr_fp64
-void __ovld vstore2(double2 data, size_t offset, double *p);
-void __ovld vstore3(double3 data, size_t offset, double *p);
-void __ovld vstore4(double4 data, size_t offset, double *p);
-void __ovld vstore8(double8 data, size_t offset, double *p);
-void __ovld vstore16(double16 data, size_t offset, double *p);
+void __ovld vstore2(double2, size_t, double *);
+void __ovld vstore3(double3, size_t, double *);
+void __ovld vstore4(double4, size_t, double *);
+void __ovld vstore8(double8, size_t, double *);
+void __ovld vstore16(double16, size_t, double *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-void __ovld vstore(half data, size_t offset, half *p);
-void __ovld vstore2(half2 data, size_t offset, half *p);
-void __ovld vstore3(half3 data, size_t offset, half *p);
-void __ovld vstore4(half4 data, size_t offset, half *p);
-void __ovld vstore8(half8 data, size_t offset, half *p);
-void __ovld vstore16(half16 data, size_t offset, half *p);
+void __ovld vstore2(half2, size_t, half *);
+void __ovld vstore3(half3, size_t, half *);
+void __ovld vstore4(half4, size_t, half *);
+void __ovld vstore8(half8, size_t, half *);
+void __ovld vstore16(half16, size_t, half *);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
-void __ovld vstore2(char2 data, size_t offset, __global char *p);
-void __ovld vstore2(uchar2 data, size_t offset, __global uchar *p);
-void __ovld vstore2(short2 data, size_t offset, __global short *p);
-void __ovld vstore2(ushort2 data, size_t offset, __global ushort *p);
-void __ovld vstore2(int2 data, size_t offset, __global int *p);
-void __ovld vstore2(uint2 data, size_t offset, __global uint *p);
-void __ovld vstore2(long2 data, size_t offset, __global long *p);
-void __ovld vstore2(ulong2 data, size_t offset, __global ulong *p);
-void __ovld vstore2(float2 data, size_t offset, __global float *p);
-void __ovld vstore3(char3 data, size_t offset, __global char *p);
-void __ovld vstore3(uchar3 data, size_t offset, __global uchar *p);
-void __ovld vstore3(short3 data, size_t offset, __global short *p);
-void __ovld vstore3(ushort3 data, size_t offset, __global ushort *p);
-void __ovld vstore3(int3 data, size_t offset, __global int *p);
-void __ovld vstore3(uint3 data, size_t offset, __global uint *p);
-void __ovld vstore3(long3 data, size_t offset, __global long *p);
-void __ovld vstore3(ulong3 data, size_t offset, __global ulong *p);
-void __ovld vstore3(float3 data, size_t offset, __global float *p);
-void __ovld vstore4(char4 data, size_t offset, __global char *p);
-void __ovld vstore4(uchar4 data, size_t offset, __global uchar *p);
-void __ovld vstore4(short4 data, size_t offset, __global short *p);
-void __ovld vstore4(ushort4 data, size_t offset, __global ushort *p);
-void __ovld vstore4(int4 data, size_t offset, __global int *p);
-void __ovld vstore4(uint4 data, size_t offset, __global uint *p);
-void __ovld vstore4(long4 data, size_t offset, __global long *p);
-void __ovld vstore4(ulong4 data, size_t offset, __global ulong *p);
-void __ovld vstore4(float4 data, size_t offset, __global float *p);
-void __ovld vstore8(char8 data, size_t offset, __global char *p);
-void __ovld vstore8(uchar8 data, size_t offset, __global uchar *p);
-void __ovld vstore8(short8 data, size_t offset, __global short *p);
-void __ovld vstore8(ushort8 data, size_t offset, __global ushort *p);
-void __ovld vstore8(int8 data, size_t offset, __global int *p);
-void __ovld vstore8(uint8 data, size_t offset, __global uint *p);
-void __ovld vstore8(long8 data, size_t offset, __global long *p);
-void __ovld vstore8(ulong8 data, size_t offset, __global ulong *p);
-void __ovld vstore8(float8 data, size_t offset, __global float *p);
-void __ovld vstore16(char16 data, size_t offset, __global char *p);
-void __ovld vstore16(uchar16 data, size_t offset, __global uchar *p);
-void __ovld vstore16(short16 data, size_t offset, __global short *p);
-void __ovld vstore16(ushort16 data, size_t offset, __global ushort *p);
-void __ovld vstore16(int16 data, size_t offset, __global int *p);
-void __ovld vstore16(uint16 data, size_t offset, __global uint *p);
-void __ovld vstore16(long16 data, size_t offset, __global long *p);
-void __ovld vstore16(ulong16 data, size_t offset, __global ulong *p);
-void __ovld vstore16(float16 data, size_t offset, __global float *p);
-void __ovld vstore2(char2 data, size_t offset, __local char *p);
-void __ovld vstore2(uchar2 data, size_t offset, __local uchar *p);
-void __ovld vstore2(short2 data, size_t offset, __local short *p);
-void __ovld vstore2(ushort2 data, size_t offset, __local ushort *p);
-void __ovld vstore2(int2 data, size_t offset, __local int *p);
-void __ovld vstore2(uint2 data, size_t offset, __local uint *p);
-void __ovld vstore2(long2 data, size_t offset, __local long *p);
-void __ovld vstore2(ulong2 data, size_t offset, __local ulong *p);
-void __ovld vstore2(float2 data, size_t offset, __local float *p);
-void __ovld vstore3(char3 data, size_t offset, __local char *p);
-void __ovld vstore3(uchar3 data, size_t offset, __local uchar *p);
-void __ovld vstore3(short3 data, size_t offset, __local short *p);
-void __ovld vstore3(ushort3 data, size_t offset, __local ushort *p);
-void __ovld vstore3(int3 data, size_t offset, __local int *p);
-void __ovld vstore3(uint3 data, size_t offset, __local uint *p);
-void __ovld vstore3(long3 data, size_t offset, __local long *p);
-void __ovld vstore3(ulong3 data, size_t offset, __local ulong *p);
-void __ovld vstore3(float3 data, size_t offset, __local float *p);
-void __ovld vstore4(char4 data, size_t offset, __local char *p);
-void __ovld vstore4(uchar4 data, size_t offset, __local uchar *p);
-void __ovld vstore4(short4 data, size_t offset, __local short *p);
-void __ovld vstore4(ushort4 data, size_t offset, __local ushort *p);
-void __ovld vstore4(int4 data, size_t offset, __local int *p);
-void __ovld vstore4(uint4 data, size_t offset, __local uint *p);
-void __ovld vstore4(long4 data, size_t offset, __local long *p);
-void __ovld vstore4(ulong4 data, size_t offset, __local ulong *p);
-void __ovld vstore4(float4 data, size_t offset, __local float *p);
-void __ovld vstore8(char8 data, size_t offset, __local char *p);
-void __ovld vstore8(uchar8 data, size_t offset, __local uchar *p);
-void __ovld vstore8(short8 data, size_t offset, __local short *p);
-void __ovld vstore8(ushort8 data, size_t offset, __local ushort *p);
-void __ovld vstore8(int8 data, size_t offset, __local int *p);
-void __ovld vstore8(uint8 data, size_t offset, __local uint *p);
-void __ovld vstore8(long8 data, size_t offset, __local long *p);
-void __ovld vstore8(ulong8 data, size_t offset, __local ulong *p);
-void __ovld vstore8(float8 data, size_t offset, __local float *p);
-void __ovld vstore16(char16 data, size_t offset, __local char *p);
-void __ovld vstore16(uchar16 data, size_t offset, __local uchar *p);
-void __ovld vstore16(short16 data, size_t offset, __local short *p);
-void __ovld vstore16(ushort16 data, size_t offset, __local ushort *p);
-void __ovld vstore16(int16 data, size_t offset, __local int *p);
-void __ovld vstore16(uint16 data, size_t offset, __local uint *p);
-void __ovld vstore16(long16 data, size_t offset, __local long *p);
-void __ovld vstore16(ulong16 data, size_t offset, __local ulong *p);
-void __ovld vstore16(float16 data, size_t offset, __local float *p);
-void __ovld vstore2(char2 data, size_t offset, __private char *p);
-void __ovld vstore2(uchar2 data, size_t offset, __private uchar *p);
-void __ovld vstore2(short2 data, size_t offset, __private short *p);
-void __ovld vstore2(ushort2 data, size_t offset, __private ushort *p);
-void __ovld vstore2(int2 data, size_t offset, __private int *p);
-void __ovld vstore2(uint2 data, size_t offset, __private uint *p);
-void __ovld vstore2(long2 data, size_t offset, __private long *p);
-void __ovld vstore2(ulong2 data, size_t offset, __private ulong *p);
-void __ovld vstore2(float2 data, size_t offset, __private float *p);
-void __ovld vstore3(char3 data, size_t offset, __private char *p);
-void __ovld vstore3(uchar3 data, size_t offset, __private uchar *p);
-void __ovld vstore3(short3 data, size_t offset, __private short *p);
-void __ovld vstore3(ushort3 data, size_t offset, __private ushort *p);
-void __ovld vstore3(int3 data, size_t offset, __private int *p);
-void __ovld vstore3(uint3 data, size_t offset, __private uint *p);
-void __ovld vstore3(long3 data, size_t offset, __private long *p);
-void __ovld vstore3(ulong3 data, size_t offset, __private ulong *p);
-void __ovld vstore3(float3 data, size_t offset, __private float *p);
-void __ovld vstore4(char4 data, size_t offset, __private char *p);
-void __ovld vstore4(uchar4 data, size_t offset, __private uchar *p);
-void __ovld vstore4(short4 data, size_t offset, __private short *p);
-void __ovld vstore4(ushort4 data, size_t offset, __private ushort *p);
-void __ovld vstore4(int4 data, size_t offset, __private int *p);
-void __ovld vstore4(uint4 data, size_t offset, __private uint *p);
-void __ovld vstore4(long4 data, size_t offset, __private long *p);
-void __ovld vstore4(ulong4 data, size_t offset, __private ulong *p);
-void __ovld vstore4(float4 data, size_t offset, __private float *p);
-void __ovld vstore8(char8 data, size_t offset, __private char *p);
-void __ovld vstore8(uchar8 data, size_t offset, __private uchar *p);
-void __ovld vstore8(short8 data, size_t offset, __private short *p);
-void __ovld vstore8(ushort8 data, size_t offset, __private ushort *p);
-void __ovld vstore8(int8 data, size_t offset, __private int *p);
-void __ovld vstore8(uint8 data, size_t offset, __private uint *p);
-void __ovld vstore8(long8 data, size_t offset, __private long *p);
-void __ovld vstore8(ulong8 data, size_t offset, __private ulong *p);
-void __ovld vstore8(float8 data, size_t offset, __private float *p);
-void __ovld vstore16(char16 data, size_t offset, __private char *p);
-void __ovld vstore16(uchar16 data, size_t offset, __private uchar *p);
-void __ovld vstore16(short16 data, size_t offset, __private short *p);
-void __ovld vstore16(ushort16 data, size_t offset, __private ushort *p);
-void __ovld vstore16(int16 data, size_t offset, __private int *p);
-void __ovld vstore16(uint16 data, size_t offset, __private uint *p);
-void __ovld vstore16(long16 data, size_t offset, __private long *p);
-void __ovld vstore16(ulong16 data, size_t offset, __private ulong *p);
-void __ovld vstore16(float16 data, size_t offset, __private float *p);
+void __ovld vstore2(char2, size_t, __global char *);
+void __ovld vstore2(uchar2, size_t, __global uchar *);
+void __ovld vstore2(short2, size_t, __global short *);
+void __ovld vstore2(ushort2, size_t, __global ushort *);
+void __ovld vstore2(int2, size_t, __global int *);
+void __ovld vstore2(uint2, size_t, __global uint *);
+void __ovld vstore2(long2, size_t, __global long *);
+void __ovld vstore2(ulong2, size_t, __global ulong *);
+void __ovld vstore2(float2, size_t, __global float *);
+void __ovld vstore3(char3, size_t, __global char *);
+void __ovld vstore3(uchar3, size_t, __global uchar *);
+void __ovld vstore3(short3, size_t, __global short *);
+void __ovld vstore3(ushort3, size_t, __global ushort *);
+void __ovld vstore3(int3, size_t, __global int *);
+void __ovld vstore3(uint3, size_t, __global uint *);
+void __ovld vstore3(long3, size_t, __global long *);
+void __ovld vstore3(ulong3, size_t, __global ulong *);
+void __ovld vstore3(float3, size_t, __global float *);
+void __ovld vstore4(char4, size_t, __global char *);
+void __ovld vstore4(uchar4, size_t, __global uchar *);
+void __ovld vstore4(short4, size_t, __global short *);
+void __ovld vstore4(ushort4, size_t, __global ushort *);
+void __ovld vstore4(int4, size_t, __global int *);
+void __ovld vstore4(uint4, size_t, __global uint *);
+void __ovld vstore4(long4, size_t, __global long *);
+void __ovld vstore4(ulong4, size_t, __global ulong *);
+void __ovld vstore4(float4, size_t, __global float *);
+void __ovld vstore8(char8, size_t, __global char *);
+void __ovld vstore8(uchar8, size_t, __global uchar *);
+void __ovld vstore8(short8, size_t, __global short *);
+void __ovld vstore8(ushort8, size_t, __global ushort *);
+void __ovld vstore8(int8, size_t, __global int *);
+void __ovld vstore8(uint8, size_t, __global uint *);
+void __ovld vstore8(long8, size_t, __global long *);
+void __ovld vstore8(ulong8, size_t, __global ulong *);
+void __ovld vstore8(float8, size_t, __global float *);
+void __ovld vstore16(char16, size_t, __global char *);
+void __ovld vstore16(uchar16, size_t, __global uchar *);
+void __ovld vstore16(short16, size_t, __global short *);
+void __ovld vstore16(ushort16, size_t, __global ushort *);
+void __ovld vstore16(int16, size_t, __global int *);
+void __ovld vstore16(uint16, size_t, __global uint *);
+void __ovld vstore16(long16, size_t, __global long *);
+void __ovld vstore16(ulong16, size_t, __global ulong *);
+void __ovld vstore16(float16, size_t, __global float *);
+void __ovld vstore2(char2, size_t, __local char *);
+void __ovld vstore2(uchar2, size_t, __local uchar *);
+void __ovld vstore2(short2, size_t, __local short *);
+void __ovld vstore2(ushort2, size_t, __local ushort *);
+void __ovld vstore2(int2, size_t, __local int *);
+void __ovld vstore2(uint2, size_t, __local uint *);
+void __ovld vstore2(long2, size_t, __local long *);
+void __ovld vstore2(ulong2, size_t, __local ulong *);
+void __ovld vstore2(float2, size_t, __local float *);
+void __ovld vstore3(char3, size_t, __local char *);
+void __ovld vstore3(uchar3, size_t, __local uchar *);
+void __ovld vstore3(short3, size_t, __local short *);
+void __ovld vstore3(ushort3, size_t, __local ushort *);
+void __ovld vstore3(int3, size_t, __local int *);
+void __ovld vstore3(uint3, size_t, __local uint *);
+void __ovld vstore3(long3, size_t, __local long *);
+void __ovld vstore3(ulong3, size_t, __local ulong *);
+void __ovld vstore3(float3, size_t, __local float *);
+void __ovld vstore4(char4, size_t, __local char *);
+void __ovld vstore4(uchar4, size_t, __local uchar *);
+void __ovld vstore4(short4, size_t, __local short *);
+void __ovld vstore4(ushort4, size_t, __local ushort *);
+void __ovld vstore4(int4, size_t, __local int *);
+void __ovld vstore4(uint4, size_t, __local uint *);
+void __ovld vstore4(long4, size_t, __local long *);
+void __ovld vstore4(ulong4, size_t, __local ulong *);
+void __ovld vstore4(float4, size_t, __local float *);
+void __ovld vstore8(char8, size_t, __local char *);
+void __ovld vstore8(uchar8, size_t, __local uchar *);
+void __ovld vstore8(short8, size_t, __local short *);
+void __ovld vstore8(ushort8, size_t, __local ushort *);
+void __ovld vstore8(int8, size_t, __local int *);
+void __ovld vstore8(uint8, size_t, __local uint *);
+void __ovld vstore8(long8, size_t, __local long *);
+void __ovld vstore8(ulong8, size_t, __local ulong *);
+void __ovld vstore8(float8, size_t, __local float *);
+void __ovld vstore16(char16, size_t, __local char *);
+void __ovld vstore16(uchar16, size_t, __local uchar *);
+void __ovld vstore16(short16, size_t, __local short *);
+void __ovld vstore16(ushort16, size_t, __local ushort *);
+void __ovld vstore16(int16, size_t, __local int *);
+void __ovld vstore16(uint16, size_t, __local uint *);
+void __ovld vstore16(long16, size_t, __local long *);
+void __ovld vstore16(ulong16, size_t, __local ulong *);
+void __ovld vstore16(float16, size_t, __local float *);
+void __ovld vstore2(char2, size_t, __private char *);
+void __ovld vstore2(uchar2, size_t, __private uchar *);
+void __ovld vstore2(short2, size_t, __private short *);
+void __ovld vstore2(ushort2, size_t, __private ushort *);
+void __ovld vstore2(int2, size_t, __private int *);
+void __ovld vstore2(uint2, size_t, __private uint *);
+void __ovld vstore2(long2, size_t, __private long *);
+void __ovld vstore2(ulong2, size_t, __private ulong *);
+void __ovld vstore2(float2, size_t, __private float *);
+void __ovld vstore3(char3, size_t, __private char *);
+void __ovld vstore3(uchar3, size_t, __private uchar *);
+void __ovld vstore3(short3, size_t, __private short *);
+void __ovld vstore3(ushort3, size_t, __private ushort *);
+void __ovld vstore3(int3, size_t, __private int *);
+void __ovld vstore3(uint3, size_t, __private uint *);
+void __ovld vstore3(long3, size_t, __private long *);
+void __ovld vstore3(ulong3, size_t, __private ulong *);
+void __ovld vstore3(float3, size_t, __private float *);
+void __ovld vstore4(char4, size_t, __private char *);
+void __ovld vstore4(uchar4, size_t, __private uchar *);
+void __ovld vstore4(short4, size_t, __private short *);
+void __ovld vstore4(ushort4, size_t, __private ushort *);
+void __ovld vstore4(int4, size_t, __private int *);
+void __ovld vstore4(uint4, size_t, __private uint *);
+void __ovld vstore4(long4, size_t, __private long *);
+void __ovld vstore4(ulong4, size_t, __private ulong *);
+void __ovld vstore4(float4, size_t, __private float *);
+void __ovld vstore8(char8, size_t, __private char *);
+void __ovld vstore8(uchar8, size_t, __private uchar *);
+void __ovld vstore8(short8, size_t, __private short *);
+void __ovld vstore8(ushort8, size_t, __private ushort *);
+void __ovld vstore8(int8, size_t, __private int *);
+void __ovld vstore8(uint8, size_t, __private uint *);
+void __ovld vstore8(long8, size_t, __private long *);
+void __ovld vstore8(ulong8, size_t, __private ulong *);
+void __ovld vstore8(float8, size_t, __private float *);
+void __ovld vstore16(char16, size_t, __private char *);
+void __ovld vstore16(uchar16, size_t, __private uchar *);
+void __ovld vstore16(short16, size_t, __private short *);
+void __ovld vstore16(ushort16, size_t, __private ushort *);
+void __ovld vstore16(int16, size_t, __private int *);
+void __ovld vstore16(uint16, size_t, __private uint *);
+void __ovld vstore16(long16, size_t, __private long *);
+void __ovld vstore16(ulong16, size_t, __private ulong *);
+void __ovld vstore16(float16, size_t, __private float *);
#ifdef cl_khr_fp64
-void __ovld vstore2(double2 data, size_t offset, __global double *p);
-void __ovld vstore3(double3 data, size_t offset, __global double *p);
-void __ovld vstore4(double4 data, size_t offset, __global double *p);
-void __ovld vstore8(double8 data, size_t offset, __global double *p);
-void __ovld vstore16(double16 data, size_t offset, __global double *p);
-void __ovld vstore2(double2 data, size_t offset, __local double *p);
-void __ovld vstore3(double3 data, size_t offset, __local double *p);
-void __ovld vstore4(double4 data, size_t offset, __local double *p);
-void __ovld vstore8(double8 data, size_t offset, __local double *p);
-void __ovld vstore16(double16 data, size_t offset, __local double *p);
-void __ovld vstore2(double2 data, size_t offset, __private double *p);
-void __ovld vstore3(double3 data, size_t offset, __private double *p);
-void __ovld vstore4(double4 data, size_t offset, __private double *p);
-void __ovld vstore8(double8 data, size_t offset, __private double *p);
-void __ovld vstore16(double16 data, size_t offset, __private double *p);
+void __ovld vstore2(double2, size_t, __global double *);
+void __ovld vstore3(double3, size_t, __global double *);
+void __ovld vstore4(double4, size_t, __global double *);
+void __ovld vstore8(double8, size_t, __global double *);
+void __ovld vstore16(double16, size_t, __global double *);
+void __ovld vstore2(double2, size_t, __local double *);
+void __ovld vstore3(double3, size_t, __local double *);
+void __ovld vstore4(double4, size_t, __local double *);
+void __ovld vstore8(double8, size_t, __local double *);
+void __ovld vstore16(double16, size_t, __local double *);
+void __ovld vstore2(double2, size_t, __private double *);
+void __ovld vstore3(double3, size_t, __private double *);
+void __ovld vstore4(double4, size_t, __private double *);
+void __ovld vstore8(double8, size_t, __private double *);
+void __ovld vstore16(double16, size_t, __private double *);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-void __ovld vstore(half data, size_t offset, __global half *p);
-void __ovld vstore2(half2 data, size_t offset, __global half *p);
-void __ovld vstore3(half3 data, size_t offset, __global half *p);
-void __ovld vstore4(half4 data, size_t offset, __global half *p);
-void __ovld vstore8(half8 data, size_t offset, __global half *p);
-void __ovld vstore16(half16 data, size_t offset, __global half *p);
-void __ovld vstore(half data, size_t offset, __local half *p);
-void __ovld vstore2(half2 data, size_t offset, __local half *p);
-void __ovld vstore3(half3 data, size_t offset, __local half *p);
-void __ovld vstore4(half4 data, size_t offset, __local half *p);
-void __ovld vstore8(half8 data, size_t offset, __local half *p);
-void __ovld vstore16(half16 data, size_t offset, __local half *p);
-void __ovld vstore(half data, size_t offset, __private half *p);
-void __ovld vstore2(half2 data, size_t offset, __private half *p);
-void __ovld vstore3(half3 data, size_t offset, __private half *p);
-void __ovld vstore4(half4 data, size_t offset, __private half *p);
-void __ovld vstore8(half8 data, size_t offset, __private half *p);
-void __ovld vstore16(half16 data, size_t offset, __private half *p);
+void __ovld vstore2(half2, size_t, __global half *);
+void __ovld vstore3(half3, size_t, __global half *);
+void __ovld vstore4(half4, size_t, __global half *);
+void __ovld vstore8(half8, size_t, __global half *);
+void __ovld vstore16(half16, size_t, __global half *);
+void __ovld vstore2(half2, size_t, __local half *);
+void __ovld vstore3(half3, size_t, __local half *);
+void __ovld vstore4(half4, size_t, __local half *);
+void __ovld vstore8(half8, size_t, __local half *);
+void __ovld vstore16(half16, size_t, __local half *);
+void __ovld vstore2(half2, size_t, __private half *);
+void __ovld vstore3(half3, size_t, __private half *);
+void __ovld vstore4(half4, size_t, __private half *);
+void __ovld vstore8(half8, size_t, __private half *);
+void __ovld vstore16(half16, size_t, __private half *);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_named_address_space_builtins)
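/*
 * Illustrative sketch, not part of the header: vstoren mirrors vloadn and
 * writes n elements to (p + n * offset). Hypothetical read-modify-write
 * kernel:
 */
__kernel void scale4(__global float *buf, float k) {
    size_t gid = get_global_id(0);
    float4 v = vload4(gid, buf);
    vstore4(v * k, gid, buf); /* writes buf[4*gid] .. buf[4*gid + 3] */
}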
@@ -11751,15 +11742,15 @@ void __ovld vstore16(half16 data, size_t offset, __private half *p);
* The read address computed as (p + offset)
* must be 16-bit aligned.
*/
-float __ovld __purefn vload_half(size_t offset, const __constant half *p);
+float __ovld __purefn vload_half(size_t, const __constant half *);
#if defined(__opencl_c_generic_address_space)
-float __ovld __purefn vload_half(size_t offset, const half *p);
+float __ovld __purefn vload_half(size_t, const half *);
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
-float __ovld __purefn vload_half(size_t offset, const __global half *p);
-float __ovld __purefn vload_half(size_t offset, const __local half *p);
-float __ovld __purefn vload_half(size_t offset, const __private half *p);
+float __ovld __purefn vload_half(size_t, const __global half *);
+float __ovld __purefn vload_half(size_t, const __local half *);
+float __ovld __purefn vload_half(size_t, const __private half *);
#endif //defined(__opencl_c_named_address_space_builtins)
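/*
 * Illustrative sketch, not part of the header: per the comment above,
 * vload_half reads one half from (p + offset), which must be 16-bit
 * aligned, and returns it widened to float; this works even without
 * cl_khr_fp16, since half appears only behind a pointer. Kernel name is
 * hypothetical.
 */
__kernel void widen(__global const half *in, __global float *out) {
    size_t gid = get_global_id(0);
    out[gid] = vload_half(gid, in); /* in[gid] converted half -> float */
}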
/**
@@ -11770,35 +11761,35 @@ float __ovld __purefn vload_half(size_t offset, const __private half *p);
* value is returned. The read address computed
* as (p + (offset * n)) must be 16-bit aligned.
*/
-float2 __ovld __purefn vload_half2(size_t offset, const __constant half *p);
-float3 __ovld __purefn vload_half3(size_t offset, const __constant half *p);
-float4 __ovld __purefn vload_half4(size_t offset, const __constant half *p);
-float8 __ovld __purefn vload_half8(size_t offset, const __constant half *p);
-float16 __ovld __purefn vload_half16(size_t offset, const __constant half *p);
+float2 __ovld __purefn vload_half2(size_t, const __constant half *);
+float3 __ovld __purefn vload_half3(size_t, const __constant half *);
+float4 __ovld __purefn vload_half4(size_t, const __constant half *);
+float8 __ovld __purefn vload_half8(size_t, const __constant half *);
+float16 __ovld __purefn vload_half16(size_t, const __constant half *);
#if defined(__opencl_c_generic_address_space)
-float2 __ovld __purefn vload_half2(size_t offset, const half *p);
-float3 __ovld __purefn vload_half3(size_t offset, const half *p);
-float4 __ovld __purefn vload_half4(size_t offset, const half *p);
-float8 __ovld __purefn vload_half8(size_t offset, const half *p);
-float16 __ovld __purefn vload_half16(size_t offset, const half *p);
+float2 __ovld __purefn vload_half2(size_t, const half *);
+float3 __ovld __purefn vload_half3(size_t, const half *);
+float4 __ovld __purefn vload_half4(size_t, const half *);
+float8 __ovld __purefn vload_half8(size_t, const half *);
+float16 __ovld __purefn vload_half16(size_t, const half *);
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
-float2 __ovld __purefn vload_half2(size_t offset, const __global half *p);
-float3 __ovld __purefn vload_half3(size_t offset, const __global half *p);
-float4 __ovld __purefn vload_half4(size_t offset, const __global half *p);
-float8 __ovld __purefn vload_half8(size_t offset, const __global half *p);
-float16 __ovld __purefn vload_half16(size_t offset, const __global half *p);
-float2 __ovld __purefn vload_half2(size_t offset, const __local half *p);
-float3 __ovld __purefn vload_half3(size_t offset, const __local half *p);
-float4 __ovld __purefn vload_half4(size_t offset, const __local half *p);
-float8 __ovld __purefn vload_half8(size_t offset, const __local half *p);
-float16 __ovld __purefn vload_half16(size_t offset, const __local half *p);
-float2 __ovld __purefn vload_half2(size_t offset, const __private half *p);
-float3 __ovld __purefn vload_half3(size_t offset, const __private half *p);
-float4 __ovld __purefn vload_half4(size_t offset, const __private half *p);
-float8 __ovld __purefn vload_half8(size_t offset, const __private half *p);
-float16 __ovld __purefn vload_half16(size_t offset, const __private half *p);
+float2 __ovld __purefn vload_half2(size_t, const __global half *);
+float3 __ovld __purefn vload_half3(size_t, const __global half *);
+float4 __ovld __purefn vload_half4(size_t, const __global half *);
+float8 __ovld __purefn vload_half8(size_t, const __global half *);
+float16 __ovld __purefn vload_half16(size_t, const __global half *);
+float2 __ovld __purefn vload_half2(size_t, const __local half *);
+float3 __ovld __purefn vload_half3(size_t, const __local half *);
+float4 __ovld __purefn vload_half4(size_t, const __local half *);
+float8 __ovld __purefn vload_half8(size_t, const __local half *);
+float16 __ovld __purefn vload_half16(size_t, const __local half *);
+float2 __ovld __purefn vload_half2(size_t, const __private half *);
+float3 __ovld __purefn vload_half3(size_t, const __private half *);
+float4 __ovld __purefn vload_half4(size_t, const __private half *);
+float8 __ovld __purefn vload_half8(size_t, const __private half *);
+float16 __ovld __purefn vload_half16(size_t, const __private half *);
#endif //defined(__opencl_c_named_address_space_builtins)
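Per the comment above, the vector forms index in units of n elements: the read starts at p + (offset * n). A sketch for the 4-wide overload, assuming tightly packed halves in the buffer:

kernel void widen4(global float4 *out, const global half *in) {
    size_t i = get_global_id(0);
    out[i] = vload_half4(i, in);  /* reads 4 halves starting at in + i * 4 */
}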
/**
@@ -11813,52 +11804,52 @@ float16 __ovld __purefn vload_half16(size_t offset, const __private half *p);
* nearest even.
*/
#if defined(__opencl_c_generic_address_space)
-void __ovld vstore_half(float data, size_t offset, half *p);
-void __ovld vstore_half_rte(float data, size_t offset, half *p);
-void __ovld vstore_half_rtz(float data, size_t offset, half *p);
-void __ovld vstore_half_rtp(float data, size_t offset, half *p);
-void __ovld vstore_half_rtn(float data, size_t offset, half *p);
+void __ovld vstore_half(float, size_t, half *);
+void __ovld vstore_half_rte(float, size_t, half *);
+void __ovld vstore_half_rtz(float, size_t, half *);
+void __ovld vstore_half_rtp(float, size_t, half *);
+void __ovld vstore_half_rtn(float, size_t, half *);
#ifdef cl_khr_fp64
-void __ovld vstore_half(double data, size_t offset, half *p);
-void __ovld vstore_half_rte(double data, size_t offset, half *p);
-void __ovld vstore_half_rtz(double data, size_t offset, half *p);
-void __ovld vstore_half_rtp(double data, size_t offset, half *p);
-void __ovld vstore_half_rtn(double data, size_t offset, half *p);
+void __ovld vstore_half(double, size_t, half *);
+void __ovld vstore_half_rte(double, size_t, half *);
+void __ovld vstore_half_rtz(double, size_t, half *);
+void __ovld vstore_half_rtp(double, size_t, half *);
+void __ovld vstore_half_rtn(double, size_t, half *);
#endif //cl_khr_fp64
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
-void __ovld vstore_half(float data, size_t offset, __global half *p);
-void __ovld vstore_half_rte(float data, size_t offset, __global half *p);
-void __ovld vstore_half_rtz(float data, size_t offset, __global half *p);
-void __ovld vstore_half_rtp(float data, size_t offset, __global half *p);
-void __ovld vstore_half_rtn(float data, size_t offset, __global half *p);
-void __ovld vstore_half(float data, size_t offset, __local half *p);
-void __ovld vstore_half_rte(float data, size_t offset, __local half *p);
-void __ovld vstore_half_rtz(float data, size_t offset, __local half *p);
-void __ovld vstore_half_rtp(float data, size_t offset, __local half *p);
-void __ovld vstore_half_rtn(float data, size_t offset, __local half *p);
-void __ovld vstore_half(float data, size_t offset, __private half *p);
-void __ovld vstore_half_rte(float data, size_t offset, __private half *p);
-void __ovld vstore_half_rtz(float data, size_t offset, __private half *p);
-void __ovld vstore_half_rtp(float data, size_t offset, __private half *p);
-void __ovld vstore_half_rtn(float data, size_t offset, __private half *p);
+void __ovld vstore_half(float, size_t, __global half *);
+void __ovld vstore_half_rte(float, size_t, __global half *);
+void __ovld vstore_half_rtz(float, size_t, __global half *);
+void __ovld vstore_half_rtp(float, size_t, __global half *);
+void __ovld vstore_half_rtn(float, size_t, __global half *);
+void __ovld vstore_half(float, size_t, __local half *);
+void __ovld vstore_half_rte(float, size_t, __local half *);
+void __ovld vstore_half_rtz(float, size_t, __local half *);
+void __ovld vstore_half_rtp(float, size_t, __local half *);
+void __ovld vstore_half_rtn(float, size_t, __local half *);
+void __ovld vstore_half(float, size_t, __private half *);
+void __ovld vstore_half_rte(float, size_t, __private half *);
+void __ovld vstore_half_rtz(float, size_t, __private half *);
+void __ovld vstore_half_rtp(float, size_t, __private half *);
+void __ovld vstore_half_rtn(float, size_t, __private half *);
#ifdef cl_khr_fp64
-void __ovld vstore_half(double data, size_t offset, __global half *p);
-void __ovld vstore_half_rte(double data, size_t offset, __global half *p);
-void __ovld vstore_half_rtz(double data, size_t offset, __global half *p);
-void __ovld vstore_half_rtp(double data, size_t offset, __global half *p);
-void __ovld vstore_half_rtn(double data, size_t offset, __global half *p);
-void __ovld vstore_half(double data, size_t offset, __local half *p);
-void __ovld vstore_half_rte(double data, size_t offset, __local half *p);
-void __ovld vstore_half_rtz(double data, size_t offset, __local half *p);
-void __ovld vstore_half_rtp(double data, size_t offset, __local half *p);
-void __ovld vstore_half_rtn(double data, size_t offset, __local half *p);
-void __ovld vstore_half(double data, size_t offset, __private half *p);
-void __ovld vstore_half_rte(double data, size_t offset, __private half *p);
-void __ovld vstore_half_rtz(double data, size_t offset, __private half *p);
-void __ovld vstore_half_rtp(double data, size_t offset, __private half *p);
-void __ovld vstore_half_rtn(double data, size_t offset, __private half *p);
+void __ovld vstore_half(double, size_t, __global half *);
+void __ovld vstore_half_rte(double, size_t, __global half *);
+void __ovld vstore_half_rtz(double, size_t, __global half *);
+void __ovld vstore_half_rtp(double, size_t, __global half *);
+void __ovld vstore_half_rtn(double, size_t, __global half *);
+void __ovld vstore_half(double, size_t, __local half *);
+void __ovld vstore_half_rte(double, size_t, __local half *);
+void __ovld vstore_half_rtz(double, size_t, __local half *);
+void __ovld vstore_half_rtp(double, size_t, __local half *);
+void __ovld vstore_half_rtn(double, size_t, __local half *);
+void __ovld vstore_half(double, size_t, __private half *);
+void __ovld vstore_half_rte(double, size_t, __private half *);
+void __ovld vstore_half_rtz(double, size_t, __private half *);
+void __ovld vstore_half_rtp(double, size_t, __private half *);
+void __ovld vstore_half_rtn(double, size_t, __private half *);
#endif //cl_khr_fp64
#endif //defined(__opencl_c_named_address_space_builtins)
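The unsuffixed form converts with the default rounding mode (round to nearest even, per the comment above), while the _rte/_rtz/_rtp/_rtn variants fix the conversion mode per call. A sketch (names illustrative):

kernel void narrow(global half *out, const global float *in) {
    size_t i = get_global_id(0);
    vstore_half(in[i], i, out);        /* rounds to nearest even */
    /* vstore_half_rtz(in[i], i, out);    would truncate toward zero instead */
}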
@@ -11874,212 +11865,212 @@ void __ovld vstore_half_rtn(double data, size_t offset, __private half *p);
* nearest even.
*/
#if defined(__opencl_c_generic_address_space)
-void __ovld vstore_half2(float2 data, size_t offset, half *p);
-void __ovld vstore_half3(float3 data, size_t offset, half *p);
-void __ovld vstore_half4(float4 data, size_t offset, half *p);
-void __ovld vstore_half8(float8 data, size_t offset, half *p);
-void __ovld vstore_half16(float16 data, size_t offset, half *p);
-void __ovld vstore_half2_rte(float2 data, size_t offset, half *p);
-void __ovld vstore_half3_rte(float3 data, size_t offset, half *p);
-void __ovld vstore_half4_rte(float4 data, size_t offset, half *p);
-void __ovld vstore_half8_rte(float8 data, size_t offset, half *p);
-void __ovld vstore_half16_rte(float16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtz(float2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtz(float3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtz(float4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtz(float8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtz(float16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtp(float2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtp(float3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtp(float4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtp(float8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtp(float16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtn(float2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtn(float3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtn(float4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtn(float8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtn(float16 data, size_t offset, half *p);
+void __ovld vstore_half2(float2, size_t, half *);
+void __ovld vstore_half3(float3, size_t, half *);
+void __ovld vstore_half4(float4, size_t, half *);
+void __ovld vstore_half8(float8, size_t, half *);
+void __ovld vstore_half16(float16, size_t, half *);
+void __ovld vstore_half2_rte(float2, size_t, half *);
+void __ovld vstore_half3_rte(float3, size_t, half *);
+void __ovld vstore_half4_rte(float4, size_t, half *);
+void __ovld vstore_half8_rte(float8, size_t, half *);
+void __ovld vstore_half16_rte(float16, size_t, half *);
+void __ovld vstore_half2_rtz(float2, size_t, half *);
+void __ovld vstore_half3_rtz(float3, size_t, half *);
+void __ovld vstore_half4_rtz(float4, size_t, half *);
+void __ovld vstore_half8_rtz(float8, size_t, half *);
+void __ovld vstore_half16_rtz(float16, size_t, half *);
+void __ovld vstore_half2_rtp(float2, size_t, half *);
+void __ovld vstore_half3_rtp(float3, size_t, half *);
+void __ovld vstore_half4_rtp(float4, size_t, half *);
+void __ovld vstore_half8_rtp(float8, size_t, half *);
+void __ovld vstore_half16_rtp(float16, size_t, half *);
+void __ovld vstore_half2_rtn(float2, size_t, half *);
+void __ovld vstore_half3_rtn(float3, size_t, half *);
+void __ovld vstore_half4_rtn(float4, size_t, half *);
+void __ovld vstore_half8_rtn(float8, size_t, half *);
+void __ovld vstore_half16_rtn(float16, size_t, half *);
#ifdef cl_khr_fp64
-void __ovld vstore_half2(double2 data, size_t offset, half *p);
-void __ovld vstore_half3(double3 data, size_t offset, half *p);
-void __ovld vstore_half4(double4 data, size_t offset, half *p);
-void __ovld vstore_half8(double8 data, size_t offset, half *p);
-void __ovld vstore_half16(double16 data, size_t offset, half *p);
-void __ovld vstore_half2_rte(double2 data, size_t offset, half *p);
-void __ovld vstore_half3_rte(double3 data, size_t offset, half *p);
-void __ovld vstore_half4_rte(double4 data, size_t offset, half *p);
-void __ovld vstore_half8_rte(double8 data, size_t offset, half *p);
-void __ovld vstore_half16_rte(double16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtz(double2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtz(double3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtz(double4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtz(double8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtz(double16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtp(double2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtp(double3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtp(double4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtp(double8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtp(double16 data, size_t offset, half *p);
-void __ovld vstore_half2_rtn(double2 data, size_t offset, half *p);
-void __ovld vstore_half3_rtn(double3 data, size_t offset, half *p);
-void __ovld vstore_half4_rtn(double4 data, size_t offset, half *p);
-void __ovld vstore_half8_rtn(double8 data, size_t offset, half *p);
-void __ovld vstore_half16_rtn(double16 data, size_t offset, half *p);
+void __ovld vstore_half2(double2, size_t, half *);
+void __ovld vstore_half3(double3, size_t, half *);
+void __ovld vstore_half4(double4, size_t, half *);
+void __ovld vstore_half8(double8, size_t, half *);
+void __ovld vstore_half16(double16, size_t, half *);
+void __ovld vstore_half2_rte(double2, size_t, half *);
+void __ovld vstore_half3_rte(double3, size_t, half *);
+void __ovld vstore_half4_rte(double4, size_t, half *);
+void __ovld vstore_half8_rte(double8, size_t, half *);
+void __ovld vstore_half16_rte(double16, size_t, half *);
+void __ovld vstore_half2_rtz(double2, size_t, half *);
+void __ovld vstore_half3_rtz(double3, size_t, half *);
+void __ovld vstore_half4_rtz(double4, size_t, half *);
+void __ovld vstore_half8_rtz(double8, size_t, half *);
+void __ovld vstore_half16_rtz(double16, size_t, half *);
+void __ovld vstore_half2_rtp(double2, size_t, half *);
+void __ovld vstore_half3_rtp(double3, size_t, half *);
+void __ovld vstore_half4_rtp(double4, size_t, half *);
+void __ovld vstore_half8_rtp(double8, size_t, half *);
+void __ovld vstore_half16_rtp(double16, size_t, half *);
+void __ovld vstore_half2_rtn(double2, size_t, half *);
+void __ovld vstore_half3_rtn(double3, size_t, half *);
+void __ovld vstore_half4_rtn(double4, size_t, half *);
+void __ovld vstore_half8_rtn(double8, size_t, half *);
+void __ovld vstore_half16_rtn(double16, size_t, half *);
#endif //cl_khr_fp64
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
-void __ovld vstore_half2(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rte(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rte(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rte(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rte(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rte(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtz(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtz(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtz(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtz(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtz(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtp(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtp(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtp(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtp(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtp(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtn(float2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtn(float3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtn(float4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtn(float8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtn(float16 data, size_t offset, __global half *p);
-void __ovld vstore_half2(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rte(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rte(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rte(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rte(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rte(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtz(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtz(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtz(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtz(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtz(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtp(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtp(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtp(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtp(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtp(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtn(float2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtn(float3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtn(float4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtn(float8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtn(float16 data, size_t offset, __local half *p);
-void __ovld vstore_half2(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16(float16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rte(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rte(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rte(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rte(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rte(float16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtz(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtz(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtz(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtz(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtz(float16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtp(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtp(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtp(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtp(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtp(float16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtn(float2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtn(float3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtn(float4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtn(float8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtn(float16 data, size_t offset, __private half *p);
+void __ovld vstore_half2(float2, size_t, __global half *);
+void __ovld vstore_half3(float3, size_t, __global half *);
+void __ovld vstore_half4(float4, size_t, __global half *);
+void __ovld vstore_half8(float8, size_t, __global half *);
+void __ovld vstore_half16(float16, size_t, __global half *);
+void __ovld vstore_half2_rte(float2, size_t, __global half *);
+void __ovld vstore_half3_rte(float3, size_t, __global half *);
+void __ovld vstore_half4_rte(float4, size_t, __global half *);
+void __ovld vstore_half8_rte(float8, size_t, __global half *);
+void __ovld vstore_half16_rte(float16, size_t, __global half *);
+void __ovld vstore_half2_rtz(float2, size_t, __global half *);
+void __ovld vstore_half3_rtz(float3, size_t, __global half *);
+void __ovld vstore_half4_rtz(float4, size_t, __global half *);
+void __ovld vstore_half8_rtz(float8, size_t, __global half *);
+void __ovld vstore_half16_rtz(float16, size_t, __global half *);
+void __ovld vstore_half2_rtp(float2, size_t, __global half *);
+void __ovld vstore_half3_rtp(float3, size_t, __global half *);
+void __ovld vstore_half4_rtp(float4, size_t, __global half *);
+void __ovld vstore_half8_rtp(float8, size_t, __global half *);
+void __ovld vstore_half16_rtp(float16, size_t, __global half *);
+void __ovld vstore_half2_rtn(float2, size_t, __global half *);
+void __ovld vstore_half3_rtn(float3, size_t, __global half *);
+void __ovld vstore_half4_rtn(float4, size_t, __global half *);
+void __ovld vstore_half8_rtn(float8, size_t, __global half *);
+void __ovld vstore_half16_rtn(float16, size_t, __global half *);
+void __ovld vstore_half2(float2, size_t, __local half *);
+void __ovld vstore_half3(float3, size_t, __local half *);
+void __ovld vstore_half4(float4, size_t, __local half *);
+void __ovld vstore_half8(float8, size_t, __local half *);
+void __ovld vstore_half16(float16, size_t, __local half *);
+void __ovld vstore_half2_rte(float2, size_t, __local half *);
+void __ovld vstore_half3_rte(float3, size_t, __local half *);
+void __ovld vstore_half4_rte(float4, size_t, __local half *);
+void __ovld vstore_half8_rte(float8, size_t, __local half *);
+void __ovld vstore_half16_rte(float16, size_t, __local half *);
+void __ovld vstore_half2_rtz(float2, size_t, __local half *);
+void __ovld vstore_half3_rtz(float3, size_t, __local half *);
+void __ovld vstore_half4_rtz(float4, size_t, __local half *);
+void __ovld vstore_half8_rtz(float8, size_t, __local half *);
+void __ovld vstore_half16_rtz(float16, size_t, __local half *);
+void __ovld vstore_half2_rtp(float2, size_t, __local half *);
+void __ovld vstore_half3_rtp(float3, size_t, __local half *);
+void __ovld vstore_half4_rtp(float4, size_t, __local half *);
+void __ovld vstore_half8_rtp(float8, size_t, __local half *);
+void __ovld vstore_half16_rtp(float16, size_t, __local half *);
+void __ovld vstore_half2_rtn(float2, size_t, __local half *);
+void __ovld vstore_half3_rtn(float3, size_t, __local half *);
+void __ovld vstore_half4_rtn(float4, size_t, __local half *);
+void __ovld vstore_half8_rtn(float8, size_t, __local half *);
+void __ovld vstore_half16_rtn(float16, size_t, __local half *);
+void __ovld vstore_half2(float2, size_t, __private half *);
+void __ovld vstore_half3(float3, size_t, __private half *);
+void __ovld vstore_half4(float4, size_t, __private half *);
+void __ovld vstore_half8(float8, size_t, __private half *);
+void __ovld vstore_half16(float16, size_t, __private half *);
+void __ovld vstore_half2_rte(float2, size_t, __private half *);
+void __ovld vstore_half3_rte(float3, size_t, __private half *);
+void __ovld vstore_half4_rte(float4, size_t, __private half *);
+void __ovld vstore_half8_rte(float8, size_t, __private half *);
+void __ovld vstore_half16_rte(float16, size_t, __private half *);
+void __ovld vstore_half2_rtz(float2, size_t, __private half *);
+void __ovld vstore_half3_rtz(float3, size_t, __private half *);
+void __ovld vstore_half4_rtz(float4, size_t, __private half *);
+void __ovld vstore_half8_rtz(float8, size_t, __private half *);
+void __ovld vstore_half16_rtz(float16, size_t, __private half *);
+void __ovld vstore_half2_rtp(float2, size_t, __private half *);
+void __ovld vstore_half3_rtp(float3, size_t, __private half *);
+void __ovld vstore_half4_rtp(float4, size_t, __private half *);
+void __ovld vstore_half8_rtp(float8, size_t, __private half *);
+void __ovld vstore_half16_rtp(float16, size_t, __private half *);
+void __ovld vstore_half2_rtn(float2, size_t, __private half *);
+void __ovld vstore_half3_rtn(float3, size_t, __private half *);
+void __ovld vstore_half4_rtn(float4, size_t, __private half *);
+void __ovld vstore_half8_rtn(float8, size_t, __private half *);
+void __ovld vstore_half16_rtn(float16, size_t, __private half *);
#ifdef cl_khr_fp64
-void __ovld vstore_half2(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rte(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rte(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rte(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rte(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rte(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtz(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtz(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtz(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtz(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtz(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtp(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtp(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtp(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtp(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtp(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2_rtn(double2 data, size_t offset, __global half *p);
-void __ovld vstore_half3_rtn(double3 data, size_t offset, __global half *p);
-void __ovld vstore_half4_rtn(double4 data, size_t offset, __global half *p);
-void __ovld vstore_half8_rtn(double8 data, size_t offset, __global half *p);
-void __ovld vstore_half16_rtn(double16 data, size_t offset, __global half *p);
-void __ovld vstore_half2(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rte(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rte(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rte(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rte(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rte(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtz(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtz(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtz(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtz(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtz(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtp(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtp(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtp(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtp(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtp(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2_rtn(double2 data, size_t offset, __local half *p);
-void __ovld vstore_half3_rtn(double3 data, size_t offset, __local half *p);
-void __ovld vstore_half4_rtn(double4 data, size_t offset, __local half *p);
-void __ovld vstore_half8_rtn(double8 data, size_t offset, __local half *p);
-void __ovld vstore_half16_rtn(double16 data, size_t offset, __local half *p);
-void __ovld vstore_half2(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16(double16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rte(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rte(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rte(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rte(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rte(double16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtz(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtz(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtz(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtz(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtz(double16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtp(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtp(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtp(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtp(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtp(double16 data, size_t offset, __private half *p);
-void __ovld vstore_half2_rtn(double2 data, size_t offset, __private half *p);
-void __ovld vstore_half3_rtn(double3 data, size_t offset, __private half *p);
-void __ovld vstore_half4_rtn(double4 data, size_t offset, __private half *p);
-void __ovld vstore_half8_rtn(double8 data, size_t offset, __private half *p);
-void __ovld vstore_half16_rtn(double16 data, size_t offset, __private half *p);
+void __ovld vstore_half2(double2, size_t, __global half *);
+void __ovld vstore_half3(double3, size_t, __global half *);
+void __ovld vstore_half4(double4, size_t, __global half *);
+void __ovld vstore_half8(double8, size_t, __global half *);
+void __ovld vstore_half16(double16, size_t, __global half *);
+void __ovld vstore_half2_rte(double2, size_t, __global half *);
+void __ovld vstore_half3_rte(double3, size_t, __global half *);
+void __ovld vstore_half4_rte(double4, size_t, __global half *);
+void __ovld vstore_half8_rte(double8, size_t, __global half *);
+void __ovld vstore_half16_rte(double16, size_t, __global half *);
+void __ovld vstore_half2_rtz(double2, size_t, __global half *);
+void __ovld vstore_half3_rtz(double3, size_t, __global half *);
+void __ovld vstore_half4_rtz(double4, size_t, __global half *);
+void __ovld vstore_half8_rtz(double8, size_t, __global half *);
+void __ovld vstore_half16_rtz(double16, size_t, __global half *);
+void __ovld vstore_half2_rtp(double2, size_t, __global half *);
+void __ovld vstore_half3_rtp(double3, size_t, __global half *);
+void __ovld vstore_half4_rtp(double4, size_t, __global half *);
+void __ovld vstore_half8_rtp(double8, size_t, __global half *);
+void __ovld vstore_half16_rtp(double16, size_t, __global half *);
+void __ovld vstore_half2_rtn(double2, size_t, __global half *);
+void __ovld vstore_half3_rtn(double3, size_t, __global half *);
+void __ovld vstore_half4_rtn(double4, size_t, __global half *);
+void __ovld vstore_half8_rtn(double8, size_t, __global half *);
+void __ovld vstore_half16_rtn(double16, size_t, __global half *);
+void __ovld vstore_half2(double2, size_t, __local half *);
+void __ovld vstore_half3(double3, size_t, __local half *);
+void __ovld vstore_half4(double4, size_t, __local half *);
+void __ovld vstore_half8(double8, size_t, __local half *);
+void __ovld vstore_half16(double16, size_t, __local half *);
+void __ovld vstore_half2_rte(double2, size_t, __local half *);
+void __ovld vstore_half3_rte(double3, size_t, __local half *);
+void __ovld vstore_half4_rte(double4, size_t, __local half *);
+void __ovld vstore_half8_rte(double8, size_t, __local half *);
+void __ovld vstore_half16_rte(double16, size_t, __local half *);
+void __ovld vstore_half2_rtz(double2, size_t, __local half *);
+void __ovld vstore_half3_rtz(double3, size_t, __local half *);
+void __ovld vstore_half4_rtz(double4, size_t, __local half *);
+void __ovld vstore_half8_rtz(double8, size_t, __local half *);
+void __ovld vstore_half16_rtz(double16, size_t, __local half *);
+void __ovld vstore_half2_rtp(double2, size_t, __local half *);
+void __ovld vstore_half3_rtp(double3, size_t, __local half *);
+void __ovld vstore_half4_rtp(double4, size_t, __local half *);
+void __ovld vstore_half8_rtp(double8, size_t, __local half *);
+void __ovld vstore_half16_rtp(double16, size_t, __local half *);
+void __ovld vstore_half2_rtn(double2, size_t, __local half *);
+void __ovld vstore_half3_rtn(double3, size_t, __local half *);
+void __ovld vstore_half4_rtn(double4, size_t, __local half *);
+void __ovld vstore_half8_rtn(double8, size_t, __local half *);
+void __ovld vstore_half16_rtn(double16, size_t, __local half *);
+void __ovld vstore_half2(double2, size_t, __private half *);
+void __ovld vstore_half3(double3, size_t, __private half *);
+void __ovld vstore_half4(double4, size_t, __private half *);
+void __ovld vstore_half8(double8, size_t, __private half *);
+void __ovld vstore_half16(double16, size_t, __private half *);
+void __ovld vstore_half2_rte(double2, size_t, __private half *);
+void __ovld vstore_half3_rte(double3, size_t, __private half *);
+void __ovld vstore_half4_rte(double4, size_t, __private half *);
+void __ovld vstore_half8_rte(double8, size_t, __private half *);
+void __ovld vstore_half16_rte(double16, size_t, __private half *);
+void __ovld vstore_half2_rtz(double2, size_t, __private half *);
+void __ovld vstore_half3_rtz(double3, size_t, __private half *);
+void __ovld vstore_half4_rtz(double4, size_t, __private half *);
+void __ovld vstore_half8_rtz(double8, size_t, __private half *);
+void __ovld vstore_half16_rtz(double16, size_t, __private half *);
+void __ovld vstore_half2_rtp(double2, size_t, __private half *);
+void __ovld vstore_half3_rtp(double3, size_t, __private half *);
+void __ovld vstore_half4_rtp(double4, size_t, __private half *);
+void __ovld vstore_half8_rtp(double8, size_t, __private half *);
+void __ovld vstore_half16_rtp(double16, size_t, __private half *);
+void __ovld vstore_half2_rtn(double2, size_t, __private half *);
+void __ovld vstore_half3_rtn(double3, size_t, __private half *);
+void __ovld vstore_half4_rtn(double4, size_t, __private half *);
+void __ovld vstore_half8_rtn(double8, size_t, __private half *);
+void __ovld vstore_half16_rtn(double16, size_t, __private half *);
#endif //cl_khr_fp64
#endif //defined(__opencl_c_named_address_space_builtins)
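As with the scalar stores, each vector lane is converted with the rounding mode named in the suffix. A sketch of the 4-wide directed-rounding form (names illustrative):

kernel void narrow4(global half *out, const global float4 *in) {
    size_t i = get_global_id(0);
    vstore_half4_rtn(in[i], i, out);  /* each lane rounded toward negative infinity */
}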
@@ -12096,35 +12087,35 @@ void __ovld vstore_half16_rtn(double16 data, size_t offset, __private half *p);
* The address computed as (p + (offset * 4))
* must be aligned to sizeof (half) * 4 bytes.
*/
-float2 __ovld __purefn vloada_half2(size_t offset, const __constant half *p);
-float3 __ovld __purefn vloada_half3(size_t offset, const __constant half *p);
-float4 __ovld __purefn vloada_half4(size_t offset, const __constant half *p);
-float8 __ovld __purefn vloada_half8(size_t offset, const __constant half *p);
-float16 __ovld __purefn vloada_half16(size_t offset, const __constant half *p);
+float2 __ovld __purefn vloada_half2(size_t, const __constant half *);
+float3 __ovld __purefn vloada_half3(size_t, const __constant half *);
+float4 __ovld __purefn vloada_half4(size_t, const __constant half *);
+float8 __ovld __purefn vloada_half8(size_t, const __constant half *);
+float16 __ovld __purefn vloada_half16(size_t, const __constant half *);
#if defined(__opencl_c_generic_address_space)
-float2 __ovld __purefn vloada_half2(size_t offset, const half *p);
-float3 __ovld __purefn vloada_half3(size_t offset, const half *p);
-float4 __ovld __purefn vloada_half4(size_t offset, const half *p);
-float8 __ovld __purefn vloada_half8(size_t offset, const half *p);
-float16 __ovld __purefn vloada_half16(size_t offset, const half *p);
+float2 __ovld __purefn vloada_half2(size_t, const half *);
+float3 __ovld __purefn vloada_half3(size_t, const half *);
+float4 __ovld __purefn vloada_half4(size_t, const half *);
+float8 __ovld __purefn vloada_half8(size_t, const half *);
+float16 __ovld __purefn vloada_half16(size_t, const half *);
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
-float2 __ovld __purefn vloada_half2(size_t offset, const __global half *p);
-float3 __ovld __purefn vloada_half3(size_t offset, const __global half *p);
-float4 __ovld __purefn vloada_half4(size_t offset, const __global half *p);
-float8 __ovld __purefn vloada_half8(size_t offset, const __global half *p);
-float16 __ovld __purefn vloada_half16(size_t offset, const __global half *p);
-float2 __ovld __purefn vloada_half2(size_t offset, const __local half *p);
-float3 __ovld __purefn vloada_half3(size_t offset, const __local half *p);
-float4 __ovld __purefn vloada_half4(size_t offset, const __local half *p);
-float8 __ovld __purefn vloada_half8(size_t offset, const __local half *p);
-float16 __ovld __purefn vloada_half16(size_t offset, const __local half *p);
-float2 __ovld __purefn vloada_half2(size_t offset, const __private half *p);
-float3 __ovld __purefn vloada_half3(size_t offset, const __private half *p);
-float4 __ovld __purefn vloada_half4(size_t offset, const __private half *p);
-float8 __ovld __purefn vloada_half8(size_t offset, const __private half *p);
-float16 __ovld __purefn vloada_half16(size_t offset, const __private half *p);
+float2 __ovld __purefn vloada_half2(size_t, const __global half *);
+float3 __ovld __purefn vloada_half3(size_t, const __global half *);
+float4 __ovld __purefn vloada_half4(size_t, const __global half *);
+float8 __ovld __purefn vloada_half8(size_t, const __global half *);
+float16 __ovld __purefn vloada_half16(size_t, const __global half *);
+float2 __ovld __purefn vloada_half2(size_t, const __local half *);
+float3 __ovld __purefn vloada_half3(size_t, const __local half *);
+float4 __ovld __purefn vloada_half4(size_t, const __local half *);
+float8 __ovld __purefn vloada_half8(size_t, const __local half *);
+float16 __ovld __purefn vloada_half16(size_t, const __local half *);
+float2 __ovld __purefn vloada_half2(size_t, const __private half *);
+float3 __ovld __purefn vloada_half3(size_t, const __private half *);
+float4 __ovld __purefn vloada_half4(size_t, const __private half *);
+float8 __ovld __purefn vloada_half8(size_t, const __private half *);
+float16 __ovld __purefn vloada_half16(size_t, const __private half *);
#endif //defined(__opencl_c_named_address_space_builtins)
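The vloada_ forms impose the stronger sizeof(half) * n alignment stated above (plain vload_halfn only needs 16-bit alignment), and for 3-vectors they also step in units of 4 elements rather than 3. A sketch (names illustrative):

kernel void widen_aligned(global float4 *out, const global half *in) {
    size_t i = get_global_id(0);
    out[i] = vloada_half4(i, in);  /* in + i * 4 must be 8-byte aligned */
}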
/**
@@ -12144,250 +12135,250 @@ float16 __ovld __purefn vloada_half16(size_t offset, const __private half *p);
* round to nearest even.
*/
#if defined(__opencl_c_generic_address_space)
-void __ovld vstorea_half2(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16(float16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rte(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rte(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rte(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rte(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rte(float16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rtz(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtz(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtz(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtz(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtz(float16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rtp(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtp(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtp(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtp(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtp(float16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rtn(float2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtn(float3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtn(float4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtn(float8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtn(float16 data, size_t offset, half *p);
+void __ovld vstorea_half2(float2, size_t, half *);
+void __ovld vstorea_half3(float3, size_t, half *);
+void __ovld vstorea_half4(float4, size_t, half *);
+void __ovld vstorea_half8(float8, size_t, half *);
+void __ovld vstorea_half16(float16, size_t, half *);
+
+void __ovld vstorea_half2_rte(float2, size_t, half *);
+void __ovld vstorea_half3_rte(float3, size_t, half *);
+void __ovld vstorea_half4_rte(float4, size_t, half *);
+void __ovld vstorea_half8_rte(float8, size_t, half *);
+void __ovld vstorea_half16_rte(float16, size_t, half *);
+
+void __ovld vstorea_half2_rtz(float2, size_t, half *);
+void __ovld vstorea_half3_rtz(float3, size_t, half *);
+void __ovld vstorea_half4_rtz(float4, size_t, half *);
+void __ovld vstorea_half8_rtz(float8, size_t, half *);
+void __ovld vstorea_half16_rtz(float16, size_t, half *);
+
+void __ovld vstorea_half2_rtp(float2, size_t, half *);
+void __ovld vstorea_half3_rtp(float3, size_t, half *);
+void __ovld vstorea_half4_rtp(float4, size_t, half *);
+void __ovld vstorea_half8_rtp(float8, size_t, half *);
+void __ovld vstorea_half16_rtp(float16, size_t, half *);
+
+void __ovld vstorea_half2_rtn(float2, size_t, half *);
+void __ovld vstorea_half3_rtn(float3, size_t, half *);
+void __ovld vstorea_half4_rtn(float4, size_t, half *);
+void __ovld vstorea_half8_rtn(float8, size_t, half *);
+void __ovld vstorea_half16_rtn(float16, size_t, half *);
#ifdef cl_khr_fp64
-void __ovld vstorea_half2(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16(double16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rte(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rte(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rte(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rte(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rte(double16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rtz(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtz(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtz(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtz(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtz(double16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rtp(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtp(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtp(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtp(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtp(double16 data, size_t offset, half *p);
-
-void __ovld vstorea_half2_rtn(double2 data, size_t offset, half *p);
-void __ovld vstorea_half3_rtn(double3 data, size_t offset, half *p);
-void __ovld vstorea_half4_rtn(double4 data, size_t offset, half *p);
-void __ovld vstorea_half8_rtn(double8 data, size_t offset, half *p);
-void __ovld vstorea_half16_rtn(double16 data, size_t offset, half *p);
+void __ovld vstorea_half2(double2, size_t, half *);
+void __ovld vstorea_half3(double3, size_t, half *);
+void __ovld vstorea_half4(double4, size_t, half *);
+void __ovld vstorea_half8(double8, size_t, half *);
+void __ovld vstorea_half16(double16, size_t, half *);
+
+void __ovld vstorea_half2_rte(double2, size_t, half *);
+void __ovld vstorea_half3_rte(double3, size_t, half *);
+void __ovld vstorea_half4_rte(double4, size_t, half *);
+void __ovld vstorea_half8_rte(double8, size_t, half *);
+void __ovld vstorea_half16_rte(double16, size_t, half *);
+
+void __ovld vstorea_half2_rtz(double2, size_t, half *);
+void __ovld vstorea_half3_rtz(double3, size_t, half *);
+void __ovld vstorea_half4_rtz(double4, size_t, half *);
+void __ovld vstorea_half8_rtz(double8, size_t, half *);
+void __ovld vstorea_half16_rtz(double16, size_t, half *);
+
+void __ovld vstorea_half2_rtp(double2, size_t, half *);
+void __ovld vstorea_half3_rtp(double3, size_t, half *);
+void __ovld vstorea_half4_rtp(double4, size_t, half *);
+void __ovld vstorea_half8_rtp(double8, size_t, half *);
+void __ovld vstorea_half16_rtp(double16, size_t, half *);
+
+void __ovld vstorea_half2_rtn(double2, size_t, half *);
+void __ovld vstorea_half3_rtn(double3, size_t, half *);
+void __ovld vstorea_half4_rtn(double4, size_t, half *);
+void __ovld vstorea_half8_rtn(double8, size_t, half *);
+void __ovld vstorea_half16_rtn(double16, size_t, half *);
#endif //cl_khr_fp64
#endif //defined(__opencl_c_generic_address_space)
#if defined(__opencl_c_named_address_space_builtins)
-void __ovld vstorea_half2(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rte(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rte(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rte(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rte(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rte(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rtz(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtz(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtz(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtz(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtz(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rtp(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtp(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtp(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtp(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtp(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rtn(float2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtn(float3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtn(float4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtn(float8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtn(float16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rte(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rte(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rte(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rte(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rte(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rtz(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtz(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtz(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtz(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtz(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rtp(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtp(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtp(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtp(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtp(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rtn(float2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtn(float3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtn(float4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtn(float8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtn(float16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16(float16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rte(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rte(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rte(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rte(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rte(float16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rtz(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtz(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtz(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtz(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtz(float16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rtp(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtp(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtp(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtp(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtp(float16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rtn(float2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtn(float3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtn(float4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtn(float8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtn(float16 data, size_t offset, __private half *p);
+void __ovld vstorea_half2(float2, size_t, __global half *);
+void __ovld vstorea_half3(float3, size_t, __global half *);
+void __ovld vstorea_half4(float4, size_t, __global half *);
+void __ovld vstorea_half8(float8, size_t, __global half *);
+void __ovld vstorea_half16(float16, size_t, __global half *);
+
+void __ovld vstorea_half2_rte(float2, size_t, __global half *);
+void __ovld vstorea_half3_rte(float3, size_t, __global half *);
+void __ovld vstorea_half4_rte(float4, size_t, __global half *);
+void __ovld vstorea_half8_rte(float8, size_t, __global half *);
+void __ovld vstorea_half16_rte(float16, size_t, __global half *);
+
+void __ovld vstorea_half2_rtz(float2, size_t, __global half *);
+void __ovld vstorea_half3_rtz(float3, size_t, __global half *);
+void __ovld vstorea_half4_rtz(float4, size_t, __global half *);
+void __ovld vstorea_half8_rtz(float8, size_t, __global half *);
+void __ovld vstorea_half16_rtz(float16, size_t, __global half *);
+
+void __ovld vstorea_half2_rtp(float2, size_t, __global half *);
+void __ovld vstorea_half3_rtp(float3, size_t, __global half *);
+void __ovld vstorea_half4_rtp(float4, size_t, __global half *);
+void __ovld vstorea_half8_rtp(float8, size_t, __global half *);
+void __ovld vstorea_half16_rtp(float16, size_t, __global half *);
+
+void __ovld vstorea_half2_rtn(float2, size_t, __global half *);
+void __ovld vstorea_half3_rtn(float3, size_t, __global half *);
+void __ovld vstorea_half4_rtn(float4, size_t, __global half *);
+void __ovld vstorea_half8_rtn(float8, size_t, __global half *);
+void __ovld vstorea_half16_rtn(float16, size_t, __global half *);
+
+void __ovld vstorea_half2(float2, size_t, __local half *);
+void __ovld vstorea_half3(float3, size_t, __local half *);
+void __ovld vstorea_half4(float4, size_t, __local half *);
+void __ovld vstorea_half8(float8, size_t, __local half *);
+void __ovld vstorea_half16(float16, size_t, __local half *);
+
+void __ovld vstorea_half2_rte(float2, size_t, __local half *);
+void __ovld vstorea_half3_rte(float3, size_t, __local half *);
+void __ovld vstorea_half4_rte(float4, size_t, __local half *);
+void __ovld vstorea_half8_rte(float8, size_t, __local half *);
+void __ovld vstorea_half16_rte(float16, size_t, __local half *);
+
+void __ovld vstorea_half2_rtz(float2, size_t, __local half *);
+void __ovld vstorea_half3_rtz(float3, size_t, __local half *);
+void __ovld vstorea_half4_rtz(float4, size_t, __local half *);
+void __ovld vstorea_half8_rtz(float8, size_t, __local half *);
+void __ovld vstorea_half16_rtz(float16, size_t, __local half *);
+
+void __ovld vstorea_half2_rtp(float2, size_t, __local half *);
+void __ovld vstorea_half3_rtp(float3, size_t, __local half *);
+void __ovld vstorea_half4_rtp(float4, size_t, __local half *);
+void __ovld vstorea_half8_rtp(float8, size_t, __local half *);
+void __ovld vstorea_half16_rtp(float16, size_t, __local half *);
+
+void __ovld vstorea_half2_rtn(float2, size_t, __local half *);
+void __ovld vstorea_half3_rtn(float3, size_t, __local half *);
+void __ovld vstorea_half4_rtn(float4, size_t, __local half *);
+void __ovld vstorea_half8_rtn(float8, size_t, __local half *);
+void __ovld vstorea_half16_rtn(float16, size_t, __local half *);
+
+void __ovld vstorea_half2(float2, size_t, __private half *);
+void __ovld vstorea_half3(float3, size_t, __private half *);
+void __ovld vstorea_half4(float4, size_t, __private half *);
+void __ovld vstorea_half8(float8, size_t, __private half *);
+void __ovld vstorea_half16(float16, size_t, __private half *);
+
+void __ovld vstorea_half2_rte(float2, size_t, __private half *);
+void __ovld vstorea_half3_rte(float3, size_t, __private half *);
+void __ovld vstorea_half4_rte(float4, size_t, __private half *);
+void __ovld vstorea_half8_rte(float8, size_t, __private half *);
+void __ovld vstorea_half16_rte(float16, size_t, __private half *);
+
+void __ovld vstorea_half2_rtz(float2, size_t, __private half *);
+void __ovld vstorea_half3_rtz(float3, size_t, __private half *);
+void __ovld vstorea_half4_rtz(float4, size_t, __private half *);
+void __ovld vstorea_half8_rtz(float8, size_t, __private half *);
+void __ovld vstorea_half16_rtz(float16, size_t, __private half *);
+
+void __ovld vstorea_half2_rtp(float2, size_t, __private half *);
+void __ovld vstorea_half3_rtp(float3, size_t, __private half *);
+void __ovld vstorea_half4_rtp(float4, size_t, __private half *);
+void __ovld vstorea_half8_rtp(float8, size_t, __private half *);
+void __ovld vstorea_half16_rtp(float16, size_t, __private half *);
+
+void __ovld vstorea_half2_rtn(float2, size_t, __private half *);
+void __ovld vstorea_half3_rtn(float3, size_t, __private half *);
+void __ovld vstorea_half4_rtn(float4, size_t, __private half *);
+void __ovld vstorea_half8_rtn(float8, size_t, __private half *);
+void __ovld vstorea_half16_rtn(float16, size_t, __private half *);
#ifdef cl_khr_fp64
-void __ovld vstorea_half2(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rte(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rte(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rte(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rte(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rte(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rtz(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtz(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtz(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtz(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtz(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rtp(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtp(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtp(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtp(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtp(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2_rtn(double2 data, size_t offset, __global half *p);
-void __ovld vstorea_half3_rtn(double3 data, size_t offset, __global half *p);
-void __ovld vstorea_half4_rtn(double4 data, size_t offset, __global half *p);
-void __ovld vstorea_half8_rtn(double8 data, size_t offset, __global half *p);
-void __ovld vstorea_half16_rtn(double16 data, size_t offset, __global half *p);
-
-void __ovld vstorea_half2(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rte(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rte(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rte(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rte(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rte(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rtz(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtz(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtz(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtz(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtz(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rtp(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtp(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtp(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtp(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtp(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2_rtn(double2 data, size_t offset, __local half *p);
-void __ovld vstorea_half3_rtn(double3 data, size_t offset, __local half *p);
-void __ovld vstorea_half4_rtn(double4 data, size_t offset, __local half *p);
-void __ovld vstorea_half8_rtn(double8 data, size_t offset, __local half *p);
-void __ovld vstorea_half16_rtn(double16 data, size_t offset, __local half *p);
-
-void __ovld vstorea_half2(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16(double16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rte(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rte(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rte(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rte(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rte(double16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rtz(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtz(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtz(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtz(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtz(double16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rtp(double2 data, size_t offset, __private half *p);
-void __ovld vstorea_half3_rtp(double3 data, size_t offset, __private half *p);
-void __ovld vstorea_half4_rtp(double4 data, size_t offset, __private half *p);
-void __ovld vstorea_half8_rtp(double8 data, size_t offset, __private half *p);
-void __ovld vstorea_half16_rtp(double16 data, size_t offset, __private half *p);
-
-void __ovld vstorea_half2_rtn(double2 data,size_t offset, __private half *p);
-void __ovld vstorea_half3_rtn(double3 data,size_t offset, __private half *p);
-void __ovld vstorea_half4_rtn(double4 data,size_t offset, __private half *p);
-void __ovld vstorea_half8_rtn(double8 data,size_t offset, __private half *p);
-void __ovld vstorea_half16_rtn(double16 data,size_t offset, __private half *p);
+void __ovld vstorea_half2(double2, size_t, __global half *);
+void __ovld vstorea_half3(double3, size_t, __global half *);
+void __ovld vstorea_half4(double4, size_t, __global half *);
+void __ovld vstorea_half8(double8, size_t, __global half *);
+void __ovld vstorea_half16(double16, size_t, __global half *);
+
+void __ovld vstorea_half2_rte(double2, size_t, __global half *);
+void __ovld vstorea_half3_rte(double3, size_t, __global half *);
+void __ovld vstorea_half4_rte(double4, size_t, __global half *);
+void __ovld vstorea_half8_rte(double8, size_t, __global half *);
+void __ovld vstorea_half16_rte(double16, size_t, __global half *);
+
+void __ovld vstorea_half2_rtz(double2, size_t, __global half *);
+void __ovld vstorea_half3_rtz(double3, size_t, __global half *);
+void __ovld vstorea_half4_rtz(double4, size_t, __global half *);
+void __ovld vstorea_half8_rtz(double8, size_t, __global half *);
+void __ovld vstorea_half16_rtz(double16, size_t, __global half *);
+
+void __ovld vstorea_half2_rtp(double2, size_t, __global half *);
+void __ovld vstorea_half3_rtp(double3, size_t, __global half *);
+void __ovld vstorea_half4_rtp(double4, size_t, __global half *);
+void __ovld vstorea_half8_rtp(double8, size_t, __global half *);
+void __ovld vstorea_half16_rtp(double16, size_t, __global half *);
+
+void __ovld vstorea_half2_rtn(double2, size_t, __global half *);
+void __ovld vstorea_half3_rtn(double3, size_t, __global half *);
+void __ovld vstorea_half4_rtn(double4, size_t, __global half *);
+void __ovld vstorea_half8_rtn(double8, size_t, __global half *);
+void __ovld vstorea_half16_rtn(double16, size_t, __global half *);
+
+void __ovld vstorea_half2(double2, size_t, __local half *);
+void __ovld vstorea_half3(double3, size_t, __local half *);
+void __ovld vstorea_half4(double4, size_t, __local half *);
+void __ovld vstorea_half8(double8, size_t, __local half *);
+void __ovld vstorea_half16(double16, size_t, __local half *);
+
+void __ovld vstorea_half2_rte(double2, size_t, __local half *);
+void __ovld vstorea_half3_rte(double3, size_t, __local half *);
+void __ovld vstorea_half4_rte(double4, size_t, __local half *);
+void __ovld vstorea_half8_rte(double8, size_t, __local half *);
+void __ovld vstorea_half16_rte(double16, size_t, __local half *);
+
+void __ovld vstorea_half2_rtz(double2, size_t, __local half *);
+void __ovld vstorea_half3_rtz(double3, size_t, __local half *);
+void __ovld vstorea_half4_rtz(double4, size_t, __local half *);
+void __ovld vstorea_half8_rtz(double8, size_t, __local half *);
+void __ovld vstorea_half16_rtz(double16, size_t, __local half *);
+
+void __ovld vstorea_half2_rtp(double2, size_t, __local half *);
+void __ovld vstorea_half3_rtp(double3, size_t, __local half *);
+void __ovld vstorea_half4_rtp(double4, size_t, __local half *);
+void __ovld vstorea_half8_rtp(double8, size_t, __local half *);
+void __ovld vstorea_half16_rtp(double16, size_t, __local half *);
+
+void __ovld vstorea_half2_rtn(double2, size_t, __local half *);
+void __ovld vstorea_half3_rtn(double3, size_t, __local half *);
+void __ovld vstorea_half4_rtn(double4, size_t, __local half *);
+void __ovld vstorea_half8_rtn(double8, size_t, __local half *);
+void __ovld vstorea_half16_rtn(double16, size_t, __local half *);
+
+void __ovld vstorea_half2(double2, size_t, __private half *);
+void __ovld vstorea_half3(double3, size_t, __private half *);
+void __ovld vstorea_half4(double4, size_t, __private half *);
+void __ovld vstorea_half8(double8, size_t, __private half *);
+void __ovld vstorea_half16(double16, size_t, __private half *);
+
+void __ovld vstorea_half2_rte(double2, size_t, __private half *);
+void __ovld vstorea_half3_rte(double3, size_t, __private half *);
+void __ovld vstorea_half4_rte(double4, size_t, __private half *);
+void __ovld vstorea_half8_rte(double8, size_t, __private half *);
+void __ovld vstorea_half16_rte(double16, size_t, __private half *);
+
+void __ovld vstorea_half2_rtz(double2, size_t, __private half *);
+void __ovld vstorea_half3_rtz(double3, size_t, __private half *);
+void __ovld vstorea_half4_rtz(double4, size_t, __private half *);
+void __ovld vstorea_half8_rtz(double8, size_t, __private half *);
+void __ovld vstorea_half16_rtz(double16, size_t, __private half *);
+
+void __ovld vstorea_half2_rtp(double2, size_t, __private half *);
+void __ovld vstorea_half3_rtp(double3, size_t, __private half *);
+void __ovld vstorea_half4_rtp(double4, size_t, __private half *);
+void __ovld vstorea_half8_rtp(double8, size_t, __private half *);
+void __ovld vstorea_half16_rtp(double16, size_t, __private half *);
+
+void __ovld vstorea_half2_rtn(double2, size_t, __private half *);
+void __ovld vstorea_half3_rtn(double3, size_t, __private half *);
+void __ovld vstorea_half4_rtn(double4, size_t, __private half *);
+void __ovld vstorea_half8_rtn(double8, size_t, __private half *);
+void __ovld vstorea_half16_rtn(double16, size_t, __private half *);
#endif //cl_khr_fp64
#endif //defined(__opencl_c_named_address_space_builtins)
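/*
 * A minimal usage sketch for the vstorea_half* overloads above (the kernel
 * and its argument names are illustrative, not part of this header).
 * vstorea_halfn(data, offset, p) narrows an n-component float vector (or
 * double vector, with cl_khr_fp64) to half and stores it at the aligned
 * address p + offset * n (offset * 4 for the 3-component aligned form);
 * the _rte/_rtz/_rtp/_rtn suffixes select the rounding mode, while the
 * unsuffixed form uses the default, round to nearest even.
 */
__kernel void pack_to_half(__global const float4 *in, __global half *out) {
    size_t gid = get_global_id(0);
    /* Round toward zero while narrowing; out receives four halfs per item. */
    vstorea_half4_rtz(in[gid], gid, out);
}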
@@ -12426,7 +12417,7 @@ void __ovld vstorea_half16_rtn(double16 data,size_t offset, __private half *p);
void __ovld __conv barrier(cl_mem_fence_flags flags);
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void __ovld __conv work_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
+void __ovld __conv work_group_barrier(cl_mem_fence_flags flags, memory_scope);
void __ovld __conv work_group_barrier(cl_mem_fence_flags flags);
#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
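/*
 * A minimal sketch of the work_group_barrier overloads above (OpenCL 2.0;
 * the kernel is illustrative). The two-argument form names the memory scope
 * the fence applies to; the one-argument form behaves like barrier().
 */
__kernel void cross_lane_read(__global int *data, __local int *tmp) {
    size_t lid = get_local_id(0);
    tmp[lid] = data[get_global_id(0)];
    /* Make the __local writes visible to every work-item in the group. */
    work_group_barrier(CLK_LOCAL_MEM_FENCE, memory_scope_work_group);
    if (lid == 0)
        data[get_group_id(0)] = tmp[get_local_size(0) - 1];
}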
@@ -12515,141 +12506,141 @@ cl_mem_fence_flags __ovld get_fence(void *ptr);
* synchronization of source data such as using a
* barrier before performing the copy.
*/
-event_t __ovld async_work_group_copy(__local char *dst, const __global char *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar *dst, const __global uchar *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short *dst, const __global short *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort *dst, const __global ushort *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int *dst, const __global int *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint *dst, const __global uint *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long *dst, const __global long *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong *dst, const __global ulong *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float *dst, const __global float *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char2 *dst, const __global char2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar2 *dst, const __global uchar2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short2 *dst, const __global short2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort2 *dst, const __global ushort2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int2 *dst, const __global int2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint2 *dst, const __global uint2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long2 *dst, const __global long2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong2 *dst, const __global ulong2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float2 *dst, const __global float2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char3 *dst, const __global char3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar3 *dst, const __global uchar3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short3 *dst, const __global short3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort3 *dst, const __global ushort3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int3 *dst, const __global int3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint3 *dst, const __global uint3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long3 *dst, const __global long3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong3 *dst, const __global ulong3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float3 *dst, const __global float3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char4 *dst, const __global char4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar4 *dst, const __global uchar4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short4 *dst, const __global short4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort4 *dst, const __global ushort4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int4 *dst, const __global int4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint4 *dst, const __global uint4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long4 *dst, const __global long4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong4 *dst, const __global ulong4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float4 *dst, const __global float4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char8 *dst, const __global char8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar8 *dst, const __global uchar8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short8 *dst, const __global short8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort8 *dst, const __global ushort8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int8 *dst, const __global int8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint8 *dst, const __global uint8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long8 *dst, const __global long8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong8 *dst, const __global ulong8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float8 *dst, const __global float8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local char16 *dst, const __global char16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uchar16 *dst, const __global uchar16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local short16 *dst, const __global short16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ushort16 *dst, const __global ushort16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local int16 *dst, const __global int16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local uint16 *dst, const __global uint16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local long16 *dst, const __global long16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local ulong16 *dst, const __global ulong16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local float16 *dst, const __global float16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char *dst, const __local char *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar *dst, const __local uchar *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short *dst, const __local short *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort *dst, const __local ushort *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int *dst, const __local int *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint *dst, const __local uint *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long *dst, const __local long *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong *dst, const __local ulong *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float *dst, const __local float *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char2 *dst, const __local char2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar2 *dst, const __local uchar2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short2 *dst, const __local short2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort2 *dst, const __local ushort2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int2 *dst, const __local int2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint2 *dst, const __local uint2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long2 *dst, const __local long2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong2 *dst, const __local ulong2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float2 *dst, const __local float2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char3 *dst, const __local char3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar3 *dst, const __local uchar3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short3 *dst, const __local short3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort3 *dst, const __local ushort3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int3 *dst, const __local int3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint3 *dst, const __local uint3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long3 *dst, const __local long3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong3 *dst, const __local ulong3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float3 *dst, const __local float3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char4 *dst, const __local char4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar4 *dst, const __local uchar4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short4 *dst, const __local short4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort4 *dst, const __local ushort4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int4 *dst, const __local int4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint4 *dst, const __local uint4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long4 *dst, const __local long4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong4 *dst, const __local ulong4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float4 *dst, const __local float4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char8 *dst, const __local char8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar8 *dst, const __local uchar8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short8 *dst, const __local short8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort8 *dst, const __local ushort8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int8 *dst, const __local int8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint8 *dst, const __local uint8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long8 *dst, const __local long8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong8 *dst, const __local ulong8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float8 *dst, const __local float8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global char16 *dst, const __local char16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uchar16 *dst, const __local uchar16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global short16 *dst, const __local short16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ushort16 *dst, const __local ushort16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global int16 *dst, const __local int16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global uint16 *dst, const __local uint16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global long16 *dst, const __local long16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global ulong16 *dst, const __local ulong16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global float16 *dst, const __local float16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local char *, const __global char *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uchar *, const __global uchar *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local short *, const __global short *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ushort *, const __global ushort *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local int *, const __global int *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uint *, const __global uint *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local long *, const __global long *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ulong *, const __global ulong *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local float *, const __global float *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local char2 *, const __global char2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uchar2 *, const __global uchar2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local short2 *, const __global short2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ushort2 *, const __global ushort2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local int2 *, const __global int2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uint2 *, const __global uint2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local long2 *, const __global long2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ulong2 *, const __global ulong2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local float2 *, const __global float2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local char3 *, const __global char3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uchar3 *, const __global uchar3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local short3 *, const __global short3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ushort3 *, const __global ushort3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local int3 *, const __global int3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uint3 *, const __global uint3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local long3 *, const __global long3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ulong3 *, const __global ulong3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local float3 *, const __global float3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local char4 *, const __global char4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uchar4 *, const __global uchar4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local short4 *, const __global short4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ushort4 *, const __global ushort4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local int4 *, const __global int4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uint4 *, const __global uint4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local long4 *, const __global long4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ulong4 *, const __global ulong4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local float4 *, const __global float4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local char8 *, const __global char8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uchar8 *, const __global uchar8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local short8 *, const __global short8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ushort8 *, const __global ushort8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local int8 *, const __global int8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uint8 *, const __global uint8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local long8 *, const __global long8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ulong8 *, const __global ulong8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local float8 *, const __global float8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local char16 *, const __global char16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uchar16 *, const __global uchar16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local short16 *, const __global short16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ushort16 *, const __global ushort16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local int16 *, const __global int16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local uint16 *, const __global uint16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local long16 *, const __global long16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local ulong16 *, const __global ulong16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local float16 *, const __global float16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global char *, const __local char *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uchar *, const __local uchar *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global short *, const __local short *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ushort *, const __local ushort *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global int *, const __local int *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uint *, const __local uint *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global long *, const __local long *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ulong *, const __local ulong *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global float *, const __local float *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global char2 *, const __local char2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uchar2 *, const __local uchar2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global short2 *, const __local short2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ushort2 *, const __local ushort2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global int2 *, const __local int2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uint2 *, const __local uint2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global long2 *, const __local long2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ulong2 *, const __local ulong2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global float2 *, const __local float2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global char3 *, const __local char3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uchar3 *, const __local uchar3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global short3 *, const __local short3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ushort3 *, const __local ushort3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global int3 *, const __local int3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uint3 *, const __local uint3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global long3 *, const __local long3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ulong3 *, const __local ulong3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global float3 *, const __local float3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global char4 *, const __local char4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uchar4 *, const __local uchar4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global short4 *, const __local short4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ushort4 *, const __local ushort4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global int4 *, const __local int4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uint4 *, const __local uint4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global long4 *, const __local long4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ulong4 *, const __local ulong4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global float4 *, const __local float4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global char8 *, const __local char8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uchar8 *, const __local uchar8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global short8 *, const __local short8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ushort8 *, const __local ushort8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global int8 *, const __local int8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uint8 *, const __local uint8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global long8 *, const __local long8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ulong8 *, const __local ulong8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global float8 *, const __local float8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global char16 *, const __local char16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uchar16 *, const __local uchar16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global short16 *, const __local short16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ushort16 *, const __local ushort16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global int16 *, const __local int16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global uint16 *, const __local uint16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global long16 *, const __local long16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global ulong16 *, const __local ulong16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global float16 *, const __local float16 *, size_t, event_t);
#ifdef cl_khr_fp64
-event_t __ovld async_work_group_copy(__local double *dst, const __global double *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double2 *dst, const __global double2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double3 *dst, const __global double3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double4 *dst, const __global double4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double8 *dst, const __global double8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local double16 *dst, const __global double16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double *dst, const __local double *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double2 *dst, const __local double2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double3 *dst, const __local double3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double4 *dst, const __local double4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double8 *dst, const __local double8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global double16 *dst, const __local double16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local double *, const __global double *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local double2 *, const __global double2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local double3 *, const __global double3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local double4 *, const __global double4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local double8 *, const __global double8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local double16 *, const __global double16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global double *, const __local double *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global double2 *, const __local double2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global double3 *, const __local double3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global double4 *, const __local double4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global double8 *, const __local double8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global double16 *, const __local double16 *, size_t, event_t);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-event_t __ovld async_work_group_copy(__local half *dst, const __global half *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half2 *dst, const __global half2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half3 *dst, const __global half3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half4 *dst, const __global half4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half8 *dst, const __global half8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__local half16 *dst, const __global half16 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half *dst, const __local half *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half2 *dst, const __local half2 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half3 *dst, const __local half3 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half4 *dst, const __local half4 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half8 *dst, const __local half8 *src, size_t num_elements, event_t event);
-event_t __ovld async_work_group_copy(__global half16 *dst, const __local half16 *src, size_t num_elements, event_t event);
+event_t __ovld async_work_group_copy(__local half *, const __global half *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local half2 *, const __global half2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local half3 *, const __global half3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local half4 *, const __global half4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local half8 *, const __global half8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__local half16 *, const __global half16 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global half *, const __local half *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global half2 *, const __local half2 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global half3 *, const __local half3 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global half4 *, const __local half4 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global half8 *, const __local half8 *, size_t, event_t);
+event_t __ovld async_work_group_copy(__global half16 *, const __local half16 *, size_t, event_t);
#endif //cl_khr_fp16
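/*
 * A minimal usage sketch for the async_work_group_copy overloads above
 * (kernel and names illustrative). Every work-item in the group must reach
 * the copy with identical arguments, and the returned event must be waited
 * on before the destination is read; passing 0 as the event argument asks
 * for a fresh event.
 */
__kernel void stage_tile(__global const float *src, __local float *tile,
                         __global float *dst) {
    size_t n = get_local_size(0);
    event_t e = async_work_group_copy(tile, src + get_group_id(0) * n, n, 0);
    wait_group_events(1, &e);
    /* tile[] now holds this group's slice of src. */
    dst[get_global_id(0)] = tile[n - 1 - get_local_id(0)];
}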
/**
@@ -12678,141 +12669,141 @@ event_t __ovld async_work_group_copy(__global half16 *dst, const __local half16
* synchronization of source data such as using a
* barrier before performing the copy.
*/
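/*
 * A sketch of the strided form declared below (same assumptions as the
 * sketches above). The extra size_t is the stride, in elements, applied to
 * the global side of the transfer: the source for global-to-local copies,
 * the destination for local-to-global copies.
 */
__kernel void gather_column(__global const float *src, __local float *col,
                            uint stride) {
    /* Collect every stride-th element of src into contiguous local memory. */
    event_t e = async_work_group_strided_copy(col, src, get_local_size(0),
                                              (size_t)stride, 0);
    wait_group_events(1, &e);
}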
-event_t __ovld async_work_group_strided_copy(__local char *dst, const __global char *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar *dst, const __global uchar *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short *dst, const __global short *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort *dst, const __global ushort *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int *dst, const __global int *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint *dst, const __global uint *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long *dst, const __global long *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong *dst, const __global ulong *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float *dst, const __global float *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char2 *dst, const __global char2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar2 *dst, const __global uchar2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short2 *dst, const __global short2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort2 *dst, const __global ushort2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int2 *dst, const __global int2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint2 *dst, const __global uint2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long2 *dst, const __global long2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong2 *dst, const __global ulong2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float2 *dst, const __global float2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char3 *dst, const __global char3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar3 *dst, const __global uchar3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short3 *dst, const __global short3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort3 *dst, const __global ushort3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int3 *dst, const __global int3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint3 *dst, const __global uint3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long3 *dst, const __global long3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong3 *dst, const __global ulong3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float3 *dst, const __global float3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char4 *dst, const __global char4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar4 *dst, const __global uchar4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short4 *dst, const __global short4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort4 *dst, const __global ushort4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int4 *dst, const __global int4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint4 *dst, const __global uint4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long4 *dst, const __global long4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong4 *dst, const __global ulong4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float4 *dst, const __global float4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char8 *dst, const __global char8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar8 *dst, const __global uchar8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short8 *dst, const __global short8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort8 *dst, const __global ushort8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int8 *dst, const __global int8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint8 *dst, const __global uint8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long8 *dst, const __global long8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong8 *dst, const __global ulong8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float8 *dst, const __global float8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local char16 *dst, const __global char16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uchar16 *dst, const __global uchar16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local short16 *dst, const __global short16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ushort16 *dst, const __global ushort16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local int16 *dst, const __global int16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local uint16 *dst, const __global uint16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local long16 *dst, const __global long16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local ulong16 *dst, const __global ulong16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local float16 *dst, const __global float16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char *dst, const __local char *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar *dst, const __local uchar *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short *dst, const __local short *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort *dst, const __local ushort *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int *dst, const __local int *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint *dst, const __local uint *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long *dst, const __local long *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong *dst, const __local ulong *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float *dst, const __local float *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char2 *dst, const __local char2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar2 *dst, const __local uchar2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short2 *dst, const __local short2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort2 *dst, const __local ushort2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int2 *dst, const __local int2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint2 *dst, const __local uint2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long2 *dst, const __local long2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong2 *dst, const __local ulong2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float2 *dst, const __local float2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char3 *dst, const __local char3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar3 *dst, const __local uchar3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short3 *dst, const __local short3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort3 *dst, const __local ushort3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int3 *dst, const __local int3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint3 *dst, const __local uint3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long3 *dst, const __local long3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong3 *dst, const __local ulong3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float3 *dst, const __local float3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char4 *dst, const __local char4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar4 *dst, const __local uchar4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short4 *dst, const __local short4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort4 *dst, const __local ushort4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int4 *dst, const __local int4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint4 *dst, const __local uint4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long4 *dst, const __local long4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong4 *dst, const __local ulong4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float4 *dst, const __local float4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char8 *dst, const __local char8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar8 *dst, const __local uchar8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short8 *dst, const __local short8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort8 *dst, const __local ushort8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int8 *dst, const __local int8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint8 *dst, const __local uint8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long8 *dst, const __local long8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong8 *dst, const __local ulong8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float8 *dst, const __local float8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global char16 *dst, const __local char16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uchar16 *dst, const __local uchar16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global short16 *dst, const __local short16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ushort16 *dst, const __local ushort16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global int16 *dst, const __local int16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global uint16 *dst, const __local uint16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global long16 *dst, const __local long16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global ulong16 *dst, const __local ulong16 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global float16 *dst, const __local float16 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local char *, const __global char *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uchar *, const __global uchar *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local short *, const __global short *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ushort *, const __global ushort *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local int *, const __global int *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uint *, const __global uint *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local long *, const __global long *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ulong *, const __global ulong *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local float *, const __global float *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local char2 *, const __global char2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uchar2 *, const __global uchar2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local short2 *, const __global short2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ushort2 *, const __global ushort2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local int2 *, const __global int2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uint2 *, const __global uint2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local long2 *, const __global long2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ulong2 *, const __global ulong2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local float2 *, const __global float2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local char3 *, const __global char3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uchar3 *, const __global uchar3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local short3 *, const __global short3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ushort3 *, const __global ushort3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local int3 *, const __global int3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uint3 *, const __global uint3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local long3 *, const __global long3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ulong3 *, const __global ulong3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local float3 *, const __global float3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local char4 *, const __global char4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uchar4 *, const __global uchar4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local short4 *, const __global short4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ushort4 *, const __global ushort4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local int4 *, const __global int4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uint4 *, const __global uint4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local long4 *, const __global long4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ulong4 *, const __global ulong4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local float4 *, const __global float4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local char8 *, const __global char8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uchar8 *, const __global uchar8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local short8 *, const __global short8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ushort8 *, const __global ushort8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local int8 *, const __global int8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uint8 *, const __global uint8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local long8 *, const __global long8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ulong8 *, const __global ulong8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local float8 *, const __global float8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local char16 *, const __global char16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uchar16 *, const __global uchar16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local short16 *, const __global short16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ushort16 *, const __global ushort16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local int16 *, const __global int16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local uint16 *, const __global uint16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local long16 *, const __global long16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local ulong16 *, const __global ulong16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local float16 *, const __global float16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global char *, const __local char *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uchar *, const __local uchar *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global short *, const __local short *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ushort *, const __local ushort *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global int *, const __local int *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uint *, const __local uint *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global long *, const __local long *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ulong *, const __local ulong *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global float *, const __local float *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global char2 *, const __local char2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uchar2 *, const __local uchar2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global short2 *, const __local short2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ushort2 *, const __local ushort2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global int2 *, const __local int2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uint2 *, const __local uint2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global long2 *, const __local long2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ulong2 *, const __local ulong2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global float2 *, const __local float2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global char3 *, const __local char3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uchar3 *, const __local uchar3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global short3 *, const __local short3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ushort3 *, const __local ushort3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global int3 *, const __local int3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uint3 *, const __local uint3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global long3 *, const __local long3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ulong3 *, const __local ulong3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global float3 *, const __local float3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global char4 *, const __local char4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uchar4 *, const __local uchar4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global short4 *, const __local short4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ushort4 *, const __local ushort4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global int4 *, const __local int4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uint4 *, const __local uint4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global long4 *, const __local long4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ulong4 *, const __local ulong4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global float4 *, const __local float4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global char8 *, const __local char8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uchar8 *, const __local uchar8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global short8 *, const __local short8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ushort8 *, const __local ushort8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global int8 *, const __local int8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uint8 *, const __local uint8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global long8 *, const __local long8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ulong8 *, const __local ulong8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global float8 *, const __local float8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global char16 *, const __local char16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uchar16 *, const __local uchar16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global short16 *, const __local short16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ushort16 *, const __local ushort16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global int16 *, const __local int16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global uint16 *, const __local uint16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global long16 *, const __local long16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global ulong16 *, const __local ulong16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global float16 *, const __local float16 *, size_t, size_t, event_t);
#ifdef cl_khr_fp64
-event_t __ovld async_work_group_strided_copy(__local double *dst, const __global double *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double2 *dst, const __global double2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double3 *dst, const __global double3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double4 *dst, const __global double4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double8 *dst, const __global double8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local double16 *dst, const __global double16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double *dst, const __local double *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double2 *dst, const __local double2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double3 *dst, const __local double3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double4 *dst, const __local double4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double8 *dst, const __local double8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global double16 *dst, const __local double16 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local double *, const __global double *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local double2 *, const __global double2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local double3 *, const __global double3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local double4 *, const __global double4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local double8 *, const __global double8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local double16 *, const __global double16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global double *, const __local double *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global double2 *, const __local double2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global double3 *, const __local double3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global double4 *, const __local double4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global double8 *, const __local double8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global double16 *, const __local double16 *, size_t, size_t, event_t);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-event_t __ovld async_work_group_strided_copy(__local half *dst, const __global half *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half2 *dst, const __global half2 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half3 *dst, const __global half3 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half4 *dst, const __global half4 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half8 *dst, const __global half8 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__local half16 *dst, const __global half16 *src, size_t num_elements, size_t src_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half *dst, const __local half *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half2 *dst, const __local half2 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half3 *dst, const __local half3 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half4 *dst, const __local half4 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half8 *dst, const __local half8 *src, size_t num_elements, size_t dst_stride, event_t event);
-event_t __ovld async_work_group_strided_copy(__global half16 *dst, const __local half16 *src, size_t num_elements, size_t dst_stride, event_t event);
+event_t __ovld async_work_group_strided_copy(__local half *, const __global half *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local half2 *, const __global half2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local half3 *, const __global half3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local half4 *, const __global half4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local half8 *, const __global half8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__local half16 *, const __global half16 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global half *, const __local half *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global half2 *, const __local half2 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global half3 *, const __local half3 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global half4 *, const __local half4 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global half8 *, const __local half8 *, size_t, size_t, event_t);
+event_t __ovld async_work_group_strided_copy(__global half16 *, const __local half16 *, size_t, size_t, event_t);
#endif //cl_khr_fp16
/**
@@ -12826,7 +12817,7 @@ event_t __ovld async_work_group_strided_copy(__global half16 *dst, const __local
* the same num_events and event objects specified
* in event_list; otherwise the results are undefined.
*/
-void __ovld wait_group_events(int num_events, event_t *event_list);
+void __ovld wait_group_events(int, event_t *);
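
A minimal sketch of the async-copy pattern these declarations support; the kernel name and buffer sizing are hypothetical, and every work-item in the group must reach the copy and the wait with identical arguments:

kernel void gather_column(__global const float *src, __local float *tile,
                          __global float *dst, uint stride) {
  // Hypothetical example: pull one strided column into local memory,
  // then block until the group-wide copy has finished.
  event_t e = async_work_group_strided_copy(tile, src, get_local_size(0),
                                            (size_t)stride, 0);
  wait_group_events(1, &e);
  dst[get_global_id(0)] = tile[get_local_id(0)] * 2.0f;
}
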
/**
* Prefetch num_elements * sizeof(gentype)
@@ -12835,75 +12826,75 @@ void __ovld wait_group_events(int num_events, event_t *event_list);
* and does not affect the functional
* behavior of the kernel.
*/
-void __ovld prefetch(const __global char *p, size_t num_elements);
-void __ovld prefetch(const __global uchar *p, size_t num_elements);
-void __ovld prefetch(const __global short *p, size_t num_elements);
-void __ovld prefetch(const __global ushort *p, size_t num_elements);
-void __ovld prefetch(const __global int *p, size_t num_elements);
-void __ovld prefetch(const __global uint *p, size_t num_elements);
-void __ovld prefetch(const __global long *p, size_t num_elements);
-void __ovld prefetch(const __global ulong *p, size_t num_elements);
-void __ovld prefetch(const __global float *p, size_t num_elements);
-void __ovld prefetch(const __global char2 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar2 *p, size_t num_elements);
-void __ovld prefetch(const __global short2 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort2 *p, size_t num_elements);
-void __ovld prefetch(const __global int2 *p, size_t num_elements);
-void __ovld prefetch(const __global uint2 *p, size_t num_elements);
-void __ovld prefetch(const __global long2 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong2 *p, size_t num_elements);
-void __ovld prefetch(const __global float2 *p, size_t num_elements);
-void __ovld prefetch(const __global char3 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar3 *p, size_t num_elements);
-void __ovld prefetch(const __global short3 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort3 *p, size_t num_elements);
-void __ovld prefetch(const __global int3 *p, size_t num_elements);
-void __ovld prefetch(const __global uint3 *p, size_t num_elements);
-void __ovld prefetch(const __global long3 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong3 *p, size_t num_elements);
-void __ovld prefetch(const __global float3 *p, size_t num_elements);
-void __ovld prefetch(const __global char4 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar4 *p, size_t num_elements);
-void __ovld prefetch(const __global short4 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort4 *p, size_t num_elements);
-void __ovld prefetch(const __global int4 *p, size_t num_elements);
-void __ovld prefetch(const __global uint4 *p, size_t num_elements);
-void __ovld prefetch(const __global long4 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong4 *p, size_t num_elements);
-void __ovld prefetch(const __global float4 *p, size_t num_elements);
-void __ovld prefetch(const __global char8 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar8 *p, size_t num_elements);
-void __ovld prefetch(const __global short8 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort8 *p, size_t num_elements);
-void __ovld prefetch(const __global int8 *p, size_t num_elements);
-void __ovld prefetch(const __global uint8 *p, size_t num_elements);
-void __ovld prefetch(const __global long8 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong8 *p, size_t num_elements);
-void __ovld prefetch(const __global float8 *p, size_t num_elements);
-void __ovld prefetch(const __global char16 *p, size_t num_elements);
-void __ovld prefetch(const __global uchar16 *p, size_t num_elements);
-void __ovld prefetch(const __global short16 *p, size_t num_elements);
-void __ovld prefetch(const __global ushort16 *p, size_t num_elements);
-void __ovld prefetch(const __global int16 *p, size_t num_elements);
-void __ovld prefetch(const __global uint16 *p, size_t num_elements);
-void __ovld prefetch(const __global long16 *p, size_t num_elements);
-void __ovld prefetch(const __global ulong16 *p, size_t num_elements);
-void __ovld prefetch(const __global float16 *p, size_t num_elements);
+void __ovld prefetch(const __global char *, size_t);
+void __ovld prefetch(const __global uchar *, size_t);
+void __ovld prefetch(const __global short *, size_t);
+void __ovld prefetch(const __global ushort *, size_t);
+void __ovld prefetch(const __global int *, size_t);
+void __ovld prefetch(const __global uint *, size_t);
+void __ovld prefetch(const __global long *, size_t);
+void __ovld prefetch(const __global ulong *, size_t);
+void __ovld prefetch(const __global float *, size_t);
+void __ovld prefetch(const __global char2 *, size_t);
+void __ovld prefetch(const __global uchar2 *, size_t);
+void __ovld prefetch(const __global short2 *, size_t);
+void __ovld prefetch(const __global ushort2 *, size_t);
+void __ovld prefetch(const __global int2 *, size_t);
+void __ovld prefetch(const __global uint2 *, size_t);
+void __ovld prefetch(const __global long2 *, size_t);
+void __ovld prefetch(const __global ulong2 *, size_t);
+void __ovld prefetch(const __global float2 *, size_t);
+void __ovld prefetch(const __global char3 *, size_t);
+void __ovld prefetch(const __global uchar3 *, size_t);
+void __ovld prefetch(const __global short3 *, size_t);
+void __ovld prefetch(const __global ushort3 *, size_t);
+void __ovld prefetch(const __global int3 *, size_t);
+void __ovld prefetch(const __global uint3 *, size_t);
+void __ovld prefetch(const __global long3 *, size_t);
+void __ovld prefetch(const __global ulong3 *, size_t);
+void __ovld prefetch(const __global float3 *, size_t);
+void __ovld prefetch(const __global char4 *, size_t);
+void __ovld prefetch(const __global uchar4 *, size_t);
+void __ovld prefetch(const __global short4 *, size_t);
+void __ovld prefetch(const __global ushort4 *, size_t);
+void __ovld prefetch(const __global int4 *, size_t);
+void __ovld prefetch(const __global uint4 *, size_t);
+void __ovld prefetch(const __global long4 *, size_t);
+void __ovld prefetch(const __global ulong4 *, size_t);
+void __ovld prefetch(const __global float4 *, size_t);
+void __ovld prefetch(const __global char8 *, size_t);
+void __ovld prefetch(const __global uchar8 *, size_t);
+void __ovld prefetch(const __global short8 *, size_t);
+void __ovld prefetch(const __global ushort8 *, size_t);
+void __ovld prefetch(const __global int8 *, size_t);
+void __ovld prefetch(const __global uint8 *, size_t);
+void __ovld prefetch(const __global long8 *, size_t);
+void __ovld prefetch(const __global ulong8 *, size_t);
+void __ovld prefetch(const __global float8 *, size_t);
+void __ovld prefetch(const __global char16 *, size_t);
+void __ovld prefetch(const __global uchar16 *, size_t);
+void __ovld prefetch(const __global short16 *, size_t);
+void __ovld prefetch(const __global ushort16 *, size_t);
+void __ovld prefetch(const __global int16 *, size_t);
+void __ovld prefetch(const __global uint16 *, size_t);
+void __ovld prefetch(const __global long16 *, size_t);
+void __ovld prefetch(const __global ulong16 *, size_t);
+void __ovld prefetch(const __global float16 *, size_t);
#ifdef cl_khr_fp64
-void __ovld prefetch(const __global double *p, size_t num_elements);
-void __ovld prefetch(const __global double2 *p, size_t num_elements);
-void __ovld prefetch(const __global double3 *p, size_t num_elements);
-void __ovld prefetch(const __global double4 *p, size_t num_elements);
-void __ovld prefetch(const __global double8 *p, size_t num_elements);
-void __ovld prefetch(const __global double16 *p, size_t num_elements);
+void __ovld prefetch(const __global double *, size_t);
+void __ovld prefetch(const __global double2 *, size_t);
+void __ovld prefetch(const __global double3 *, size_t);
+void __ovld prefetch(const __global double4 *, size_t);
+void __ovld prefetch(const __global double8 *, size_t);
+void __ovld prefetch(const __global double16 *, size_t);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-void __ovld prefetch(const __global half *p, size_t num_elements);
-void __ovld prefetch(const __global half2 *p, size_t num_elements);
-void __ovld prefetch(const __global half3 *p, size_t num_elements);
-void __ovld prefetch(const __global half4 *p, size_t num_elements);
-void __ovld prefetch(const __global half8 *p, size_t num_elements);
-void __ovld prefetch(const __global half16 *p, size_t num_elements);
+void __ovld prefetch(const __global half *, size_t);
+void __ovld prefetch(const __global half2 *, size_t);
+void __ovld prefetch(const __global half3 *, size_t);
+void __ovld prefetch(const __global half4 *, size_t);
+void __ovld prefetch(const __global half8 *, size_t);
+void __ovld prefetch(const __global half16 *, size_t);
#endif // cl_khr_fp16
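
Since prefetch is only a cache hint with no functional effect, a sketch like the following is representative (the kernel name and blocking scheme are hypothetical): one work-item per group hints the next block toward the cache while the group processes its own.

kernel void scale(__global const float4 *in, __global float4 *out, uint n) {
  size_t i = get_global_id(0);
  size_t block = get_local_size(0);
  // Hypothetical example: hint the next block toward the global cache.
  if (get_local_id(0) == 0 && i + 2 * block <= n)
    prefetch(&in[i + block], block);
  if (i < n)
    out[i] = in[i] * 0.5f;
}
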
// OpenCL v1.1 s6.11.1, v1.2 s6.12.11 - Atomic Functions
@@ -12919,28 +12910,28 @@ void __ovld prefetch(const __global half16 *p, size_t num_elements);
* pointed by p. The function returns old.
*/
int __ovld atomic_add(volatile __global int *p, int val);
-unsigned int __ovld atomic_add(volatile __global unsigned int *p, unsigned int val);
+uint __ovld atomic_add(volatile __global uint *p, uint val);
int __ovld atomic_add(volatile __local int *p, int val);
-unsigned int __ovld atomic_add(volatile __local unsigned int *p, unsigned int val);
+uint __ovld atomic_add(volatile __local uint *p, uint val);
#ifdef __OPENCL_CPP_VERSION__
int __ovld atomic_add(volatile int *p, int val);
-unsigned int __ovld atomic_add(volatile unsigned int *p, unsigned int val);
+uint __ovld atomic_add(volatile uint *p, uint val);
#endif
#if defined(cl_khr_global_int32_base_atomics)
int __ovld atom_add(volatile __global int *p, int val);
-unsigned int __ovld atom_add(volatile __global unsigned int *p, unsigned int val);
+uint __ovld atom_add(volatile __global uint *p, uint val);
#endif
#if defined(cl_khr_local_int32_base_atomics)
int __ovld atom_add(volatile __local int *p, int val);
-unsigned int __ovld atom_add(volatile __local unsigned int *p, unsigned int val);
+uint __ovld atom_add(volatile __local uint *p, uint val);
#endif
#if defined(cl_khr_int64_base_atomics)
long __ovld atom_add(volatile __global long *p, long val);
-unsigned long __ovld atom_add(volatile __global unsigned long *p, unsigned long val);
+ulong __ovld atom_add(volatile __global ulong *p, ulong val);
long __ovld atom_add(volatile __local long *p, long val);
-unsigned long __ovld atom_add(volatile __local unsigned long *p, unsigned long val);
+ulong __ovld atom_add(volatile __local ulong *p, ulong val);
#endif
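
A minimal histogram sketch using atomic_add exactly as described above (fetch old, store old + val, return old); the kernel name and the 256-bin layout are hypothetical:

kernel void histogram(__global const uchar *pixels,
                      volatile __global int *bins) {  // hypothetical 256 bins
  atomic_add(&bins[pixels[get_global_id(0)]], 1);     // returned old is unused
}
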
/**
@@ -12949,28 +12940,28 @@ unsigned long __ovld atom_add(volatile __local unsigned long *p, unsigned long v
* returns old.
*/
int __ovld atomic_sub(volatile __global int *p, int val);
-unsigned int __ovld atomic_sub(volatile __global unsigned int *p, unsigned int val);
+uint __ovld atomic_sub(volatile __global uint *p, uint val);
int __ovld atomic_sub(volatile __local int *p, int val);
-unsigned int __ovld atomic_sub(volatile __local unsigned int *p, unsigned int val);
+uint __ovld atomic_sub(volatile __local uint *p, uint val);
#ifdef __OPENCL_CPP_VERSION__
int __ovld atomic_sub(volatile int *p, int val);
-unsigned int __ovld atomic_sub(volatile unsigned int *p, unsigned int val);
+uint __ovld atomic_sub(volatile uint *p, uint val);
#endif
#if defined(cl_khr_global_int32_base_atomics)
int __ovld atom_sub(volatile __global int *p, int val);
-unsigned int __ovld atom_sub(volatile __global unsigned int *p, unsigned int val);
+uint __ovld atom_sub(volatile __global uint *p, uint val);
#endif
#if defined(cl_khr_local_int32_base_atomics)
int __ovld atom_sub(volatile __local int *p, int val);
-unsigned int __ovld atom_sub(volatile __local unsigned int *p, unsigned int val);
+uint __ovld atom_sub(volatile __local uint *p, uint val);
#endif
#if defined(cl_khr_int64_base_atomics)
long __ovld atom_sub(volatile __global long *p, long val);
-unsigned long __ovld atom_sub(volatile __global unsigned long *p, unsigned long val);
+ulong __ovld atom_sub(volatile __global ulong *p, ulong val);
long __ovld atom_sub(volatile __local long *p, long val);
-unsigned long __ovld atom_sub(volatile __local unsigned long *p, unsigned long val);
+ulong __ovld atom_sub(volatile __local ulong *p, ulong val);
#endif
/**
@@ -12979,31 +12970,31 @@ unsigned long __ovld atom_sub(volatile __local unsigned long *p, unsigned long v
* value.
*/
int __ovld atomic_xchg(volatile __global int *p, int val);
-unsigned int __ovld atomic_xchg(volatile __global unsigned int *p, unsigned int val);
+uint __ovld atomic_xchg(volatile __global uint *p, uint val);
int __ovld atomic_xchg(volatile __local int *p, int val);
-unsigned int __ovld atomic_xchg(volatile __local unsigned int *p, unsigned int val);
+uint __ovld atomic_xchg(volatile __local uint *p, uint val);
float __ovld atomic_xchg(volatile __global float *p, float val);
float __ovld atomic_xchg(volatile __local float *p, float val);
#ifdef __OPENCL_CPP_VERSION__
int __ovld atomic_xchg(volatile int *p, int val);
-unsigned int __ovld atomic_xchg(volatile unsigned int *p, unsigned int val);
+uint __ovld atomic_xchg(volatile uint *p, uint val);
float __ovld atomic_xchg(volatile float *p, float val);
#endif
#if defined(cl_khr_global_int32_base_atomics)
int __ovld atom_xchg(volatile __global int *p, int val);
-unsigned int __ovld atom_xchg(volatile __global unsigned int *p, unsigned int val);
+uint __ovld atom_xchg(volatile __global uint *p, uint val);
#endif
#if defined(cl_khr_local_int32_base_atomics)
int __ovld atom_xchg(volatile __local int *p, int val);
-unsigned int __ovld atom_xchg(volatile __local unsigned int *p, unsigned int val);
+uint __ovld atom_xchg(volatile __local uint *p, uint val);
#endif
#if defined(cl_khr_int64_base_atomics)
long __ovld atom_xchg(volatile __global long *p, long val);
long __ovld atom_xchg(volatile __local long *p, long val);
-unsigned long __ovld atom_xchg(volatile __global unsigned long *p, unsigned long val);
-unsigned long __ovld atom_xchg(volatile __local unsigned long *p, unsigned long val);
+ulong __ovld atom_xchg(volatile __global ulong *p, ulong val);
+ulong __ovld atom_xchg(volatile __local ulong *p, ulong val);
#endif
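
A sketch of atomic_xchg as a one-shot claim: the swap returns the previous value, so exactly one work-item observes the initial token. The kernel name and the host-side initialization of *token to 1 are assumptions:

kernel void claim_token(volatile __global int *token, __global int *winner) {
  if (atomic_xchg(token, 0) == 1)      // only one caller sees the old 1
    *winner = (int)get_global_id(0);   // hypothetical use of the claim
}
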
/**
@@ -13012,29 +13003,29 @@ unsigned long __ovld atom_xchg(volatile __local unsigned long *p, unsigned long
* (old + 1) and store result at location
* pointed by p. The function returns old.
*/
-int __ovld atomic_inc(volatile __global int *p);
-unsigned int __ovld atomic_inc(volatile __global unsigned int *p);
-int __ovld atomic_inc(volatile __local int *p);
-unsigned int __ovld atomic_inc(volatile __local unsigned int *p);
+int __ovld atomic_inc(volatile __global int *);
+uint __ovld atomic_inc(volatile __global uint *);
+int __ovld atomic_inc(volatile __local int *);
+uint __ovld atomic_inc(volatile __local uint *);
#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_inc(volatile int *p);
-unsigned int __ovld atomic_inc(volatile unsigned int *p);
+int __ovld atomic_inc(volatile int *);
+uint __ovld atomic_inc(volatile uint *);
#endif
#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_inc(volatile __global int *p);
-unsigned int __ovld atom_inc(volatile __global unsigned int *p);
+int __ovld atom_inc(volatile __global int *);
+uint __ovld atom_inc(volatile __global uint *);
#endif
#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_inc(volatile __local int *p);
-unsigned int __ovld atom_inc(volatile __local unsigned int *p);
+int __ovld atom_inc(volatile __local int *);
+uint __ovld atom_inc(volatile __local uint *);
#endif
#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_inc(volatile __global long *p);
-unsigned long __ovld atom_inc(volatile __global unsigned long *p);
-long __ovld atom_inc(volatile __local long *p);
-unsigned long __ovld atom_inc(volatile __local unsigned long *p);
+long __ovld atom_inc(volatile __global long *);
+ulong __ovld atom_inc(volatile __global ulong *);
+long __ovld atom_inc(volatile __local long *);
+ulong __ovld atom_inc(volatile __local ulong *);
#endif
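
A stream-compaction sketch built on atomic_inc: each work-item that keeps its element gets a unique output slot because the function returns the pre-increment count. Names are hypothetical; *counter is assumed zeroed by the host:

kernel void compact(__global const int *in, __global int *out,
                    volatile __global uint *counter) {
  int v = in[get_global_id(0)];
  if (v != 0)
    out[atomic_inc(counter)] = v;  // atomic_inc returns old, stores old + 1
}
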
/**
@@ -13043,29 +13034,29 @@ unsigned long __ovld atom_inc(volatile __local unsigned long *p);
* (old - 1) and store result at location
* pointed by p. The function returns old.
*/
-int __ovld atomic_dec(volatile __global int *p);
-unsigned int __ovld atomic_dec(volatile __global unsigned int *p);
-int __ovld atomic_dec(volatile __local int *p);
-unsigned int __ovld atomic_dec(volatile __local unsigned int *p);
+int __ovld atomic_dec(volatile __global int *);
+uint __ovld atomic_dec(volatile __global uint *);
+int __ovld atomic_dec(volatile __local int *);
+uint __ovld atomic_dec(volatile __local uint *);
#ifdef __OPENCL_CPP_VERSION__
-int __ovld atomic_dec(volatile int *p);
-unsigned int __ovld atomic_dec(volatile unsigned int *p);
+int __ovld atomic_dec(volatile int *);
+uint __ovld atomic_dec(volatile uint *);
#endif
#if defined(cl_khr_global_int32_base_atomics)
-int __ovld atom_dec(volatile __global int *p);
-unsigned int __ovld atom_dec(volatile __global unsigned int *p);
+int __ovld atom_dec(volatile __global int *);
+uint __ovld atom_dec(volatile __global uint *);
#endif
#if defined(cl_khr_local_int32_base_atomics)
-int __ovld atom_dec(volatile __local int *p);
-unsigned int __ovld atom_dec(volatile __local unsigned int *p);
+int __ovld atom_dec(volatile __local int *);
+uint __ovld atom_dec(volatile __local uint *);
#endif
#if defined(cl_khr_int64_base_atomics)
-long __ovld atom_dec(volatile __global long *p);
-unsigned long __ovld atom_dec(volatile __global unsigned long *p);
-long __ovld atom_dec(volatile __local long *p);
-unsigned long __ovld atom_dec(volatile __local unsigned long *p);
+long __ovld atom_dec(volatile __global long *);
+ulong __ovld atom_dec(volatile __global ulong *);
+long __ovld atom_dec(volatile __local long *);
+ulong __ovld atom_dec(volatile __local ulong *);
#endif
/**
@@ -13076,28 +13067,28 @@ unsigned long __ovld atom_dec(volatile __local unsigned long *p);
* returns old.
*/
int __ovld atomic_cmpxchg(volatile __global int *p, int cmp, int val);
-unsigned int __ovld atomic_cmpxchg(volatile __global unsigned int *p, unsigned int cmp, unsigned int val);
+uint __ovld atomic_cmpxchg(volatile __global uint *p, uint cmp, uint val);
int __ovld atomic_cmpxchg(volatile __local int *p, int cmp, int val);
-unsigned int __ovld atomic_cmpxchg(volatile __local unsigned int *p, unsigned int cmp, unsigned int val);
+uint __ovld atomic_cmpxchg(volatile __local uint *p, uint cmp, uint val);
#ifdef __OPENCL_CPP_VERSION__
int __ovld atomic_cmpxchg(volatile int *p, int cmp, int val);
-unsigned int __ovld atomic_cmpxchg(volatile unsigned int *p, unsigned int cmp, unsigned int val);
+uint __ovld atomic_cmpxchg(volatile uint *p, uint cmp, uint val);
#endif
#if defined(cl_khr_global_int32_base_atomics)
int __ovld atom_cmpxchg(volatile __global int *p, int cmp, int val);
-unsigned int __ovld atom_cmpxchg(volatile __global unsigned int *p, unsigned int cmp, unsigned int val);
+uint __ovld atom_cmpxchg(volatile __global uint *p, uint cmp, uint val);
#endif
#if defined(cl_khr_local_int32_base_atomics)
int __ovld atom_cmpxchg(volatile __local int *p, int cmp, int val);
-unsigned int __ovld atom_cmpxchg(volatile __local unsigned int *p, unsigned int cmp, unsigned int val);
+uint __ovld atom_cmpxchg(volatile __local uint *p, uint cmp, uint val);
#endif
#if defined(cl_khr_int64_base_atomics)
long __ovld atom_cmpxchg(volatile __global long *p, long cmp, long val);
-unsigned long __ovld atom_cmpxchg(volatile __global unsigned long *p, unsigned long cmp, unsigned long val);
+ulong __ovld atom_cmpxchg(volatile __global ulong *p, ulong cmp, ulong val);
long __ovld atom_cmpxchg(volatile __local long *p, long cmp, long val);
-unsigned long __ovld atom_cmpxchg(volatile __local unsigned long *p, unsigned long cmp, unsigned long val);
+ulong __ovld atom_cmpxchg(volatile __local ulong *p, ulong cmp, ulong val);
#endif
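
atomic_cmpxchg is the building block for read-modify-write operations this API lacks. A classic sketch (the helper name atomic_add_float is hypothetical) retries a compare-and-swap on the float's bit pattern until no other work-item has intervened:

void atomic_add_float(volatile __global float *p, float val) {
  float old_v, new_v;
  do {
    old_v = *p;            // snapshot the current value
    new_v = old_v + val;   // compute the desired value
    // the swap succeeds only if *p still holds the snapshot's bit pattern
  } while (atomic_cmpxchg((volatile __global int *)p,
                          as_int(old_v), as_int(new_v)) != as_int(old_v));
}
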
/**
@@ -13108,28 +13099,28 @@ unsigned long __ovld atom_cmpxchg(volatile __local unsigned long *p, unsigned lo
* returns old.
*/
int __ovld atomic_min(volatile __global int *p, int val);
-unsigned int __ovld atomic_min(volatile __global unsigned int *p, unsigned int val);
+uint __ovld atomic_min(volatile __global uint *p, uint val);
int __ovld atomic_min(volatile __local int *p, int val);
-unsigned int __ovld atomic_min(volatile __local unsigned int *p, unsigned int val);
+uint __ovld atomic_min(volatile __local uint *p, uint val);
#ifdef __OPENCL_CPP_VERSION__
int __ovld atomic_min(volatile int *p, int val);
-unsigned int __ovld atomic_min(volatile unsigned int *p, unsigned int val);
+uint __ovld atomic_min(volatile uint *p, uint val);
#endif
#if defined(cl_khr_global_int32_extended_atomics)
int __ovld atom_min(volatile __global int *p, int val);
-unsigned int __ovld atom_min(volatile __global unsigned int *p, unsigned int val);
+uint __ovld atom_min(volatile __global uint *p, uint val);
#endif
#if defined(cl_khr_local_int32_extended_atomics)
int __ovld atom_min(volatile __local int *p, int val);
-unsigned int __ovld atom_min(volatile __local unsigned int *p, unsigned int val);
+uint __ovld atom_min(volatile __local uint *p, uint val);
#endif
#if defined(cl_khr_int64_extended_atomics)
long __ovld atom_min(volatile __global long *p, long val);
-unsigned long __ovld atom_min(volatile __global unsigned long *p, unsigned long val);
+ulong __ovld atom_min(volatile __global ulong *p, ulong val);
long __ovld atom_min(volatile __local long *p, long val);
-unsigned long __ovld atom_min(volatile __local unsigned long *p, unsigned long val);
+ulong __ovld atom_min(volatile __local ulong *p, ulong val);
#endif
/**
@@ -13140,28 +13131,28 @@ unsigned long __ovld atom_min(volatile __local unsigned long *p, unsigned long v
* returns old.
*/
int __ovld atomic_max(volatile __global int *p, int val);
-unsigned int __ovld atomic_max(volatile __global unsigned int *p, unsigned int val);
+uint __ovld atomic_max(volatile __global uint *p, uint val);
int __ovld atomic_max(volatile __local int *p, int val);
-unsigned int __ovld atomic_max(volatile __local unsigned int *p, unsigned int val);
+uint __ovld atomic_max(volatile __local uint *p, uint val);
#ifdef __OPENCL_CPP_VERSION__
int __ovld atomic_max(volatile int *p, int val);
-unsigned int __ovld atomic_max(volatile unsigned int *p, unsigned int val);
+uint __ovld atomic_max(volatile uint *p, uint val);
#endif
#if defined(cl_khr_global_int32_extended_atomics)
int __ovld atom_max(volatile __global int *p, int val);
-unsigned int __ovld atom_max(volatile __global unsigned int *p, unsigned int val);
+uint __ovld atom_max(volatile __global uint *p, uint val);
#endif
#if defined(cl_khr_local_int32_extended_atomics)
int __ovld atom_max(volatile __local int *p, int val);
-unsigned int __ovld atom_max(volatile __local unsigned int *p, unsigned int val);
+uint __ovld atom_max(volatile __local uint *p, uint val);
#endif
#if defined(cl_khr_int64_extended_atomics)
long __ovld atom_max(volatile __global long *p, long val);
-unsigned long __ovld atom_max(volatile __global unsigned long *p, unsigned long val);
+ulong __ovld atom_max(volatile __global ulong *p, ulong val);
long __ovld atom_max(volatile __local long *p, long val);
-unsigned long __ovld atom_max(volatile __local unsigned long *p, unsigned long val);
+ulong __ovld atom_max(volatile __local ulong *p, ulong val);
#endif
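
A one-line global reduction sketch with atomic_max; the kernel name is hypothetical and *result is assumed initialized to INT_MIN by the host:

kernel void reduce_max(__global const int *data,
                       volatile __global int *result) {
  atomic_max(result, data[get_global_id(0)]);  // old value is discarded
}
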
/**
@@ -13171,28 +13162,28 @@ unsigned long __ovld atom_max(volatile __local unsigned long *p, unsigned long v
* pointed by p. The function returns old.
*/
int __ovld atomic_and(volatile __global int *p, int val);
-unsigned int __ovld atomic_and(volatile __global unsigned int *p, unsigned int val);
+uint __ovld atomic_and(volatile __global uint *p, uint val);
int __ovld atomic_and(volatile __local int *p, int val);
-unsigned int __ovld atomic_and(volatile __local unsigned int *p, unsigned int val);
+uint __ovld atomic_and(volatile __local uint *p, uint val);
#ifdef __OPENCL_CPP_VERSION__
int __ovld atomic_and(volatile int *p, int val);
-unsigned int __ovld atomic_and(volatile unsigned int *p, unsigned int val);
+uint __ovld atomic_and(volatile uint *p, uint val);
#endif
#if defined(cl_khr_global_int32_extended_atomics)
int __ovld atom_and(volatile __global int *p, int val);
-unsigned int __ovld atom_and(volatile __global unsigned int *p, unsigned int val);
+uint __ovld atom_and(volatile __global uint *p, uint val);
#endif
#if defined(cl_khr_local_int32_extended_atomics)
int __ovld atom_and(volatile __local int *p, int val);
-unsigned int __ovld atom_and(volatile __local unsigned int *p, unsigned int val);
+uint __ovld atom_and(volatile __local uint *p, uint val);
#endif
#if defined(cl_khr_int64_extended_atomics)
long __ovld atom_and(volatile __global long *p, long val);
-unsigned long __ovld atom_and(volatile __global unsigned long *p, unsigned long val);
+ulong __ovld atom_and(volatile __global ulong *p, ulong val);
long __ovld atom_and(volatile __local long *p, long val);
-unsigned long __ovld atom_and(volatile __local unsigned long *p, unsigned long val);
+ulong __ovld atom_and(volatile __local ulong *p, ulong val);
#endif
/**
@@ -13202,28 +13193,28 @@ unsigned long __ovld atom_and(volatile __local unsigned long *p, unsigned long v
* pointed by p. The function returns old.
*/
int __ovld atomic_or(volatile __global int *p, int val);
-unsigned int __ovld atomic_or(volatile __global unsigned int *p, unsigned int val);
+uint __ovld atomic_or(volatile __global uint *p, uint val);
int __ovld atomic_or(volatile __local int *p, int val);
-unsigned int __ovld atomic_or(volatile __local unsigned int *p, unsigned int val);
+uint __ovld atomic_or(volatile __local uint *p, uint val);
#ifdef __OPENCL_CPP_VERSION__
int __ovld atomic_or(volatile int *p, int val);
-unsigned int __ovld atomic_or(volatile unsigned int *p, unsigned int val);
+uint __ovld atomic_or(volatile uint *p, uint val);
#endif
#if defined(cl_khr_global_int32_extended_atomics)
int __ovld atom_or(volatile __global int *p, int val);
-unsigned int __ovld atom_or(volatile __global unsigned int *p, unsigned int val);
+uint __ovld atom_or(volatile __global uint *p, uint val);
#endif
#if defined(cl_khr_local_int32_extended_atomics)
int __ovld atom_or(volatile __local int *p, int val);
-unsigned int __ovld atom_or(volatile __local unsigned int *p, unsigned int val);
+uint __ovld atom_or(volatile __local uint *p, uint val);
#endif
#if defined(cl_khr_int64_extended_atomics)
long __ovld atom_or(volatile __global long *p, long val);
-unsigned long __ovld atom_or(volatile __global unsigned long *p, unsigned long val);
+ulong __ovld atom_or(volatile __global ulong *p, ulong val);
long __ovld atom_or(volatile __local long *p, long val);
-unsigned long __ovld atom_or(volatile __local unsigned long *p, unsigned long val);
+ulong __ovld atom_or(volatile __local ulong *p, ulong val);
#endif
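
A sketch using atomic_or to accumulate per-condition flag bits into a shared mask without losing concurrent updates; the flag encoding and kernel name are hypothetical:

kernel void mark_features(__global const float *x,
                          volatile __global uint *mask) {
  size_t i = get_global_id(0);
  if (x[i] < 0.0f) atomic_or(mask, 1u << 0);  // bit 0: a negative was seen
  if (isnan(x[i])) atomic_or(mask, 1u << 1);  // bit 1: a NaN was seen
}
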
/**
@@ -13233,28 +13224,28 @@ unsigned long __ovld atom_or(volatile __local unsigned long *p, unsigned long va
* pointed by p. The function returns old.
*/
int __ovld atomic_xor(volatile __global int *p, int val);
-unsigned int __ovld atomic_xor(volatile __global unsigned int *p, unsigned int val);
+uint __ovld atomic_xor(volatile __global uint *p, uint val);
int __ovld atomic_xor(volatile __local int *p, int val);
-unsigned int __ovld atomic_xor(volatile __local unsigned int *p, unsigned int val);
+uint __ovld atomic_xor(volatile __local uint *p, uint val);
#ifdef __OPENCL_CPP_VERSION__
int __ovld atomic_xor(volatile int *p, int val);
-unsigned int __ovld atomic_xor(volatile unsigned int *p, unsigned int val);
+uint __ovld atomic_xor(volatile uint *p, uint val);
#endif
#if defined(cl_khr_global_int32_extended_atomics)
int __ovld atom_xor(volatile __global int *p, int val);
-unsigned int __ovld atom_xor(volatile __global unsigned int *p, unsigned int val);
+uint __ovld atom_xor(volatile __global uint *p, uint val);
#endif
#if defined(cl_khr_local_int32_extended_atomics)
int __ovld atom_xor(volatile __local int *p, int val);
-unsigned int __ovld atom_xor(volatile __local unsigned int *p, unsigned int val);
+uint __ovld atom_xor(volatile __local uint *p, uint val);
#endif
#if defined(cl_khr_int64_extended_atomics)
long __ovld atom_xor(volatile __global long *p, long val);
-unsigned long __ovld atom_xor(volatile __global unsigned long *p, unsigned long val);
+ulong __ovld atom_xor(volatile __global ulong *p, ulong val);
long __ovld atom_xor(volatile __local long *p, long val);
-unsigned long __ovld atom_xor(volatile __local unsigned long *p, unsigned long val);
+ulong __ovld atom_xor(volatile __local ulong *p, ulong val);
#endif
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
@@ -13274,38 +13265,38 @@ unsigned long __ovld atom_xor(volatile __local unsigned long *p, unsigned long v
// atomic_init()
#if defined(__opencl_c_generic_address_space)
-void __ovld atomic_init(volatile atomic_int *object, int value);
-void __ovld atomic_init(volatile atomic_uint *object, uint value);
-void __ovld atomic_init(volatile atomic_float *object, float value);
+void __ovld atomic_init(volatile atomic_int *, int);
+void __ovld atomic_init(volatile atomic_uint *, uint);
+void __ovld atomic_init(volatile atomic_float *, float);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-void __ovld atomic_init(volatile atomic_long *object, long value);
-void __ovld atomic_init(volatile atomic_ulong *object, ulong value);
+void __ovld atomic_init(volatile atomic_long *, long);
+void __ovld atomic_init(volatile atomic_ulong *, ulong);
#ifdef cl_khr_fp64
-void __ovld atomic_init(volatile atomic_double *object, double value);
+void __ovld atomic_init(volatile atomic_double *, double);
#endif //cl_khr_fp64
#endif
#endif //defined(__opencl_c_generic_address_space)
#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-void __ovld atomic_init(volatile __global atomic_int *object, int value);
-void __ovld atomic_init(volatile __local atomic_int *object, int value);
-void __ovld atomic_init(volatile __global atomic_uint *object, uint value);
-void __ovld atomic_init(volatile __local atomic_uint *object, uint value);
-void __ovld atomic_init(volatile __global atomic_float *object, float value);
-void __ovld atomic_init(volatile __local atomic_float *object, float value);
+void __ovld atomic_init(volatile __global atomic_int *, int);
+void __ovld atomic_init(volatile __local atomic_int *, int);
+void __ovld atomic_init(volatile __global atomic_uint *, uint);
+void __ovld atomic_init(volatile __local atomic_uint *, uint);
+void __ovld atomic_init(volatile __global atomic_float *, float);
+void __ovld atomic_init(volatile __local atomic_float *, float);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-void __ovld atomic_init(volatile __global atomic_long *object, long value);
-void __ovld atomic_init(volatile __local atomic_long *object, long value);
-void __ovld atomic_init(volatile __global atomic_ulong *object, ulong value);
-void __ovld atomic_init(volatile __local atomic_ulong *object, ulong value);
+void __ovld atomic_init(volatile __global atomic_long *, long);
+void __ovld atomic_init(volatile __local atomic_long *, long);
+void __ovld atomic_init(volatile __global atomic_ulong *, ulong);
+void __ovld atomic_init(volatile __local atomic_ulong *, ulong);
#ifdef cl_khr_fp64
-void __ovld atomic_init(volatile __global atomic_double *object, double value);
-void __ovld atomic_init(volatile __local atomic_double *object, double value);
+void __ovld atomic_init(volatile __global atomic_double *, double);
+void __ovld atomic_init(volatile __local atomic_double *, double);
#endif //cl_khr_fp64
#endif
#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
// atomic_work_item_fence()
-void __ovld atomic_work_item_fence(cl_mem_fence_flags flags, memory_order order, memory_scope scope);
+void __ovld atomic_work_item_fence(cl_mem_fence_flags, memory_order, memory_scope);
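
A producer-side sketch combining atomic_init with atomic_work_item_fence, assuming a hypothetical local-memory handshake; a consumer in the same work-group would pair it with an acquire fence before reading the payload. atomic_store_explicit is the standard OpenCL 2.0 store, declared elsewhere in this header:

kernel void publish(__local int *payload, __local atomic_int *flag) {
  if (get_local_id(0) == 0)
    atomic_init(flag, 0);                    // flag starts unset
  barrier(CLK_LOCAL_MEM_FENCE);
  if (get_local_id(0) == 0) {
    *payload = 42;                           // write the payload first
    atomic_work_item_fence(CLK_LOCAL_MEM_FENCE,
                           memory_order_release,
                           memory_scope_work_group);
    atomic_store_explicit(flag, 1, memory_order_relaxed,
                          memory_scope_work_group);
  }
}
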
// atomic_fetch()
// OpenCL v2.0 s6.13.11.7.5:
@@ -13313,356 +13304,356 @@ void __ovld atomic_work_item_fence(cl_mem_fence_flags flags, memory_order order,
#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_fetch_add(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_add(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_sub(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_sub(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_or(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_or(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_xor(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_xor(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_and(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_and(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_min(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_min(volatile atomic_uint *object, uint operand);
-int __ovld atomic_fetch_max(volatile atomic_int *object, int operand);
-uint __ovld atomic_fetch_max(volatile atomic_uint *object, uint operand);
+int __ovld atomic_fetch_add(volatile atomic_int *, int);
+uint __ovld atomic_fetch_add(volatile atomic_uint *, uint);
+int __ovld atomic_fetch_sub(volatile atomic_int *, int);
+uint __ovld atomic_fetch_sub(volatile atomic_uint *, uint);
+int __ovld atomic_fetch_or(volatile atomic_int *, int);
+uint __ovld atomic_fetch_or(volatile atomic_uint *, uint);
+int __ovld atomic_fetch_xor(volatile atomic_int *, int);
+uint __ovld atomic_fetch_xor(volatile atomic_uint *, uint);
+int __ovld atomic_fetch_and(volatile atomic_int *, int);
+uint __ovld atomic_fetch_and(volatile atomic_uint *, uint);
+int __ovld atomic_fetch_min(volatile atomic_int *, int);
+uint __ovld atomic_fetch_min(volatile atomic_uint *, uint);
+int __ovld atomic_fetch_max(volatile atomic_int *, int);
+uint __ovld atomic_fetch_max(volatile atomic_uint *, uint);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_add(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_sub(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_sub(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_or(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_or(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_xor(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_xor(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_and(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_and(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_min(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_min(volatile atomic_ulong *object, ulong operand);
-long __ovld atomic_fetch_max(volatile atomic_long *object, long operand);
-ulong __ovld atomic_fetch_max(volatile atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_add(volatile atomic_uintptr_t *object, ptrdiff_t operand);
-uintptr_t __ovld atomic_fetch_sub(volatile atomic_uintptr_t *object, ptrdiff_t operand);
+long __ovld atomic_fetch_add(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_add(volatile atomic_ulong *, ulong);
+long __ovld atomic_fetch_sub(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_sub(volatile atomic_ulong *, ulong);
+long __ovld atomic_fetch_or(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_or(volatile atomic_ulong *, ulong);
+long __ovld atomic_fetch_xor(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_xor(volatile atomic_ulong *, ulong);
+long __ovld atomic_fetch_and(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_and(volatile atomic_ulong *, ulong);
+long __ovld atomic_fetch_min(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_min(volatile atomic_ulong *, ulong);
+long __ovld atomic_fetch_max(volatile atomic_long *, long);
+ulong __ovld atomic_fetch_max(volatile atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_add(volatile atomic_uintptr_t *, ptrdiff_t);
+uintptr_t __ovld atomic_fetch_sub(volatile atomic_uintptr_t *, ptrdiff_t);
#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#endif //defined(__opencl_c_generic_address_space)
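
These 2.0-style fetch functions default to seq_cst ordering at device scope and return the prior value, so a counting sketch (names hypothetical) is just:

kernel void count_hits(__global const float *x, __global atomic_uint *hits) {
  if (x[get_global_id(0)] > 0.5f)
    atomic_fetch_add(hits, 1u);  // prior count is returned but unused here
}
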
#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_fetch_add(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_add(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_add(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_add(volatile __local atomic_uint *object, uint operand);
-int __ovld atomic_fetch_sub(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_sub(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_sub(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_sub(volatile __local atomic_uint *object, uint operand);
-int __ovld atomic_fetch_or(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_or(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_or(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_or(volatile __local atomic_uint *object, uint operand);
-int __ovld atomic_fetch_xor(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_xor(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_xor(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_xor(volatile __local atomic_uint *object, uint operand);
-int __ovld atomic_fetch_and(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_and(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_and(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_and(volatile __local atomic_uint *object, uint operand);
-int __ovld atomic_fetch_min(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_min(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_min(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_min(volatile __local atomic_uint *object, uint operand);
-int __ovld atomic_fetch_max(volatile __global atomic_int *object, int operand);
-int __ovld atomic_fetch_max(volatile __local atomic_int *object, int operand);
-uint __ovld atomic_fetch_max(volatile __global atomic_uint *object, uint operand);
-uint __ovld atomic_fetch_max(volatile __local atomic_uint *object, uint operand);
+int __ovld atomic_fetch_add(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_add(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_add(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_add(volatile __local atomic_uint *, uint);
+int __ovld atomic_fetch_sub(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_sub(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_sub(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_sub(volatile __local atomic_uint *, uint);
+int __ovld atomic_fetch_or(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_or(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_or(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_or(volatile __local atomic_uint *, uint);
+int __ovld atomic_fetch_xor(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_xor(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_xor(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_xor(volatile __local atomic_uint *, uint);
+int __ovld atomic_fetch_and(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_and(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_and(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_and(volatile __local atomic_uint *, uint);
+int __ovld atomic_fetch_min(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_min(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_min(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_min(volatile __local atomic_uint *, uint);
+int __ovld atomic_fetch_max(volatile __global atomic_int *, int);
+int __ovld atomic_fetch_max(volatile __local atomic_int *, int);
+uint __ovld atomic_fetch_max(volatile __global atomic_uint *, uint);
+uint __ovld atomic_fetch_max(volatile __local atomic_uint *, uint);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_add(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_add(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_add(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_add(volatile __global atomic_uintptr_t *object, ptrdiff_t operand);
-uintptr_t __ovld atomic_fetch_add(volatile __local atomic_uintptr_t *object, ptrdiff_t operand);
-long __ovld atomic_fetch_sub(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_sub(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_sub(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_sub(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_sub(volatile __global atomic_uintptr_t *object, ptrdiff_t operand);
-uintptr_t __ovld atomic_fetch_sub(volatile __local atomic_uintptr_t *object, ptrdiff_t operand);
-long __ovld atomic_fetch_or(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_or(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_or(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_or(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_or(volatile __global atomic_uintptr_t *object, intptr_t operand);
-uintptr_t __ovld atomic_fetch_or(volatile __local atomic_uintptr_t *object, intptr_t operand);
-intptr_t __ovld atomic_fetch_or(volatile __global atomic_intptr_t *object, uintptr_t operand);
-intptr_t __ovld atomic_fetch_or(volatile __local atomic_intptr_t *object, uintptr_t operand);
-long __ovld atomic_fetch_xor(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_xor(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_xor(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_xor(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_xor(volatile __global atomic_uintptr_t *object, intptr_t operand);
-uintptr_t __ovld atomic_fetch_xor(volatile __local atomic_uintptr_t *object, intptr_t operand);
-intptr_t __ovld atomic_fetch_xor(volatile __global atomic_intptr_t *object, uintptr_t operand);
-intptr_t __ovld atomic_fetch_xor(volatile __local atomic_intptr_t *object, uintptr_t operand);
-long __ovld atomic_fetch_and(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_and(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_and(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_and(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_and(volatile __global atomic_uintptr_t *object, intptr_t operand);
-uintptr_t __ovld atomic_fetch_and(volatile __local atomic_uintptr_t *object, intptr_t operand);
-intptr_t __ovld atomic_fetch_and(volatile __global atomic_intptr_t *object, uintptr_t operand);
-intptr_t __ovld atomic_fetch_and(volatile __local atomic_intptr_t *object, uintptr_t operand);
-long __ovld atomic_fetch_min(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_min(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_min(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_min(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_min(volatile __global atomic_uintptr_t *object, intptr_t operand);
-uintptr_t __ovld atomic_fetch_min(volatile __local atomic_uintptr_t *object, intptr_t operand);
-intptr_t __ovld atomic_fetch_min(volatile __global atomic_intptr_t *object, uintptr_t operand);
-intptr_t __ovld atomic_fetch_min(volatile __local atomic_intptr_t *object, uintptr_t operand);
-long __ovld atomic_fetch_max(volatile __global atomic_long *object, long operand);
-long __ovld atomic_fetch_max(volatile __local atomic_long *object, long operand);
-ulong __ovld atomic_fetch_max(volatile __global atomic_ulong *object, ulong operand);
-ulong __ovld atomic_fetch_max(volatile __local atomic_ulong *object, ulong operand);
-uintptr_t __ovld atomic_fetch_add(volatile __global atomic_uintptr_t *object, ptrdiff_t operand);
-uintptr_t __ovld atomic_fetch_sub(volatile __local atomic_uintptr_t *object, ptrdiff_t operand);
+long __ovld atomic_fetch_add(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_add(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_add(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_add(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_add(volatile __global atomic_uintptr_t *, ptrdiff_t);
+uintptr_t __ovld atomic_fetch_add(volatile __local atomic_uintptr_t *, ptrdiff_t);
+long __ovld atomic_fetch_sub(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_sub(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_sub(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_sub(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_sub(volatile __global atomic_uintptr_t *, ptrdiff_t);
+uintptr_t __ovld atomic_fetch_sub(volatile __local atomic_uintptr_t *, ptrdiff_t);
+long __ovld atomic_fetch_or(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_or(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_or(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_or(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_or(volatile __global atomic_uintptr_t *, intptr_t);
+uintptr_t __ovld atomic_fetch_or(volatile __local atomic_uintptr_t *, intptr_t);
+intptr_t __ovld atomic_fetch_or(volatile __global atomic_intptr_t *, uintptr_t);
+intptr_t __ovld atomic_fetch_or(volatile __local atomic_intptr_t *, uintptr_t);
+long __ovld atomic_fetch_xor(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_xor(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_xor(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_xor(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_xor(volatile __global atomic_uintptr_t *, intptr_t);
+uintptr_t __ovld atomic_fetch_xor(volatile __local atomic_uintptr_t *, intptr_t);
+intptr_t __ovld atomic_fetch_xor(volatile __global atomic_intptr_t *, uintptr_t);
+intptr_t __ovld atomic_fetch_xor(volatile __local atomic_intptr_t *, uintptr_t);
+long __ovld atomic_fetch_and(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_and(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_and(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_and(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_and(volatile __global atomic_uintptr_t *, intptr_t);
+uintptr_t __ovld atomic_fetch_and(volatile __local atomic_uintptr_t *, intptr_t);
+intptr_t __ovld atomic_fetch_and(volatile __global atomic_intptr_t *, uintptr_t);
+intptr_t __ovld atomic_fetch_and(volatile __local atomic_intptr_t *, uintptr_t);
+long __ovld atomic_fetch_min(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_min(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_min(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_min(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_min(volatile __global atomic_uintptr_t *, intptr_t);
+uintptr_t __ovld atomic_fetch_min(volatile __local atomic_uintptr_t *, intptr_t);
+intptr_t __ovld atomic_fetch_min(volatile __global atomic_intptr_t *, uintptr_t);
+intptr_t __ovld atomic_fetch_min(volatile __local atomic_intptr_t *, uintptr_t);
+long __ovld atomic_fetch_max(volatile __global atomic_long *, long);
+long __ovld atomic_fetch_max(volatile __local atomic_long *, long);
+ulong __ovld atomic_fetch_max(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_fetch_max(volatile __local atomic_ulong *, ulong);
+uintptr_t __ovld atomic_fetch_max(volatile __global atomic_uintptr_t *, uintptr_t);
+uintptr_t __ovld atomic_fetch_max(volatile __local atomic_uintptr_t *, uintptr_t);
#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
#endif
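/* Illustration only, not part of the header or this diff: when the generic
 * address space is unavailable, overload resolution picks the __global/__local
 * declarations above, e.g. a hypothetical 16-bin histogram:
 *
 *   kernel void hist(global atomic_uint *bins, global const uint *keys) {
 *     atomic_fetch_add(&bins[keys[get_global_id(0)] & 15u], 1u);
 *   }
 */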
#if defined(__opencl_c_atomic_scope_device)
#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order);
+int __ovld atomic_fetch_add_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_sub_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_or_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_xor_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_and_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_min_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_max_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *, uint, memory_order);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_sub_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_or_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_xor_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_and_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
+long __ovld atomic_fetch_add_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *, ulong, memory_order);
+long __ovld atomic_fetch_sub_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *, ulong, memory_order);
+long __ovld atomic_fetch_or_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *, ulong, memory_order);
+long __ovld atomic_fetch_xor_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *, ulong, memory_order);
+long __ovld atomic_fetch_and_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *, ulong, memory_order);
+long __ovld atomic_fetch_min_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *, ulong, memory_order);
+long __ovld atomic_fetch_max_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *, ptrdiff_t, memory_order);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *, ptrdiff_t, memory_order);
#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#endif //defined(__opencl_c_generic_address_space)
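/* Illustration only, not part of the header or this diff: the three-argument
 * _explicit forms take a memory_order; relaxed order suffices when only
 * atomicity, not inter-work-item ordering, is required:
 *
 *   kernel void tally(volatile global atomic_int *hits) {
 *     atomic_fetch_add_explicit(hits, 1, memory_order_relaxed);
 *   }
 */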
#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_fetch_add_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_add_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_add_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_add_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_sub_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_sub_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_sub_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_sub_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_or_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_or_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_or_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_or_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_xor_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_xor_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_xor_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_xor_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_and_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_and_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_and_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_and_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_min_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_min_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_min_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_min_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
-int __ovld atomic_fetch_max_explicit(volatile __global atomic_int *object, int operand, memory_order order);
-int __ovld atomic_fetch_max_explicit(volatile __local atomic_int *object, int operand, memory_order order);
-uint __ovld atomic_fetch_max_explicit(volatile __global atomic_uint *object, uint operand, memory_order order);
-uint __ovld atomic_fetch_max_explicit(volatile __local atomic_uint *object, uint operand, memory_order order);
+int __ovld atomic_fetch_add_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_add_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_add_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_add_explicit(volatile __local atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_sub_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_sub_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_sub_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_sub_explicit(volatile __local atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_or_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_or_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_or_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_or_explicit(volatile __local atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_xor_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_xor_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_xor_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_xor_explicit(volatile __local atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_and_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_and_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_and_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_and_explicit(volatile __local atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_min_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_min_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_min_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_min_explicit(volatile __local atomic_uint *, uint, memory_order);
+int __ovld atomic_fetch_max_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_fetch_max_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_fetch_max_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_fetch_max_explicit(volatile __local atomic_uint *, uint, memory_order);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_add_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_add_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_add_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-long __ovld atomic_fetch_sub_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_sub_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_sub_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_sub_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-long __ovld atomic_fetch_or_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_or_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_or_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_or_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order);
-long __ovld atomic_fetch_xor_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_xor_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_xor_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_xor_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order);
-long __ovld atomic_fetch_and_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_and_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_and_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_and_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order);
-long __ovld atomic_fetch_min_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_min_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_min_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_min_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order);
-intptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order);
-long __ovld atomic_fetch_max_explicit(volatile __global atomic_long *object, long operand, memory_order order);
-long __ovld atomic_fetch_max_explicit(volatile __local atomic_long *object, long operand, memory_order order);
-ulong __ovld atomic_fetch_max_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order);
-ulong __ovld atomic_fetch_max_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order);
+long __ovld atomic_fetch_add_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_add_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_add_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_add_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *, ptrdiff_t, memory_order);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile __local atomic_uintptr_t *, ptrdiff_t, memory_order);
+long __ovld atomic_fetch_sub_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_sub_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_sub_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_sub_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile __global atomic_uintptr_t *, ptrdiff_t, memory_order);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *, ptrdiff_t, memory_order);
+long __ovld atomic_fetch_or_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_or_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_or_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_or_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order);
+uintptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order);
+intptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order);
+intptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order);
+long __ovld atomic_fetch_xor_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_xor_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_xor_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_xor_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order);
+uintptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order);
+intptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order);
+intptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order);
+long __ovld atomic_fetch_and_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_and_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_and_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_and_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order);
+uintptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order);
+intptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order);
+intptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order);
+long __ovld atomic_fetch_min_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_min_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_min_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_min_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order);
+uintptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order);
+intptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order);
+intptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order);
+long __ovld atomic_fetch_max_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_fetch_max_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_fetch_max_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_fetch_max_explicit(volatile __local atomic_ulong *, ulong, memory_order);
+uintptr_t __ovld atomic_fetch_max_explicit(volatile __global atomic_uintptr_t *, uintptr_t, memory_order);
+uintptr_t __ovld atomic_fetch_max_explicit(volatile __local atomic_uintptr_t *, uintptr_t, memory_order);
#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
#endif
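/* Illustration only, not part of the header or this diff: the __local
 * overloads above operate on work-group-local objects (here assumed to have
 * been initialized earlier in the kernel):
 *
 *   kernel void flags(local atomic_uint *mask) {
 *     atomic_fetch_or_explicit(mask, 1u << ((uint)get_local_id(0) & 31u),
 *                              memory_order_relaxed);
 *   }
 */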
#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_fetch_add_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_sub_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_or_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_xor_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_and_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_min_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_max_explicit(volatile atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *object, uint operand, memory_order order, memory_scope scope);
+int __ovld atomic_fetch_add_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_add_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_sub_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_sub_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_or_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_or_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_xor_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_xor_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_and_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_and_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_min_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_min_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_max_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_max_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_sub_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_or_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_xor_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_and_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_min_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_max_explicit(volatile atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
+long __ovld atomic_fetch_add_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_add_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_sub_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_sub_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_or_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_or_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_xor_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_xor_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_and_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_and_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_min_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_min_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_max_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_max_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);
#endif
#endif //defined(__opencl_c_generic_address_space)
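/* Illustration only, not part of the header or this diff: the four-argument
 * forms add a memory_scope, narrowing the ordering guarantee, e.g. to the
 * work-group sharing a local allocation:
 *
 *   kernel void wg_count(volatile local atomic_int *n) {
 *     atomic_fetch_add_explicit(n, 1, memory_order_acq_rel,
 *                               memory_scope_work_group);
 *   }
 */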
#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_fetch_add_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_add_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_add_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_add_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_sub_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_sub_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_sub_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_sub_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_or_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_or_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_or_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_or_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_xor_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_xor_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_xor_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_xor_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_and_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_and_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_and_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_and_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_min_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_min_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_min_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_min_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_max_explicit(volatile __global atomic_int *object, int operand, memory_order order, memory_scope scope);
-int __ovld atomic_fetch_max_explicit(volatile __local atomic_int *object, int operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_max_explicit(volatile __global atomic_uint *object, uint operand, memory_order order, memory_scope scope);
-uint __ovld atomic_fetch_max_explicit(volatile __local atomic_uint *object, uint operand, memory_order order, memory_scope scope);
+int __ovld atomic_fetch_add_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_add_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_add_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_add_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_sub_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_sub_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_sub_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_sub_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_or_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_or_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_or_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_or_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_xor_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_xor_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_xor_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_xor_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_and_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_and_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_and_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_and_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_min_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_min_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_min_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_min_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+int __ovld atomic_fetch_max_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_fetch_max_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_fetch_max_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_fetch_max_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
-long __ovld atomic_fetch_add_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_add_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_add_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_add_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_sub_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_sub_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_sub_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_sub_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_or_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_or_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_or_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_or_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_xor_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_xor_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_xor_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_xor_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_and_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_and_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_and_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_and_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_min_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_min_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_min_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_min_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_uintptr_t *object, intptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-intptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_intptr_t *object, uintptr_t operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_max_explicit(volatile __global atomic_long *object, long operand, memory_order order, memory_scope scope);
-long __ovld atomic_fetch_max_explicit(volatile __local atomic_long *object, long operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_max_explicit(volatile __global atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-ulong __ovld atomic_fetch_max_explicit(volatile __local atomic_ulong *object, ulong operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
-uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *object, ptrdiff_t operand, memory_order order, memory_scope scope);
+long __ovld atomic_fetch_add_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_add_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile __global atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_add_explicit(volatile __local atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);
+ulong __ovld atomic_fetch_add_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_add_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+long __ovld atomic_fetch_sub_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_sub_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_sub_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_sub_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile __global atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *, ptrdiff_t, memory_order, memory_scope);
+long __ovld atomic_fetch_or_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_or_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_or_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_or_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_or_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_or_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+long __ovld atomic_fetch_xor_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_xor_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_xor_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_xor_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_xor_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_xor_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+long __ovld atomic_fetch_and_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_and_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_and_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_and_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_and_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_and_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+long __ovld atomic_fetch_min_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_min_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_min_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_min_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_uintptr_t *, intptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_min_explicit(volatile __global atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+intptr_t __ovld atomic_fetch_min_explicit(volatile __local atomic_intptr_t *, uintptr_t, memory_order, memory_scope);
+long __ovld atomic_fetch_max_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_fetch_max_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_fetch_max_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_fetch_max_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_max_explicit(volatile __global atomic_uintptr_t *, uintptr_t, memory_order, memory_scope);
+uintptr_t __ovld atomic_fetch_max_explicit(volatile __local atomic_uintptr_t *, uintptr_t, memory_order, memory_scope);
#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
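// Editor's usage sketch (illustrative, not part of this diff): the 64-bit
// fetch overloads above are callable as below on a device reporting
// cl_khr_int64_base_atomics, cl_khr_int64_extended_atomics and the
// __opencl_c_atomic_scope_device feature. Kernel and argument names are
// hypothetical.
__kernel void running_max(__global atomic_long *max_so_far,
                          __global const long *input) {
    size_t gid = get_global_id(0);
    // Relaxed ordering suffices for a pure reduction; the previous value
    // returned by the fetch is discarded here.
    atomic_fetch_max_explicit(max_so_far, input[gid],
                              memory_order_relaxed, memory_scope_device);
}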
@@ -13670,373 +13661,308 @@ uintptr_t __ovld atomic_fetch_sub_explicit(volatile __local atomic_uintptr_t *ob
#if defined(cl_ext_float_atomics)
#if defined(__opencl_c_ext_fp16_global_atomic_load_store)
-void __ovld atomic_store(volatile __global atomic_half *object, half operand);
-void __ovld atomic_store_explicit(volatile __global atomic_half *object,
- half operand, memory_order order);
-void __ovld atomic_store_explicit(volatile __global atomic_half *object,
- half operand, memory_order order,
- memory_scope scope);
-half __ovld atomic_load(volatile __global atomic_half *object);
-half __ovld atomic_load_explicit(volatile __global atomic_half *object,
- memory_order order);
-half __ovld atomic_load_explicit(volatile __global atomic_half *object,
- memory_order order, memory_scope scope);
-half __ovld atomic_exchange(volatile __global atomic_half *object,
- half operand);
-half __ovld atomic_exchange_explicit(volatile __global atomic_half *object,
- half operand, memory_order order);
-half __ovld atomic_exchange_explicit(volatile __global atomic_half *object,
- half operand, memory_order order,
- memory_scope scope);
+void __ovld atomic_store(volatile __global atomic_half *, half);
+void __ovld atomic_store_explicit(volatile __global atomic_half *,
+ half, memory_order);
+void __ovld atomic_store_explicit(volatile __global atomic_half *,
+ half, memory_order, memory_scope);
+half __ovld atomic_load(volatile __global atomic_half *);
+half __ovld atomic_load_explicit(volatile __global atomic_half *,
+ memory_order);
+half __ovld atomic_load_explicit(volatile __global atomic_half *,
+ memory_order, memory_scope);
+half __ovld atomic_exchange(volatile __global atomic_half *, half);
+half __ovld atomic_exchange_explicit(volatile __global atomic_half *,
+ half, memory_order);
+half __ovld atomic_exchange_explicit(volatile __global atomic_half *,
+ half, memory_order, memory_scope);
#endif // defined(__opencl_c_ext_fp16_global_atomic_load_store)
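// Editor's usage sketch (illustrative): under cl_ext_float_atomics, an
// atomic_half in __global memory supports only load, store and exchange,
// as declared above. The pragma and names are for illustration; which
// memory orders the feature guarantees beyond relaxed is device-dependent.
#pragma OPENCL EXTENSION cl_khr_fp16 : enable
__kernel void publish_half(__global atomic_half *slot, half value) {
    atomic_store_explicit(slot, value, memory_order_relaxed);
}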
#if defined(__opencl_c_ext_fp16_local_atomic_load_store)
-void __ovld atomic_store(volatile __local atomic_half *object, half operand);
-void __ovld atomic_store_explicit(volatile __local atomic_half *object,
- half operand, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_half *object,
- half operand, memory_order order,
- memory_scope scope);
-half __ovld atomic_load(volatile __local atomic_half *object);
-half __ovld atomic_load_explicit(volatile __local atomic_half *object,
- memory_order order);
-half __ovld atomic_load_explicit(volatile __local atomic_half *object,
- memory_order order, memory_scope scope);
-half __ovld atomic_exchange(volatile __local atomic_half *object, half operand);
-half __ovld atomic_exchange_explicit(volatile __local atomic_half *object,
- half operand, memory_order order);
-half __ovld atomic_exchange_explicit(volatile __local atomic_half *object,
- half operand, memory_order order,
- memory_scope scope);
+void __ovld atomic_store(volatile __local atomic_half *, half);
+void __ovld atomic_store_explicit(volatile __local atomic_half *,
+ half, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_half *,
+ half, memory_order, memory_scope);
+half __ovld atomic_load(volatile __local atomic_half *);
+half __ovld atomic_load_explicit(volatile __local atomic_half *,
+ memory_order);
+half __ovld atomic_load_explicit(volatile __local atomic_half *,
+ memory_order, memory_scope);
+half __ovld atomic_exchange(volatile __local atomic_half *, half);
+half __ovld atomic_exchange_explicit(volatile __local atomic_half *,
+ half, memory_order);
+half __ovld atomic_exchange_explicit(volatile __local atomic_half *,
+ half, memory_order, memory_scope);
#endif // defined(__opencl_c_ext_fp16_local_atomic_load_store)
#if defined(__opencl_c_ext_fp16_global_atomic_load_store) && \
defined(__opencl_c_ext_fp16_local_atomic_load_store)
-void __ovld atomic_store(volatile atomic_half *object, half operand);
-void __ovld atomic_store_explicit(volatile atomic_half *object, half operand,
- memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_half *object, half operand,
- memory_order order, memory_scope scope);
-half __ovld atomic_load(volatile atomic_half *object);
-half __ovld atomic_load_explicit(volatile atomic_half *object,
- memory_order order);
-half __ovld atomic_load_explicit(volatile atomic_half *object,
- memory_order order, memory_scope scope);
-half __ovld atomic_exchange(volatile atomic_half *object, half operand);
-half __ovld atomic_exchange_explicit(volatile atomic_half *object, half operand,
- memory_order order);
-half __ovld atomic_exchange_explicit(volatile atomic_half *object, half operand,
- memory_order order, memory_scope scope);
+void __ovld atomic_store(volatile atomic_half *, half);
+void __ovld atomic_store_explicit(volatile atomic_half *, half,
+ memory_order);
+void __ovld atomic_store_explicit(volatile atomic_half *, half,
+ memory_order, memory_scope);
+half __ovld atomic_load(volatile atomic_half *);
+half __ovld atomic_load_explicit(volatile atomic_half *,
+ memory_order);
+half __ovld atomic_load_explicit(volatile atomic_half *,
+ memory_order, memory_scope);
+half __ovld atomic_exchange(volatile atomic_half *, half);
+half __ovld atomic_exchange_explicit(volatile atomic_half *, half,
+ memory_order);
+half __ovld atomic_exchange_explicit(volatile atomic_half *, half,
+ memory_order, memory_scope);
#endif // defined(__opencl_c_ext_fp16_global_atomic_load_store) &&
// defined(__opencl_c_ext_fp16_local_atomic_load_store)
#if defined(__opencl_c_ext_fp16_global_atomic_min_max)
-half __ovld atomic_fetch_min(volatile __global atomic_half *object,
- half operand);
-half __ovld atomic_fetch_max(volatile __global atomic_half *object,
- half operand);
-half __ovld atomic_fetch_min_explicit(volatile __global atomic_half *object,
- half operand, memory_order order);
-half __ovld atomic_fetch_max_explicit(volatile __global atomic_half *object,
- half operand, memory_order order);
-half __ovld atomic_fetch_min_explicit(volatile __global atomic_half *object,
- half operand, memory_order order,
- memory_scope scope);
-half __ovld atomic_fetch_max_explicit(volatile __global atomic_half *object,
- half operand, memory_order order,
- memory_scope scope);
+half __ovld atomic_fetch_min(volatile __global atomic_half *, half);
+half __ovld atomic_fetch_max(volatile __global atomic_half *, half);
+half __ovld atomic_fetch_min_explicit(volatile __global atomic_half *,
+ half, memory_order);
+half __ovld atomic_fetch_max_explicit(volatile __global atomic_half *,
+ half, memory_order);
+half __ovld atomic_fetch_min_explicit(volatile __global atomic_half *,
+ half, memory_order, memory_scope);
+half __ovld atomic_fetch_max_explicit(volatile __global atomic_half *,
+ half, memory_order, memory_scope);
#endif // defined(__opencl_c_ext_fp16_global_atomic_min_max)
#if defined(__opencl_c_ext_fp16_local_atomic_min_max)
-half __ovld atomic_fetch_min(volatile __local atomic_half *object,
- half operand);
-half __ovld atomic_fetch_max(volatile __local atomic_half *object,
- half operand);
-half __ovld atomic_fetch_min_explicit(volatile __local atomic_half *object,
- half operand, memory_order order);
-half __ovld atomic_fetch_max_explicit(volatile __local atomic_half *object,
- half operand, memory_order order);
-half __ovld atomic_fetch_min_explicit(volatile __local atomic_half *object,
- half operand, memory_order order,
- memory_scope scope);
-half __ovld atomic_fetch_max_explicit(volatile __local atomic_half *object,
- half operand, memory_order order,
- memory_scope scope);
+half __ovld atomic_fetch_min(volatile __local atomic_half *, half);
+half __ovld atomic_fetch_max(volatile __local atomic_half *, half);
+half __ovld atomic_fetch_min_explicit(volatile __local atomic_half *,
+ half, memory_order);
+half __ovld atomic_fetch_max_explicit(volatile __local atomic_half *,
+ half, memory_order);
+half __ovld atomic_fetch_min_explicit(volatile __local atomic_half *,
+ half, memory_order, memory_scope);
+half __ovld atomic_fetch_max_explicit(volatile __local atomic_half *,
+ half, memory_order, memory_scope);
#endif // defined(__opencl_c_ext_fp16_local_atomic_min_max)
#if defined(__opencl_c_ext_fp16_global_atomic_min_max) && \
defined(__opencl_c_ext_fp16_local_atomic_min_max)
-half __ovld atomic_fetch_min(volatile atomic_half *object, half operand);
-half __ovld atomic_fetch_max(volatile atomic_half *object, half operand);
-half __ovld atomic_fetch_min_explicit(volatile atomic_half *object,
- half operand, memory_order order);
-half __ovld atomic_fetch_max_explicit(volatile atomic_half *object,
- half operand, memory_order order);
-half __ovld atomic_fetch_min_explicit(volatile atomic_half *object,
- half operand, memory_order order,
- memory_scope scope);
-half __ovld atomic_fetch_max_explicit(volatile atomic_half *object,
- half operand, memory_order order,
- memory_scope scope);
+half __ovld atomic_fetch_min(volatile atomic_half *, half);
+half __ovld atomic_fetch_max(volatile atomic_half *, half);
+half __ovld atomic_fetch_min_explicit(volatile atomic_half *,
+ half, memory_order);
+half __ovld atomic_fetch_max_explicit(volatile atomic_half *,
+ half, memory_order);
+half __ovld atomic_fetch_min_explicit(volatile atomic_half *,
+ half, memory_order, memory_scope);
+half __ovld atomic_fetch_max_explicit(volatile atomic_half *,
+ half, memory_order, memory_scope);
#endif // defined(__opencl_c_ext_fp16_global_atomic_min_max) && \
defined(__opencl_c_ext_fp16_local_atomic_min_max)
#if defined(__opencl_c_ext_fp32_global_atomic_min_max)
-float __ovld atomic_fetch_min(volatile __global atomic_float *object,
- float operand);
-float __ovld atomic_fetch_max(volatile __global atomic_float *object,
- float operand);
-float __ovld atomic_fetch_min_explicit(volatile __global atomic_float *object,
- float operand, memory_order order);
-float __ovld atomic_fetch_max_explicit(volatile __global atomic_float *object,
- float operand, memory_order order);
-float __ovld atomic_fetch_min_explicit(volatile __global atomic_float *object,
- float operand, memory_order order,
- memory_scope scope);
-float __ovld atomic_fetch_max_explicit(volatile __global atomic_float *object,
- float operand, memory_order order,
- memory_scope scope);
+float __ovld atomic_fetch_min(volatile __global atomic_float *, float);
+float __ovld atomic_fetch_max(volatile __global atomic_float *, float);
+float __ovld atomic_fetch_min_explicit(volatile __global atomic_float *,
+ float, memory_order);
+float __ovld atomic_fetch_max_explicit(volatile __global atomic_float *,
+ float, memory_order);
+float __ovld atomic_fetch_min_explicit(volatile __global atomic_float *,
+ float, memory_order, memory_scope);
+float __ovld atomic_fetch_max_explicit(volatile __global atomic_float *,
+ float, memory_order, memory_scope);
#endif // defined(__opencl_c_ext_fp32_global_atomic_min_max)
#if defined(__opencl_c_ext_fp32_local_atomic_min_max)
-float __ovld atomic_fetch_min(volatile __local atomic_float *object,
- float operand);
-float __ovld atomic_fetch_max(volatile __local atomic_float *object,
- float operand);
-float __ovld atomic_fetch_min_explicit(volatile __local atomic_float *object,
- float operand, memory_order order);
-float __ovld atomic_fetch_max_explicit(volatile __local atomic_float *object,
- float operand, memory_order order);
-float __ovld atomic_fetch_min_explicit(volatile __local atomic_float *object,
- float operand, memory_order order,
- memory_scope scope);
-float __ovld atomic_fetch_max_explicit(volatile __local atomic_float *object,
- float operand, memory_order order,
- memory_scope scope);
+float __ovld atomic_fetch_min(volatile __local atomic_float *, float);
+float __ovld atomic_fetch_max(volatile __local atomic_float *, float);
+float __ovld atomic_fetch_min_explicit(volatile __local atomic_float *,
+ float, memory_order);
+float __ovld atomic_fetch_max_explicit(volatile __local atomic_float *,
+ float, memory_order);
+float __ovld atomic_fetch_min_explicit(volatile __local atomic_float *,
+ float, memory_order, memory_scope);
+float __ovld atomic_fetch_max_explicit(volatile __local atomic_float *,
+ float, memory_order, memory_scope);
#endif // defined(__opencl_c_ext_fp32_local_atomic_min_max)
#if defined(__opencl_c_ext_fp32_global_atomic_min_max) && \
defined(__opencl_c_ext_fp32_local_atomic_min_max)
-float __ovld atomic_fetch_min(volatile atomic_float *object, float operand);
-float __ovld atomic_fetch_max(volatile atomic_float *object, float operand);
-float __ovld atomic_fetch_min_explicit(volatile atomic_float *object,
- float operand, memory_order order);
-float __ovld atomic_fetch_max_explicit(volatile atomic_float *object,
- float operand, memory_order order);
-float __ovld atomic_fetch_min_explicit(volatile atomic_float *object,
- float operand, memory_order order,
- memory_scope scope);
-float __ovld atomic_fetch_max_explicit(volatile atomic_float *object,
- float operand, memory_order order,
- memory_scope scope);
+float __ovld atomic_fetch_min(volatile atomic_float *, float);
+float __ovld atomic_fetch_max(volatile atomic_float *, float);
+float __ovld atomic_fetch_min_explicit(volatile atomic_float *,
+ float, memory_order);
+float __ovld atomic_fetch_max_explicit(volatile atomic_float *,
+ float, memory_order);
+float __ovld atomic_fetch_min_explicit(volatile atomic_float *,
+ float, memory_order, memory_scope);
+float __ovld atomic_fetch_max_explicit(volatile atomic_float *,
+ float, memory_order, memory_scope);
#endif // defined(__opencl_c_ext_fp32_global_atomic_min_max) && \
defined(__opencl_c_ext_fp32_local_atomic_min_max)
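// Editor's usage sketch (illustrative): with the fp32 min/max features
// above, a global floating-point minimum can be tracked without a
// compare-exchange loop; names are hypothetical, and device scope assumes
// __opencl_c_atomic_scope_device.
__kernel void global_min(__global atomic_float *lo,
                         __global const float *input) {
    size_t gid = get_global_id(0);
    atomic_fetch_min_explicit(lo, input[gid],
                              memory_order_relaxed, memory_scope_device);
}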
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#if defined(__opencl_c_ext_fp64_global_atomic_min_max)
-double __ovld atomic_fetch_min(volatile __global atomic_double *object,
- double operand);
-double __ovld atomic_fetch_max(volatile __global atomic_double *object,
- double operand);
-double __ovld atomic_fetch_min_explicit(volatile __global atomic_double *object,
- double operand, memory_order order);
-double __ovld atomic_fetch_max_explicit(volatile __global atomic_double *object,
- double operand, memory_order order);
-double __ovld atomic_fetch_min_explicit(volatile __global atomic_double *object,
- double operand, memory_order order,
- memory_scope scope);
-double __ovld atomic_fetch_max_explicit(volatile __global atomic_double *object,
- double operand, memory_order order,
- memory_scope scope);
+double __ovld atomic_fetch_min(volatile __global atomic_double *, double);
+double __ovld atomic_fetch_max(volatile __global atomic_double *, double);
+double __ovld atomic_fetch_min_explicit(volatile __global atomic_double *,
+ double, memory_order);
+double __ovld atomic_fetch_max_explicit(volatile __global atomic_double *,
+ double, memory_order);
+double __ovld atomic_fetch_min_explicit(volatile __global atomic_double *,
+ double, memory_order, memory_scope);
+double __ovld atomic_fetch_max_explicit(volatile __global atomic_double *,
+ double, memory_order, memory_scope);
#endif // defined(__opencl_c_ext_fp64_global_atomic_min_max)
#if defined(__opencl_c_ext_fp64_local_atomic_min_max)
-double __ovld atomic_fetch_min(volatile __local atomic_double *object,
- double operand);
-double __ovld atomic_fetch_max(volatile __local atomic_double *object,
- double operand);
-double __ovld atomic_fetch_min_explicit(volatile __local atomic_double *object,
- double operand, memory_order order);
-double __ovld atomic_fetch_max_explicit(volatile __local atomic_double *object,
- double operand, memory_order order);
-double __ovld atomic_fetch_min_explicit(volatile __local atomic_double *object,
- double operand, memory_order order,
- memory_scope scope);
-double __ovld atomic_fetch_max_explicit(volatile __local atomic_double *object,
- double operand, memory_order order,
- memory_scope scope);
+double __ovld atomic_fetch_min(volatile __local atomic_double *, double);
+double __ovld atomic_fetch_max(volatile __local atomic_double *, double);
+double __ovld atomic_fetch_min_explicit(volatile __local atomic_double *,
+ double, memory_order);
+double __ovld atomic_fetch_max_explicit(volatile __local atomic_double *,
+ double, memory_order);
+double __ovld atomic_fetch_min_explicit(volatile __local atomic_double *,
+ double, memory_order, memory_scope);
+double __ovld atomic_fetch_max_explicit(volatile __local atomic_double *,
+ double, memory_order, memory_scope);
#endif // defined(__opencl_c_ext_fp64_local_atomic_min_max)
#if defined(__opencl_c_ext_fp64_global_atomic_min_max) && \
defined(__opencl_c_ext_fp64_local_atomic_min_max)
-double __ovld atomic_fetch_min(volatile atomic_double *object, double operand);
-double __ovld atomic_fetch_max(volatile atomic_double *object, double operand);
-double __ovld atomic_fetch_min_explicit(volatile atomic_double *object,
- double operand, memory_order order);
-double __ovld atomic_fetch_max_explicit(volatile atomic_double *object,
- double operand, memory_order order);
-double __ovld atomic_fetch_min_explicit(volatile atomic_double *object,
- double operand, memory_order order,
- memory_scope scope);
-double __ovld atomic_fetch_max_explicit(volatile atomic_double *object,
- double operand, memory_order order,
- memory_scope scope);
+double __ovld atomic_fetch_min(volatile atomic_double *, double);
+double __ovld atomic_fetch_max(volatile atomic_double *, double);
+double __ovld atomic_fetch_min_explicit(volatile atomic_double *,
+ double, memory_order);
+double __ovld atomic_fetch_max_explicit(volatile atomic_double *,
+ double, memory_order);
+double __ovld atomic_fetch_min_explicit(volatile atomic_double *,
+ double, memory_order, memory_scope);
+double __ovld atomic_fetch_max_explicit(volatile atomic_double *,
+ double, memory_order, memory_scope);
#endif // defined(__opencl_c_ext_fp64_global_atomic_min_max) && \
defined(__opencl_c_ext_fp64_local_atomic_min_max)
#endif // defined(cl_khr_int64_base_atomics) && \
defined(cl_khr_int64_extended_atomics)
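// Editor's note: the fp64 min/max overloads above carry an extra guard on
// the 64-bit integer atomics extensions, presumably because atomic_double
// needs 64-bit atomic operations underneath. Illustrative use, assuming
// cl_khr_fp64 and both int64 atomics extensions; names are hypothetical.
__kernel void track_min_d(__global atomic_double *lo, double candidate) {
    atomic_fetch_min_explicit(lo, candidate,
                              memory_order_relaxed, memory_scope_device);
}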
#if defined(__opencl_c_ext_fp16_global_atomic_add)
-half __ovld atomic_fetch_add(volatile __global atomic_half *object,
- half operand);
-half __ovld atomic_fetch_sub(volatile __global atomic_half *object,
- half operand);
-half __ovld atomic_fetch_add_explicit(volatile __global atomic_half *object,
- half operand, memory_order order);
-half __ovld atomic_fetch_sub_explicit(volatile __global atomic_half *object,
- half operand, memory_order order);
-half __ovld atomic_fetch_add_explicit(volatile __global atomic_half *object,
- half operand, memory_order order,
- memory_scope scope);
-half __ovld atomic_fetch_sub_explicit(volatile __global atomic_half *object,
- half operand, memory_order order,
- memory_scope scope);
+half __ovld atomic_fetch_add(volatile __global atomic_half *, half);
+half __ovld atomic_fetch_sub(volatile __global atomic_half *, half);
+half __ovld atomic_fetch_add_explicit(volatile __global atomic_half *,
+ half, memory_order);
+half __ovld atomic_fetch_sub_explicit(volatile __global atomic_half *,
+ half, memory_order);
+half __ovld atomic_fetch_add_explicit(volatile __global atomic_half *,
+ half, memory_order, memory_scope);
+half __ovld atomic_fetch_sub_explicit(volatile __global atomic_half *,
+ half, memory_order, memory_scope);
#endif // defined(__opencl_c_ext_fp16_global_atomic_add)
#if defined(__opencl_c_ext_fp16_local_atomic_add)
-half __ovld atomic_fetch_add(volatile __local atomic_half *object,
- half operand);
-half __ovld atomic_fetch_sub(volatile __local atomic_half *object,
- half operand);
-half __ovld atomic_fetch_add_explicit(volatile __local atomic_half *object,
- half operand, memory_order order);
-half __ovld atomic_fetch_sub_explicit(volatile __local atomic_half *object,
- half operand, memory_order order);
-half __ovld atomic_fetch_add_explicit(volatile __local atomic_half *object,
- half operand, memory_order order,
- memory_scope scope);
-half __ovld atomic_fetch_sub_explicit(volatile __local atomic_half *object,
- half operand, memory_order order,
- memory_scope scope);
+half __ovld atomic_fetch_add(volatile __local atomic_half *, half);
+half __ovld atomic_fetch_sub(volatile __local atomic_half *, half);
+half __ovld atomic_fetch_add_explicit(volatile __local atomic_half *,
+ half, memory_order);
+half __ovld atomic_fetch_sub_explicit(volatile __local atomic_half *,
+ half, memory_order);
+half __ovld atomic_fetch_add_explicit(volatile __local atomic_half *,
+ half, memory_order, memory_scope);
+half __ovld atomic_fetch_sub_explicit(volatile __local atomic_half *,
+ half, memory_order, memory_scope);
#endif // defined(__opencl_c_ext_fp16_local_atomic_add)
#if defined(__opencl_c_ext_fp16_global_atomic_add) && \
defined(__opencl_c_ext_fp16_local_atomic_add)
-half __ovld atomic_fetch_add(volatile atomic_half *object, half operand);
-half __ovld atomic_fetch_sub(volatile atomic_half *object, half operand);
-half __ovld atomic_fetch_add_explicit(volatile atomic_half *object,
- half operand, memory_order order);
-half __ovld atomic_fetch_sub_explicit(volatile atomic_half *object,
- half operand, memory_order order);
-half __ovld atomic_fetch_add_explicit(volatile atomic_half *object,
- half operand, memory_order order,
- memory_scope scope);
-half __ovld atomic_fetch_sub_explicit(volatile atomic_half *object,
- half operand, memory_order order,
- memory_scope scope);
+half __ovld atomic_fetch_add(volatile atomic_half *, half);
+half __ovld atomic_fetch_sub(volatile atomic_half *, half);
+half __ovld atomic_fetch_add_explicit(volatile atomic_half *,
+ half, memory_order);
+half __ovld atomic_fetch_sub_explicit(volatile atomic_half *,
+ half, memory_order);
+half __ovld atomic_fetch_add_explicit(volatile atomic_half *,
+ half, memory_order, memory_scope);
+half __ovld atomic_fetch_sub_explicit(volatile atomic_half *,
+ half, memory_order, memory_scope);
#endif // defined(__opencl_c_ext_fp16_global_atomic_add) && \
defined(__opencl_c_ext_fp16_local_atomic_add)
#if defined(__opencl_c_ext_fp32_global_atomic_add)
-float __ovld atomic_fetch_add(volatile __global atomic_float *object,
- float operand);
-float __ovld atomic_fetch_sub(volatile __global atomic_float *object,
- float operand);
-float __ovld atomic_fetch_add_explicit(volatile __global atomic_float *object,
- float operand, memory_order order);
-float __ovld atomic_fetch_sub_explicit(volatile __global atomic_float *object,
- float operand, memory_order order);
-float __ovld atomic_fetch_add_explicit(volatile __global atomic_float *object,
- float operand, memory_order order,
- memory_scope scope);
-float __ovld atomic_fetch_sub_explicit(volatile __global atomic_float *object,
- float operand, memory_order order,
- memory_scope scope);
+float __ovld atomic_fetch_add(volatile __global atomic_float *, float);
+float __ovld atomic_fetch_sub(volatile __global atomic_float *, float);
+float __ovld atomic_fetch_add_explicit(volatile __global atomic_float *,
+ float, memory_order);
+float __ovld atomic_fetch_sub_explicit(volatile __global atomic_float *,
+ float, memory_order);
+float __ovld atomic_fetch_add_explicit(volatile __global atomic_float *,
+ float, memory_order, memory_scope);
+float __ovld atomic_fetch_sub_explicit(volatile __global atomic_float *,
+ float, memory_order, memory_scope);
#endif // defined(__opencl_c_ext_fp32_global_atomic_add)
#if defined(__opencl_c_ext_fp32_local_atomic_add)
-float __ovld atomic_fetch_add(volatile __local atomic_float *object,
- float operand);
-float __ovld atomic_fetch_sub(volatile __local atomic_float *object,
- float operand);
-float __ovld atomic_fetch_add_explicit(volatile __local atomic_float *object,
- float operand, memory_order order);
-float __ovld atomic_fetch_sub_explicit(volatile __local atomic_float *object,
- float operand, memory_order order);
-float __ovld atomic_fetch_add_explicit(volatile __local atomic_float *object,
- float operand, memory_order order,
- memory_scope scope);
-float __ovld atomic_fetch_sub_explicit(volatile __local atomic_float *object,
- float operand, memory_order order,
- memory_scope scope);
+float __ovld atomic_fetch_add(volatile __local atomic_float *, float);
+float __ovld atomic_fetch_sub(volatile __local atomic_float *, float);
+float __ovld atomic_fetch_add_explicit(volatile __local atomic_float *,
+ float, memory_order);
+float __ovld atomic_fetch_sub_explicit(volatile __local atomic_float *,
+ float, memory_order);
+float __ovld atomic_fetch_add_explicit(volatile __local atomic_float *,
+ float, memory_order, memory_scope);
+float __ovld atomic_fetch_sub_explicit(volatile __local atomic_float *,
+ float, memory_order, memory_scope);
#endif // defined(__opencl_c_ext_fp32_local_atomic_add)
#if defined(__opencl_c_ext_fp32_global_atomic_add) && \
defined(__opencl_c_ext_fp32_local_atomic_add)
-float __ovld atomic_fetch_add(volatile atomic_float *object, float operand);
-float __ovld atomic_fetch_sub(volatile atomic_float *object, float operand);
-float __ovld atomic_fetch_add_explicit(volatile atomic_float *object,
- float operand, memory_order order);
-float __ovld atomic_fetch_sub_explicit(volatile atomic_float *object,
- float operand, memory_order order);
-float __ovld atomic_fetch_add_explicit(volatile atomic_float *object,
- float operand, memory_order order,
- memory_scope scope);
-float __ovld atomic_fetch_sub_explicit(volatile atomic_float *object,
- float operand, memory_order order,
- memory_scope scope);
+float __ovld atomic_fetch_add(volatile atomic_float *, float);
+float __ovld atomic_fetch_sub(volatile atomic_float *, float);
+float __ovld atomic_fetch_add_explicit(volatile atomic_float *,
+ float, memory_order);
+float __ovld atomic_fetch_sub_explicit(volatile atomic_float *,
+ float, memory_order);
+float __ovld atomic_fetch_add_explicit(volatile atomic_float *,
+ float, memory_order, memory_scope);
+float __ovld atomic_fetch_sub_explicit(volatile atomic_float *,
+ float, memory_order, memory_scope);
#endif // defined(__opencl_c_ext_fp32_global_atomic_add) && \
defined(__opencl_c_ext_fp32_local_atomic_add)
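// Editor's usage sketch (illustrative): hardware float atomic add, as
// declared above, is the common building block for histogramming and
// scatter-add. The bin layout and names below are hypothetical.
__kernel void scatter_add(__global atomic_float *bins,
                          __global const int *bin_of,
                          __global const float *weight) {
    size_t gid = get_global_id(0);
    atomic_fetch_add_explicit(&bins[bin_of[gid]], weight[gid],
                              memory_order_relaxed, memory_scope_device);
}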
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#if defined(__opencl_c_ext_fp64_global_atomic_add)
-double __ovld atomic_fetch_add(volatile __global atomic_double *object,
- double operand);
-double __ovld atomic_fetch_sub(volatile __global atomic_double *object,
- double operand);
-double __ovld atomic_fetch_add_explicit(volatile __global atomic_double *object,
- double operand, memory_order order);
-double __ovld atomic_fetch_sub_explicit(volatile __global atomic_double *object,
- double operand, memory_order order);
-double __ovld atomic_fetch_add_explicit(volatile __global atomic_double *object,
- double operand, memory_order order,
- memory_scope scope);
-double __ovld atomic_fetch_sub_explicit(volatile __global atomic_double *object,
- double operand, memory_order order,
- memory_scope scope);
+double __ovld atomic_fetch_add(volatile __global atomic_double *, double);
+double __ovld atomic_fetch_sub(volatile __global atomic_double *, double);
+double __ovld atomic_fetch_add_explicit(volatile __global atomic_double *,
+ double, memory_order);
+double __ovld atomic_fetch_sub_explicit(volatile __global atomic_double *,
+ double, memory_order);
+double __ovld atomic_fetch_add_explicit(volatile __global atomic_double *,
+ double, memory_order, memory_scope);
+double __ovld atomic_fetch_sub_explicit(volatile __global atomic_double *,
+ double, memory_order, memory_scope);
#endif // defined(__opencl_c_ext_fp64_global_atomic_add)
#if defined(__opencl_c_ext_fp64_local_atomic_add)
-double __ovld atomic_fetch_add(volatile __local atomic_double *object,
- double operand);
-double __ovld atomic_fetch_sub(volatile __local atomic_double *object,
- double operand);
-double __ovld atomic_fetch_add_explicit(volatile __local atomic_double *object,
- double operand, memory_order order);
-double __ovld atomic_fetch_sub_explicit(volatile __local atomic_double *object,
- double operand, memory_order order);
-double __ovld atomic_fetch_add_explicit(volatile __local atomic_double *object,
- double operand, memory_order order,
- memory_scope scope);
-double __ovld atomic_fetch_sub_explicit(volatile __local atomic_double *object,
- double operand, memory_order order,
- memory_scope scope);
+double __ovld atomic_fetch_add(volatile __local atomic_double *, double);
+double __ovld atomic_fetch_sub(volatile __local atomic_double *, double);
+double __ovld atomic_fetch_add_explicit(volatile __local atomic_double *,
+ double, memory_order);
+double __ovld atomic_fetch_sub_explicit(volatile __local atomic_double *,
+ double, memory_order);
+double __ovld atomic_fetch_add_explicit(volatile __local atomic_double *,
+ double, memory_order, memory_scope);
+double __ovld atomic_fetch_sub_explicit(volatile __local atomic_double *,
+ double, memory_order, memory_scope);
#endif // defined(__opencl_c_ext_fp64_local_atomic_add)
#if defined(__opencl_c_ext_fp64_global_atomic_add) && \
defined(__opencl_c_ext_fp64_local_atomic_add)
-double __ovld atomic_fetch_add(volatile atomic_double *object, double operand);
-double __ovld atomic_fetch_sub(volatile atomic_double *object, double operand);
-double __ovld atomic_fetch_add_explicit(volatile atomic_double *object,
- double operand, memory_order order);
-double __ovld atomic_fetch_sub_explicit(volatile atomic_double *object,
- double operand, memory_order order);
-double __ovld atomic_fetch_add_explicit(volatile atomic_double *object,
- double operand, memory_order order,
- memory_scope scope);
-double __ovld atomic_fetch_sub_explicit(volatile atomic_double *object,
- double operand, memory_order order,
- memory_scope scope);
+double __ovld atomic_fetch_add(volatile atomic_double *, double);
+double __ovld atomic_fetch_sub(volatile atomic_double *, double);
+double __ovld atomic_fetch_add_explicit(volatile atomic_double *,
+ double, memory_order);
+double __ovld atomic_fetch_sub_explicit(volatile atomic_double *,
+ double, memory_order);
+double __ovld atomic_fetch_add_explicit(volatile atomic_double *,
+ double, memory_order, memory_scope);
+double __ovld atomic_fetch_sub_explicit(volatile atomic_double *,
+ double, memory_order, memory_scope);
#endif // defined(__opencl_c_ext_fp64_global_atomic_add) && \
defined(__opencl_c_ext_fp64_local_atomic_add)
#endif // defined(cl_khr_int64_base_atomics) && \
@@ -14048,197 +13974,197 @@ double __ovld atomic_fetch_sub_explicit(volatile atomic_double *object,
#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
#if defined(__opencl_c_generic_address_space)
-void __ovld atomic_store(volatile atomic_int *object, int desired);
-void __ovld atomic_store(volatile atomic_uint *object, uint desired);
-void __ovld atomic_store(volatile atomic_float *object, float desired);
+void __ovld atomic_store(volatile atomic_int *, int);
+void __ovld atomic_store(volatile atomic_uint *, uint);
+void __ovld atomic_store(volatile atomic_float *, float);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-void __ovld atomic_store(volatile atomic_double *object, double desired);
+void __ovld atomic_store(volatile atomic_double *, double);
#endif //cl_khr_fp64
-void __ovld atomic_store(volatile atomic_long *object, long desired);
-void __ovld atomic_store(volatile atomic_ulong *object, ulong desired);
+void __ovld atomic_store(volatile atomic_long *, long);
+void __ovld atomic_store(volatile atomic_ulong *, ulong);
#endif
#endif //defined(__opencl_c_generic_address_space)
#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-void __ovld atomic_store(volatile __global atomic_int *object, int desired);
-void __ovld atomic_store(volatile __local atomic_int *object, int desired);
-void __ovld atomic_store(volatile __global atomic_uint *object, uint desired);
-void __ovld atomic_store(volatile __local atomic_uint *object, uint desired);
-void __ovld atomic_store(volatile __global atomic_float *object, float desired);
-void __ovld atomic_store(volatile __local atomic_float *object, float desired);
+void __ovld atomic_store(volatile __global atomic_int *, int);
+void __ovld atomic_store(volatile __local atomic_int *, int);
+void __ovld atomic_store(volatile __global atomic_uint *, uint);
+void __ovld atomic_store(volatile __local atomic_uint *, uint);
+void __ovld atomic_store(volatile __global atomic_float *, float);
+void __ovld atomic_store(volatile __local atomic_float *, float);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-void __ovld atomic_store(volatile __global atomic_double *object, double desired);
-void __ovld atomic_store(volatile __local atomic_double *object, double desired);
+void __ovld atomic_store(volatile __global atomic_double *, double);
+void __ovld atomic_store(volatile __local atomic_double *, double);
#endif //cl_khr_fp64
-void __ovld atomic_store(volatile __global atomic_long *object, long desired);
-void __ovld atomic_store(volatile __local atomic_long *object, long desired);
-void __ovld atomic_store(volatile __global atomic_ulong *object, ulong desired);
-void __ovld atomic_store(volatile __local atomic_ulong *object, ulong desired);
+void __ovld atomic_store(volatile __global atomic_long *, long);
+void __ovld atomic_store(volatile __local atomic_long *, long);
+void __ovld atomic_store(volatile __global atomic_ulong *, ulong);
+void __ovld atomic_store(volatile __local atomic_ulong *, ulong);
#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
#endif
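// Editor's usage sketch (illustrative): the implicitly seq_cst atomic_store
// overloads in the block just closed require both
// __opencl_c_atomic_order_seq_cst and __opencl_c_atomic_scope_device; the
// OpenCL 3.0 __global/__local overloads make them usable even without the
// generic address space feature. Names are hypothetical.
__kernel void reset_counter(__local atomic_uint *counter) {
    if (get_local_id(0) == 0)
        atomic_store(counter, 0u);
    // Make the initialized counter visible to the whole work-group.
    barrier(CLK_LOCAL_MEM_FENCE);
}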
#if defined(__opencl_c_atomic_scope_device)
#if defined(__opencl_c_generic_address_space)
-void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_uint *object, uint desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_float *object, float desired, memory_order order);
+void __ovld atomic_store_explicit(volatile atomic_int *, int, memory_order);
+void __ovld atomic_store_explicit(volatile atomic_uint *, uint, memory_order);
+void __ovld atomic_store_explicit(volatile atomic_float *, float, memory_order);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-void __ovld atomic_store_explicit(volatile atomic_double *object, double desired, memory_order order);
+void __ovld atomic_store_explicit(volatile atomic_double *, double, memory_order);
#endif //cl_khr_fp64
-void __ovld atomic_store_explicit(volatile atomic_long *object, long desired, memory_order order);
-void __ovld atomic_store_explicit(volatile atomic_ulong *object, ulong desired, memory_order order);
+void __ovld atomic_store_explicit(volatile atomic_long *, long, memory_order);
+void __ovld atomic_store_explicit(volatile atomic_ulong *, ulong, memory_order);
#endif
#endif //defined(__opencl_c_generic_address_space)
#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-void __ovld atomic_store_explicit(volatile __global atomic_int *object, int desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_int *object, int desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __global atomic_uint *object, uint desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_uint *object, uint desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __global atomic_float *object, float desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_float *object, float desired, memory_order order);
+void __ovld atomic_store_explicit(volatile __global atomic_int *, int, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_int *, int, memory_order);
+void __ovld atomic_store_explicit(volatile __global atomic_uint *, uint, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_uint *, uint, memory_order);
+void __ovld atomic_store_explicit(volatile __global atomic_float *, float, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_float *, float, memory_order);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-void __ovld atomic_store_explicit(volatile __global atomic_double *object, double desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_double *object, double desired, memory_order order);
+void __ovld atomic_store_explicit(volatile __global atomic_double *, double, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_double *, double, memory_order);
#endif
-void __ovld atomic_store_explicit(volatile __global atomic_long *object, long desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_long *object, long desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __global atomic_ulong *object, ulong desired, memory_order order);
-void __ovld atomic_store_explicit(volatile __local atomic_ulong *object, ulong desired, memory_order order);
+void __ovld atomic_store_explicit(volatile __global atomic_long *, long, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_long *, long, memory_order);
+void __ovld atomic_store_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+void __ovld atomic_store_explicit(volatile __local atomic_ulong *, ulong, memory_order);
#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
#endif
#if defined(__opencl_c_generic_address_space)
-void __ovld atomic_store_explicit(volatile atomic_int *object, int desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile atomic_float *object, float desired, memory_order order, memory_scope scope);
+void __ovld atomic_store_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile atomic_float *, float, memory_order, memory_scope);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-void __ovld atomic_store_explicit(volatile atomic_double *object, double desired, memory_order order, memory_scope scope);
+void __ovld atomic_store_explicit(volatile atomic_double *, double, memory_order, memory_scope);
#endif //cl_khr_fp64
-void __ovld atomic_store_explicit(volatile atomic_long *object, long desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
+void __ovld atomic_store_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
#endif
#endif //defined(__opencl_c_generic_address_space)
#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-void __ovld atomic_store_explicit(volatile __global atomic_int *object, int desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __local atomic_int *object, int desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __global atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __local atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __global atomic_float *object, float desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __local atomic_float *object, float desired, memory_order order, memory_scope scope);
+void __ovld atomic_store_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __global atomic_float *, float, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __local atomic_float *, float, memory_order, memory_scope);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-void __ovld atomic_store_explicit(volatile __global atomic_double *object, double desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __local atomic_double *object, double desired, memory_order order, memory_scope scope);
+void __ovld atomic_store_explicit(volatile __global atomic_double *, double, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __local atomic_double *, double, memory_order, memory_scope);
#endif //cl_khr_fp64
-void __ovld atomic_store_explicit(volatile __global atomic_long *object, long desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __local atomic_long *object, long desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __global atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
-void __ovld atomic_store_explicit(volatile __local atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
+void __ovld atomic_store_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+void __ovld atomic_store_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
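// Editor's usage sketch (illustrative): the order+scope store overloads
// above allow narrowing synchronization to a work-group, which is cheaper
// than device scope on most GPUs. Release ordering assumes the device
// reports __opencl_c_atomic_order_acq_rel; names are hypothetical.
void signal_ready(volatile __local atomic_int *flag) {
    atomic_store_explicit(flag, 1, memory_order_release,
                          memory_scope_work_group);
}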
// atomic_load()
#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_load(volatile atomic_int *object);
-uint __ovld atomic_load(volatile atomic_uint *object);
-float __ovld atomic_load(volatile atomic_float *object);
+int __ovld atomic_load(volatile atomic_int *);
+uint __ovld atomic_load(volatile atomic_uint *);
+float __ovld atomic_load(volatile atomic_float *);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-double __ovld atomic_load(volatile atomic_double *object);
+double __ovld atomic_load(volatile atomic_double *);
#endif //cl_khr_fp64
-long __ovld atomic_load(volatile atomic_long *object);
-ulong __ovld atomic_load(volatile atomic_ulong *object);
+long __ovld atomic_load(volatile atomic_long *);
+ulong __ovld atomic_load(volatile atomic_ulong *);
#endif
#endif //defined(__opencl_c_generic_address_space)
#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_load(volatile __global atomic_int *object);
-int __ovld atomic_load(volatile __local atomic_int *object);
-uint __ovld atomic_load(volatile __global atomic_uint *object);
-uint __ovld atomic_load(volatile __local atomic_uint *object);
-float __ovld atomic_load(volatile __global atomic_float *object);
-float __ovld atomic_load(volatile __local atomic_float *object);
+int __ovld atomic_load(volatile __global atomic_int *);
+int __ovld atomic_load(volatile __local atomic_int *);
+uint __ovld atomic_load(volatile __global atomic_uint *);
+uint __ovld atomic_load(volatile __local atomic_uint *);
+float __ovld atomic_load(volatile __global atomic_float *);
+float __ovld atomic_load(volatile __local atomic_float *);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-double __ovld atomic_load(volatile __global atomic_double *object);
-double __ovld atomic_load(volatile __local atomic_double *object);
+double __ovld atomic_load(volatile __global atomic_double *);
+double __ovld atomic_load(volatile __local atomic_double *);
#endif //cl_khr_fp64
-long __ovld atomic_load(volatile __global atomic_long *object);
-long __ovld atomic_load(volatile __local atomic_long *object);
-ulong __ovld atomic_load(volatile __global atomic_ulong *object);
-ulong __ovld atomic_load(volatile __local atomic_ulong *object);
+long __ovld atomic_load(volatile __global atomic_long *);
+long __ovld atomic_load(volatile __local atomic_long *);
+ulong __ovld atomic_load(volatile __global atomic_ulong *);
+ulong __ovld atomic_load(volatile __local atomic_ulong *);
#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
#endif
#if defined(__opencl_c_atomic_scope_device)
#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order);
-uint __ovld atomic_load_explicit(volatile atomic_uint *object, memory_order order);
-float __ovld atomic_load_explicit(volatile atomic_float *object, memory_order order);
+int __ovld atomic_load_explicit(volatile atomic_int *, memory_order);
+uint __ovld atomic_load_explicit(volatile atomic_uint *, memory_order);
+float __ovld atomic_load_explicit(volatile atomic_float *, memory_order);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-double __ovld atomic_load_explicit(volatile atomic_double *object, memory_order order);
+double __ovld atomic_load_explicit(volatile atomic_double *, memory_order);
#endif //cl_khr_fp64
-long __ovld atomic_load_explicit(volatile atomic_long *object, memory_order order);
-ulong __ovld atomic_load_explicit(volatile atomic_ulong *object, memory_order order);
+long __ovld atomic_load_explicit(volatile atomic_long *, memory_order);
+ulong __ovld atomic_load_explicit(volatile atomic_ulong *, memory_order);
#endif
#endif //defined(__opencl_c_generic_address_space)
#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_load_explicit(volatile __global atomic_int *object, memory_order order);
-int __ovld atomic_load_explicit(volatile __local atomic_int *object, memory_order order);
-uint __ovld atomic_load_explicit(volatile __global atomic_uint *object, memory_order order);
-uint __ovld atomic_load_explicit(volatile __local atomic_uint *object, memory_order order);
-float __ovld atomic_load_explicit(volatile __global atomic_float *object, memory_order order);
-float __ovld atomic_load_explicit(volatile __local atomic_float *object, memory_order order);
+int __ovld atomic_load_explicit(volatile __global atomic_int *, memory_order);
+int __ovld atomic_load_explicit(volatile __local atomic_int *, memory_order);
+uint __ovld atomic_load_explicit(volatile __global atomic_uint *, memory_order);
+uint __ovld atomic_load_explicit(volatile __local atomic_uint *, memory_order);
+float __ovld atomic_load_explicit(volatile __global atomic_float *, memory_order);
+float __ovld atomic_load_explicit(volatile __local atomic_float *, memory_order);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-double __ovld atomic_load_explicit(volatile __global atomic_double *object, memory_order order);
-double __ovld atomic_load_explicit(volatile __local atomic_double *object, memory_order order);
+double __ovld atomic_load_explicit(volatile __global atomic_double *, memory_order);
+double __ovld atomic_load_explicit(volatile __local atomic_double *, memory_order);
#endif //cl_khr_fp64
-long __ovld atomic_load_explicit(volatile __global atomic_long *object, memory_order order);
-long __ovld atomic_load_explicit(volatile __local atomic_long *object, memory_order order);
-ulong __ovld atomic_load_explicit(volatile __global atomic_ulong *object, memory_order order);
-ulong __ovld atomic_load_explicit(volatile __local atomic_ulong *object, memory_order order);
+long __ovld atomic_load_explicit(volatile __global atomic_long *, memory_order);
+long __ovld atomic_load_explicit(volatile __local atomic_long *, memory_order);
+ulong __ovld atomic_load_explicit(volatile __global atomic_ulong *, memory_order);
+ulong __ovld atomic_load_explicit(volatile __local atomic_ulong *, memory_order);
#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
#endif
#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_load_explicit(volatile atomic_int *object, memory_order order, memory_scope scope);
-uint __ovld atomic_load_explicit(volatile atomic_uint *object, memory_order order, memory_scope scope);
-float __ovld atomic_load_explicit(volatile atomic_float *object, memory_order order, memory_scope scope);
+int __ovld atomic_load_explicit(volatile atomic_int *, memory_order, memory_scope);
+uint __ovld atomic_load_explicit(volatile atomic_uint *, memory_order, memory_scope);
+float __ovld atomic_load_explicit(volatile atomic_float *, memory_order, memory_scope);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-double __ovld atomic_load_explicit(volatile atomic_double *object, memory_order order, memory_scope scope);
+double __ovld atomic_load_explicit(volatile atomic_double *, memory_order, memory_scope);
#endif //cl_khr_fp64
-long __ovld atomic_load_explicit(volatile atomic_long *object, memory_order order, memory_scope scope);
-ulong __ovld atomic_load_explicit(volatile atomic_ulong *object, memory_order order, memory_scope scope);
+long __ovld atomic_load_explicit(volatile atomic_long *, memory_order, memory_scope);
+ulong __ovld atomic_load_explicit(volatile atomic_ulong *, memory_order, memory_scope);
#endif
#endif //defined(__opencl_c_generic_address_space)
#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_load_explicit(volatile __global atomic_int *object, memory_order order, memory_scope scope);
-int __ovld atomic_load_explicit(volatile __local atomic_int *object, memory_order order, memory_scope scope);
-uint __ovld atomic_load_explicit(volatile __global atomic_uint *object, memory_order order, memory_scope scope);
-uint __ovld atomic_load_explicit(volatile __local atomic_uint *object, memory_order order, memory_scope scope);
-float __ovld atomic_load_explicit(volatile __global atomic_float *object, memory_order order, memory_scope scope);
-float __ovld atomic_load_explicit(volatile __local atomic_float *object, memory_order order, memory_scope scope);
+int __ovld atomic_load_explicit(volatile __global atomic_int *, memory_order, memory_scope);
+int __ovld atomic_load_explicit(volatile __local atomic_int *, memory_order, memory_scope);
+uint __ovld atomic_load_explicit(volatile __global atomic_uint *, memory_order, memory_scope);
+uint __ovld atomic_load_explicit(volatile __local atomic_uint *, memory_order, memory_scope);
+float __ovld atomic_load_explicit(volatile __global atomic_float *, memory_order, memory_scope);
+float __ovld atomic_load_explicit(volatile __local atomic_float *, memory_order, memory_scope);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-double __ovld atomic_load_explicit(volatile __global atomic_double *object, memory_order order, memory_scope scope);
-double __ovld atomic_load_explicit(volatile __local atomic_double *object, memory_order order, memory_scope scope);
+double __ovld atomic_load_explicit(volatile __global atomic_double *, memory_order, memory_scope);
+double __ovld atomic_load_explicit(volatile __local atomic_double *, memory_order, memory_scope);
#endif
-long __ovld atomic_load_explicit(volatile __global atomic_long *object, memory_order order, memory_scope scope);
-long __ovld atomic_load_explicit(volatile __local atomic_long *object, memory_order order, memory_scope scope);
-ulong __ovld atomic_load_explicit(volatile __global atomic_ulong *object, memory_order order, memory_scope scope);
-ulong __ovld atomic_load_explicit(volatile __local atomic_ulong *object, memory_order order, memory_scope scope);
+long __ovld atomic_load_explicit(volatile __global atomic_long *, memory_order, memory_scope);
+long __ovld atomic_load_explicit(volatile __local atomic_long *, memory_order, memory_scope);
+ulong __ovld atomic_load_explicit(volatile __global atomic_ulong *, memory_order, memory_scope);
+ulong __ovld atomic_load_explicit(volatile __local atomic_ulong *, memory_order, memory_scope);
#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
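// Usage sketch (illustrative only; kernel and parameter names below are
// hypothetical, not part of this header): the fully explicit load takes
// the atomic object, a memory_order, and a memory_scope.
//
//   __kernel void read_counter(volatile __global atomic_int *counter,
//                              __global int *out) {
//     *out = atomic_load_explicit(counter, memory_order_acquire,
//                                 memory_scope_device);
//   }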
@@ -14246,599 +14172,433 @@ ulong __ovld atomic_load_explicit(volatile __local atomic_ulong *object, memory_
#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_exchange(volatile atomic_int *object, int desired);
-uint __ovld atomic_exchange(volatile atomic_uint *object, uint desired);
-float __ovld atomic_exchange(volatile atomic_float *object, float desired);
+int __ovld atomic_exchange(volatile atomic_int *, int);
+uint __ovld atomic_exchange(volatile atomic_uint *, uint);
+float __ovld atomic_exchange(volatile atomic_float *, float);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-double __ovld atomic_exchange(volatile atomic_double *object, double desired);
+double __ovld atomic_exchange(volatile atomic_double *, double);
#endif //cl_khr_fp64
-long __ovld atomic_exchange(volatile atomic_long *object, long desired);
-ulong __ovld atomic_exchange(volatile atomic_ulong *object, ulong desired);
+long __ovld atomic_exchange(volatile atomic_long *, long);
+ulong __ovld atomic_exchange(volatile atomic_ulong *, ulong);
#endif
#endif //defined(__opencl_c_generic_address_space)
#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_exchange(volatile __global atomic_int *object, int desired);
-int __ovld atomic_exchange(volatile __local atomic_int *object, int desired);
-uint __ovld atomic_exchange(volatile __global atomic_uint *object, uint desired);
-uint __ovld atomic_exchange(volatile __local atomic_uint *object, uint desired);
-float __ovld atomic_exchange(volatile __global atomic_float *object, float desired);
-float __ovld atomic_exchange(volatile __local atomic_float *object, float desired);
+int __ovld atomic_exchange(volatile __global atomic_int *, int);
+int __ovld atomic_exchange(volatile __local atomic_int *, int);
+uint __ovld atomic_exchange(volatile __global atomic_uint *, uint);
+uint __ovld atomic_exchange(volatile __local atomic_uint *, uint);
+float __ovld atomic_exchange(volatile __global atomic_float *, float);
+float __ovld atomic_exchange(volatile __local atomic_float *, float);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-double __ovld atomic_exchange(volatile __global atomic_double *object, double desired);
-double __ovld atomic_exchange(volatile __local atomic_double *object, double desired);
+double __ovld atomic_exchange(volatile __global atomic_double *, double);
+double __ovld atomic_exchange(volatile __local atomic_double *, double);
#endif //cl_khr_fp64
-long __ovld atomic_exchange(volatile __global atomic_long *object, long desired);
-long __ovld atomic_exchange(volatile __local atomic_long *object, long desired);
-ulong __ovld atomic_exchange(volatile __global atomic_ulong *object, ulong desired);
-ulong __ovld atomic_exchange(volatile __local atomic_ulong *object, ulong desired);
+long __ovld atomic_exchange(volatile __global atomic_long *, long);
+long __ovld atomic_exchange(volatile __local atomic_long *, long);
+ulong __ovld atomic_exchange(volatile __global atomic_ulong *, ulong);
+ulong __ovld atomic_exchange(volatile __local atomic_ulong *, ulong);
#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
#endif
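// Usage sketch (illustrative; names are hypothetical): the plain
// atomic_exchange overloads above implicitly use memory_order_seq_cst
// and memory_scope_device.
//
//   __kernel void claim_slot(volatile __global atomic_int *slot,
//                            __global int *prev) {
//     *prev = atomic_exchange(slot, 1);  // returns the previous value
//   }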
#if defined(__opencl_c_atomic_scope_device)
#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order);
-uint __ovld atomic_exchange_explicit(volatile atomic_uint *object, uint desired, memory_order order);
-float __ovld atomic_exchange_explicit(volatile atomic_float *object, float desired, memory_order order);
+int __ovld atomic_exchange_explicit(volatile atomic_int *, int, memory_order);
+uint __ovld atomic_exchange_explicit(volatile atomic_uint *, uint, memory_order);
+float __ovld atomic_exchange_explicit(volatile atomic_float *, float, memory_order);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-double __ovld atomic_exchange_explicit(volatile atomic_double *object, double desired, memory_order order);
+double __ovld atomic_exchange_explicit(volatile atomic_double *, double, memory_order);
#endif //cl_khr_fp64
-long __ovld atomic_exchange_explicit(volatile atomic_long *object, long desired, memory_order order);
-ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *object, ulong desired, memory_order order);
+long __ovld atomic_exchange_explicit(volatile atomic_long *, long, memory_order);
+ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *, ulong, memory_order);
#endif
#endif //defined(__opencl_c_generic_address_space)
#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_exchange_explicit(volatile __global atomic_int *object, int desired, memory_order order);
-int __ovld atomic_exchange_explicit(volatile __local atomic_int *object, int desired, memory_order order);
-uint __ovld atomic_exchange_explicit(volatile __global atomic_uint *object, uint desired, memory_order order);
-uint __ovld atomic_exchange_explicit(volatile __local atomic_uint *object, uint desired, memory_order order);
-float __ovld atomic_exchange_explicit(volatile __global atomic_float *object, float desired, memory_order order);
-float __ovld atomic_exchange_explicit(volatile __local atomic_float *object, float desired, memory_order order);
+int __ovld atomic_exchange_explicit(volatile __global atomic_int *, int, memory_order);
+int __ovld atomic_exchange_explicit(volatile __local atomic_int *, int, memory_order);
+uint __ovld atomic_exchange_explicit(volatile __global atomic_uint *, uint, memory_order);
+uint __ovld atomic_exchange_explicit(volatile __local atomic_uint *, uint, memory_order);
+float __ovld atomic_exchange_explicit(volatile __global atomic_float *, float, memory_order);
+float __ovld atomic_exchange_explicit(volatile __local atomic_float *, float, memory_order);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-double __ovld atomic_exchange_explicit(volatile __global atomic_double *object, double desired, memory_order order);
-double __ovld atomic_exchange_explicit(volatile __local atomic_double *object, double desired, memory_order order);
+double __ovld atomic_exchange_explicit(volatile __global atomic_double *, double, memory_order);
+double __ovld atomic_exchange_explicit(volatile __local atomic_double *, double, memory_order);
#endif //cl_khr_fp64
-long __ovld atomic_exchange_explicit(volatile __global atomic_long *object, long desired, memory_order order);
-long __ovld atomic_exchange_explicit(volatile __local atomic_long *object, long desired, memory_order order);
-ulong __ovld atomic_exchange_explicit(volatile __global atomic_ulong *object, ulong desired, memory_order order);
-ulong __ovld atomic_exchange_explicit(volatile __local atomic_ulong *object, ulong desired, memory_order order);
+long __ovld atomic_exchange_explicit(volatile __global atomic_long *, long, memory_order);
+long __ovld atomic_exchange_explicit(volatile __local atomic_long *, long, memory_order);
+ulong __ovld atomic_exchange_explicit(volatile __global atomic_ulong *, ulong, memory_order);
+ulong __ovld atomic_exchange_explicit(volatile __local atomic_ulong *, ulong, memory_order);
#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
#endif
#if defined(__opencl_c_generic_address_space)
-int __ovld atomic_exchange_explicit(volatile atomic_int *object, int desired, memory_order order, memory_scope scope);
-uint __ovld atomic_exchange_explicit(volatile atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-float __ovld atomic_exchange_explicit(volatile atomic_float *object, float desired, memory_order order, memory_scope scope);
+int __ovld atomic_exchange_explicit(volatile atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_exchange_explicit(volatile atomic_uint *, uint, memory_order, memory_scope);
+float __ovld atomic_exchange_explicit(volatile atomic_float *, float, memory_order, memory_scope);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-double __ovld atomic_exchange_explicit(volatile atomic_double *object, double desired, memory_order order, memory_scope scope);
+double __ovld atomic_exchange_explicit(volatile atomic_double *, double, memory_order, memory_scope);
#endif //cl_khr_fp64
-long __ovld atomic_exchange_explicit(volatile atomic_long *object, long desired, memory_order order, memory_scope scope);
-ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
+long __ovld atomic_exchange_explicit(volatile atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_exchange_explicit(volatile atomic_ulong *, ulong, memory_order, memory_scope);
#endif
#endif //defined(__opencl_c_generic_address_space)
#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-int __ovld atomic_exchange_explicit(volatile __global atomic_int *object, int desired, memory_order order, memory_scope scope);
-int __ovld atomic_exchange_explicit(volatile __local atomic_int *object, int desired, memory_order order, memory_scope scope);
-uint __ovld atomic_exchange_explicit(volatile __global atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-uint __ovld atomic_exchange_explicit(volatile __local atomic_uint *object, uint desired, memory_order order, memory_scope scope);
-float __ovld atomic_exchange_explicit(volatile __global atomic_float *object, float desired, memory_order order, memory_scope scope);
-float __ovld atomic_exchange_explicit(volatile __local atomic_float *object, float desired, memory_order order, memory_scope scope);
+int __ovld atomic_exchange_explicit(volatile __global atomic_int *, int, memory_order, memory_scope);
+int __ovld atomic_exchange_explicit(volatile __local atomic_int *, int, memory_order, memory_scope);
+uint __ovld atomic_exchange_explicit(volatile __global atomic_uint *, uint, memory_order, memory_scope);
+uint __ovld atomic_exchange_explicit(volatile __local atomic_uint *, uint, memory_order, memory_scope);
+float __ovld atomic_exchange_explicit(volatile __global atomic_float *, float, memory_order, memory_scope);
+float __ovld atomic_exchange_explicit(volatile __local atomic_float *, float, memory_order, memory_scope);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-double __ovld atomic_exchange_explicit(volatile __global atomic_double *object, double desired, memory_order order, memory_scope scope);
-double __ovld atomic_exchange_explicit(volatile __local atomic_double *object, double desired, memory_order order, memory_scope scope);
+double __ovld atomic_exchange_explicit(volatile __global atomic_double *, double, memory_order, memory_scope);
+double __ovld atomic_exchange_explicit(volatile __local atomic_double *, double, memory_order, memory_scope);
#endif //cl_khr_fp64
-long __ovld atomic_exchange_explicit(volatile __global atomic_long *object, long desired, memory_order order, memory_scope scope);
-long __ovld atomic_exchange_explicit(volatile __local atomic_long *object, long desired, memory_order order, memory_scope scope);
-ulong __ovld atomic_exchange_explicit(volatile __global atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
-ulong __ovld atomic_exchange_explicit(volatile __local atomic_ulong *object, ulong desired, memory_order order, memory_scope scope);
+long __ovld atomic_exchange_explicit(volatile __global atomic_long *, long, memory_order, memory_scope);
+long __ovld atomic_exchange_explicit(volatile __local atomic_long *, long, memory_order, memory_scope);
+ulong __ovld atomic_exchange_explicit(volatile __global atomic_ulong *, ulong, memory_order, memory_scope);
+ulong __ovld atomic_exchange_explicit(volatile __local atomic_ulong *, ulong, memory_order, memory_scope);
#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
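// Usage sketch (illustrative; names are hypothetical): the order- and
// scope-explicit exchange lets work-group-local synchronization avoid a
// device-wide fence.
//
//   __kernel void swap_flag(volatile __local atomic_uint *flag,
//                           __global uint *prev) {
//     if (get_local_id(0) == 0)
//       *prev = atomic_exchange_explicit(flag, 1u, memory_order_acq_rel,
//                                        memory_scope_work_group);
//   }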
// atomic_compare_exchange_strong() and atomic_compare_exchange_weak()
#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
#if defined(__opencl_c_generic_address_space)
-bool __ovld atomic_compare_exchange_strong(volatile atomic_int *object, int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile atomic_uint *object, uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_int *object, int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_uint *object, uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile atomic_float *object, float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_float *object, float *expected, float desired);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_int *, int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_uint *, uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_int *, int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_uint *, uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_float *, float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_float *, float *, float);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong(volatile atomic_double *object, double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_double *object, double *expected, double desired);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_double *, double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_double *, double *, double);
#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong(volatile atomic_long *object, long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_long *object, long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile atomic_ulong *object, ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile atomic_ulong *object, ulong *expected, ulong desired);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_long *, long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_long *, long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile atomic_ulong *, ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile atomic_ulong *, ulong *, ulong);
#endif
#endif //defined(__opencl_c_generic_address_space)
#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *object, __global int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *object, __local int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *object, __private int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *object, __global int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *object, __local int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *object, __private int *expected, int desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *object, __global uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *object, __local uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *object, __private uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *object, __global uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *object, __local uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *object, __private uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *object, __global float *expected, float desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *object, __local float *expected, float desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *object, __private float *expected, float desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *object, __global float *expected, float desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *object, __local float *expected, float desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *object, __private float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *object, __global int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *object, __local int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *object, __private int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *object, __global int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *object, __local int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *object, __private int *expected, int desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *object, __global uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *object, __local uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *object, __private uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *object, __global uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *object, __local uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *object, __private uint *expected, uint desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *object, __global float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *object, __local float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *object, __private float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *object, __global float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *object, __local float *expected, float desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *object, __private float *expected, float desired);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *, __global int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *, __local int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_int *, __private int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *, __global int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *, __local int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_int *, __private int *, int);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *, __global uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *, __local uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_uint *, __private uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *, __global uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *, __local uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_uint *, __private uint *, uint);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *, __global float *, float);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *, __local float *, float);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_float *, __private float *, float);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *, __global float *, float);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *, __local float *, float);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_float *, __private float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *, __global int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *, __local int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_int *, __private int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *, __global int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *, __local int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_int *, __private int *, int);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *, __global uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *, __local uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_uint *, __private uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *, __global uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *, __local uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_uint *, __private uint *, uint);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *, __global float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *, __local float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_float *, __private float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *, __global float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *, __local float *, float);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_float *, __private float *, float);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *object, __global double *expected, double desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *object, __local double *expected, double desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *object, __private double *expected, double desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *object, __global double *expected, double desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *object, __local double *expected, double desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *object, __private double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *object, __global double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *object, __local double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *object, __private double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *object, __global double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *object, __local double *expected, double desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *object, __private double *expected, double desired);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *, __global double *, double);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *, __local double *, double);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_double *, __private double *, double);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *, __global double *, double);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *, __local double *, double);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_double *, __private double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *, __global double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *, __local double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_double *, __private double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *, __global double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *, __local double *, double);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_double *, __private double *, double);
#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *object, __global long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *object, __local long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *object, __private long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *object, __global long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *object, __local long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *object, __private long *expected, long desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *object, __global ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *object, __local ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *object, __private ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *object, __global ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *object, __local ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *object, __private ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *object, __global long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *object, __local long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *object, __private long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *object, __global long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *object, __local long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *object, __private long *expected, long desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *object, __global ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *object, __local ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *object, __private ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *object, __global ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *object, __local ulong *expected, ulong desired);
-bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *object, __private ulong *expected, ulong desired);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *, __global long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *, __local long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_long *, __private long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *, __global long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *, __local long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_long *, __private long *, long);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *, __global ulong *, ulong);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *, __local ulong *, ulong);
+bool __ovld atomic_compare_exchange_strong(volatile __global atomic_ulong *, __private ulong *, ulong);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *, __global ulong *, ulong);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *, __local ulong *, ulong);
+bool __ovld atomic_compare_exchange_strong(volatile __local atomic_ulong *, __private ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *, __global long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *, __local long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_long *, __private long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *, __global long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *, __local long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_long *, __private long *, long);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *, __global ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *, __local ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile __global atomic_ulong *, __private ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *, __global ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *, __local ulong *, ulong);
+bool __ovld atomic_compare_exchange_weak(volatile __local atomic_ulong *, __private ulong *, ulong);
#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
#endif
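// Usage sketch (illustrative; names are hypothetical, and the generic
// address space overloads above are assumed): on failure,
// compare-exchange writes the observed value back through the expected
// pointer, so a typical CAS increment loop re-reads it for free.
//
//   int expected = atomic_load(obj);
//   while (!atomic_compare_exchange_strong(obj, &expected, expected + 1))
//     ;  // expected now holds the freshly observed value; retry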
+#if defined(__opencl_c_atomic_scope_device)
#if defined(__opencl_c_generic_address_space)
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected,
- int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *object, uint *expected,
- uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected,
- int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *object, uint *expected,
- uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *object, float *expected,
- float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *object, float *expected,
- float desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *, int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *, uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *, int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *, uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *, float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *, float *, float, memory_order, memory_order);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *object, double *expected,
- double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *object, double *expected,
- double desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *, double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *, double *, double, memory_order, memory_order);
#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *object, long *expected,
- long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *object, long *expected,
- long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *object, ulong *expected,
- ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *object, ulong *expected,
- ulong desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *, long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *, long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *, ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *, ulong *, ulong, memory_order, memory_order);
#endif
#endif //defined(__opencl_c_generic_address_space)
#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __global int *expected,
- int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __local int *expected,
- int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __private int *expected,
- int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __global int *expected,
- int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __local int *expected,
- int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __private int *expected,
- int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __global uint *expected,
- uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __local uint *expected,
- uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __private uint *expected,
- uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __global uint *expected,
- uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __local uint *expected,
- uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __private uint *expected,
- uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __global float *expected,
- float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __local float *expected,
- float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __private float *expected,
- float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __global float *expected,
- float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __local float *expected,
- float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __private float *expected,
- float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __global int *expected,
- int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __local int *expected,
- int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __private int *expected,
- int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __global int *expected,
- int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __local int *expected,
- int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __private int *expected,
- int desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __global uint *expected,
- uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __local uint *expected,
- uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __private uint *expected,
- uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __global uint *expected,
- uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __local uint *expected,
- uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __private uint *expected,
- uint desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __global float *expected,
- float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __local float *expected,
- float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __private float *expected,
- float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __global float *expected,
- float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __local float *expected,
- float desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __private float *expected,
- float desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __global int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __local int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __private int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __global int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __local int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __private int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __global uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __local uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __private uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __global uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __local uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __private uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __global float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __local float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __private float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __global float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __local float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __private float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __global int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __local int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __private int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __global int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __local int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __private int *, int, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __global uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __local uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __private uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __global uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __local uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __private uint *, uint, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __global float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __local float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __private float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __global float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __local float *, float, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __private float *, float, memory_order, memory_order);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __global double *expected,
- double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __local double *expected,
- double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __private double *expected,
- double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __global double *expected,
- double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __local double *expected,
- double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __private double *expected,
- double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __global double *expected,
- double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __local double *expected,
- double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __private double *expected,
- double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __global double *expected,
- double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __local double *expected,
- double desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __private double *expected,
- double desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __global double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __local double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __private double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __global double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __local double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __private double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __global double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __local double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __private double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __global double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __local double *, double, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __private double *, double, memory_order, memory_order);
#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __global long *expected,
- long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __local long *expected,
- long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __private long *expected,
- long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __global long *expected,
- long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __local long *expected,
- long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __private long *expected,
- long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __global ulong *expected,
- ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __local ulong *expected,
- ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __private ulong *expected,
- ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __global ulong *expected,
- ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __local ulong *expected,
- ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __private ulong *expected,
- ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __global long *expected,
- long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __local long *expected,
- long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __private long *expected,
- long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __global long *expected,
- long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __local long *expected,
- long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __private long *expected,
- long desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __global ulong *expected,
- ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __local ulong *expected,
- ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __private ulong *expected,
- ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __global ulong *expected,
- ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __local ulong *expected,
- ulong desired, memory_order success, memory_order failure);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __private ulong *expected,
- ulong desired, memory_order success, memory_order failure);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __global long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __local long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __private long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __global long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __local long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __private long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __global ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __local ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __private ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __global ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __local ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __private ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __global long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __local long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __private long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __global long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __local long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __private long *, long, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __global ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __local ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __private ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __global ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __local ulong *, ulong, memory_order, memory_order);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __private ulong *, ulong, memory_order, memory_order);
#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
+#endif //defined(__opencl_c_atomic_scope_device)
#if defined(__opencl_c_generic_address_space)
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *object, int *expected,
- int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *object, uint *expected,
- uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *object, int *expected,
- int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *object, uint *expected,
- uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *object, float *expected,
- float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *object, float *expected,
- float desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_int *, int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_uint *, uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_int *, int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_uint *, uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_float *, float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_float *, float *, float, memory_order, memory_order, memory_scope);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *object, double *expected,
- double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *object, double *expected,
- double desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_double *, double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_double *, double *, double, memory_order, memory_order, memory_scope);
#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *object, long *expected,
- long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *object, long *expected,
- long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *object, ulong *expected,
- ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *object, ulong *expected,
- ulong desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_long *, long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_long *, long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile atomic_ulong *, ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile atomic_ulong *, ulong *, ulong, memory_order, memory_order, memory_scope);
#endif
#endif //defined(__opencl_c_generic_address_space)
#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __global int *expected,
- int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __local int *expected,
- int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *object, __private int *expected,
- int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __global int *expected,
- int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __local int *expected,
- int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *object, __private int *expected,
- int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __global uint *expected,
- uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __local uint *expected,
- uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *object, __private uint *expected,
- uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __global uint *expected,
- uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __local uint *expected,
- uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *object, __private uint *expected,
- uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __global float *expected,
- float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __local float *expected,
- float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *object, __private float *expected,
- float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __global float *expected,
- float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __local float *expected,
- float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *object, __private float *expected,
- float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __global int *expected,
- int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __local int *expected,
- int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *object, __private int *expected,
- int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __global int *expected,
- int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __local int *expected,
- int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *object, __private int *expected,
- int desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __global uint *expected,
- uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __local uint *expected,
- uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *object, __private uint *expected,
- uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __global uint *expected,
- uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __local uint *expected,
- uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *object, __private uint *expected,
- uint desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __global float *expected,
- float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __local float *expected,
- float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *object, __private float *expected,
- float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __global float *expected,
- float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __local float *expected,
- float desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *object, __private float *expected,
- float desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __global int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __local int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_int *, __private int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __global int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __local int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_int *, __private int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __global uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __local uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_uint *, __private uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __global uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __local uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_uint *, __private uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __global float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __local float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_float *, __private float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __global float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __local float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_float *, __private float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __global int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __local int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_int *, __private int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __global int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __local int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_int *, __private int *, int, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __global uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __local uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_uint *, __private uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __global uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __local uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_uint *, __private uint *, uint, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __global float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __local float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_float *, __private float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __global float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __local float *, float, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_float *, __private float *, float, memory_order, memory_order, memory_scope);
#if defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#ifdef cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __global double *expected,
- double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __local double *expected,
- double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *object, __private double *expected,
- double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __global double *expected,
- double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __local double *expected,
- double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *object, __private double *expected,
- double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __global double *expected,
- double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __local double *expected,
- double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *object, __private double *expected,
- double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __global double *expected,
- double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __local double *expected,
- double desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *object, __private double *expected,
- double desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __global double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __local double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_double *, __private double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __global double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __local double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_double *, __private double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __global double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __local double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_double *, __private double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __global double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __local double *, double, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_double *, __private double *, double, memory_order, memory_order, memory_scope);
#endif //cl_khr_fp64
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __global long *expected,
- long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __local long *expected,
- long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *object, __private long *expected,
- long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __global long *expected,
- long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __local long *expected,
- long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *object, __private long *expected,
- long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __global ulong *expected,
- ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __local ulong *expected,
- ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *object, __private ulong *expected,
- ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __global ulong *expected,
- ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __local ulong *expected,
- ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *object, __private ulong *expected,
- ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __global long *expected,
- long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __local long *expected,
- long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *object, __private long *expected,
- long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __global long *expected,
- long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __local long *expected,
- long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *object, __private long *expected,
- long desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __global ulong *expected,
- ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __local ulong *expected,
- ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *object, __private ulong *expected,
- ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __global ulong *expected,
- ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __local ulong *expected,
- ulong desired, memory_order success, memory_order failure, memory_scope scope);
-bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *object, __private ulong *expected,
- ulong desired, memory_order success, memory_order failure, memory_scope scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __global long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __local long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_long *, __private long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __global long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __local long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_long *, __private long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __global ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __local ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __global atomic_ulong *, __private ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __global ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __local ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_strong_explicit(volatile __local atomic_ulong *, __private ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __global long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __local long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_long *, __private long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __global long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __local long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_long *, __private long *, long, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __global ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __local ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __global atomic_ulong *, __private ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __global ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __local ulong *, ulong, memory_order, memory_order, memory_scope);
+bool __ovld atomic_compare_exchange_weak_explicit(volatile __local atomic_ulong *, __private ulong *, ulong, memory_order, memory_order, memory_scope);
#endif //defined(cl_khr_int64_base_atomics) && defined(cl_khr_int64_extended_atomics)
#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
// atomic_flag_test_and_set() and atomic_flag_clear()
#if defined(__opencl_c_atomic_order_seq_cst) && defined(__opencl_c_atomic_scope_device)
#if defined(__opencl_c_generic_address_space)
-bool __ovld atomic_flag_test_and_set(volatile atomic_flag *object);
-void __ovld atomic_flag_clear(volatile atomic_flag *object);
+bool __ovld atomic_flag_test_and_set(volatile atomic_flag *);
+void __ovld atomic_flag_clear(volatile atomic_flag *);
#endif //defined(__opencl_c_generic_address_space)
#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-bool __ovld atomic_flag_test_and_set(volatile __global atomic_flag *object);
-bool __ovld atomic_flag_test_and_set(volatile __local atomic_flag *object);
-void __ovld atomic_flag_clear(volatile __global atomic_flag *object);
-void __ovld atomic_flag_clear(volatile __local atomic_flag *object);
+bool __ovld atomic_flag_test_and_set(volatile __global atomic_flag *);
+bool __ovld atomic_flag_test_and_set(volatile __local atomic_flag *);
+void __ovld atomic_flag_clear(volatile __global atomic_flag *);
+void __ovld atomic_flag_clear(volatile __local atomic_flag *);
#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
#endif
#if defined(__opencl_c_atomic_scope_device)
#if defined(__opencl_c_generic_address_space)
-bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order);
-void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order);
+bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *, memory_order);
+void __ovld atomic_flag_clear_explicit(volatile atomic_flag *, memory_order);
#endif //defined(__opencl_c_generic_address_space)
#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-bool __ovld atomic_flag_test_and_set_explicit(volatile __global atomic_flag *object, memory_order order);
-bool __ovld atomic_flag_test_and_set_explicit(volatile __local atomic_flag *object, memory_order order);
-void __ovld atomic_flag_clear_explicit(volatile __global atomic_flag *object, memory_order order);
-void __ovld atomic_flag_clear_explicit(volatile __local atomic_flag *object, memory_order order);
+bool __ovld atomic_flag_test_and_set_explicit(volatile __global atomic_flag *, memory_order);
+bool __ovld atomic_flag_test_and_set_explicit(volatile __local atomic_flag *, memory_order);
+void __ovld atomic_flag_clear_explicit(volatile __global atomic_flag *, memory_order);
+void __ovld atomic_flag_clear_explicit(volatile __local atomic_flag *, memory_order);
#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
#endif
#if defined(__opencl_c_generic_address_space)
-bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *object, memory_order order, memory_scope scope);
-void __ovld atomic_flag_clear_explicit(volatile atomic_flag *object, memory_order order, memory_scope scope);
+bool __ovld atomic_flag_test_and_set_explicit(volatile atomic_flag *, memory_order, memory_scope);
+void __ovld atomic_flag_clear_explicit(volatile atomic_flag *, memory_order, memory_scope);
#endif //defined(__opencl_c_generic_address_space)
#if (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
-bool __ovld atomic_flag_test_and_set_explicit(volatile __global atomic_flag *object, memory_order order, memory_scope scope);
-bool __ovld atomic_flag_test_and_set_explicit(volatile __local atomic_flag *object, memory_order order, memory_scope scope);
-void __ovld atomic_flag_clear_explicit(volatile __global atomic_flag *object, memory_order order, memory_scope scope);
-void __ovld atomic_flag_clear_explicit(volatile __local atomic_flag *object, memory_order order, memory_scope scope);
+bool __ovld atomic_flag_test_and_set_explicit(volatile __global atomic_flag *, memory_order, memory_scope);
+bool __ovld atomic_flag_test_and_set_explicit(volatile __local atomic_flag *, memory_order, memory_scope);
+void __ovld atomic_flag_clear_explicit(volatile __global atomic_flag *, memory_order, memory_scope);
+void __ovld atomic_flag_clear_explicit(volatile __local atomic_flag *, memory_order, memory_scope);
#endif // (__OPENCL_C_VERSION__ >= CL_VERSION_3_0 || __OPENCL_CPP_VERSION__ >= 202100)
#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
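/* Usage sketch (illustrative, not part of this diff): an atomic-max CAS
 * loop built on the explicit compare-exchange overloads declared above.
 * Removing the parameter names from the prototypes does not change call
 * sites; the kernel and buffer names here are hypothetical.
 */
__kernel void atomic_max_demo(volatile __global atomic_int *acc,
                              __global const int *vals) {
    int v = vals[get_global_id(0)];
    int expected = atomic_load_explicit(acc, memory_order_relaxed,
                                        memory_scope_device);
    /* The weak form may fail spuriously; on failure, `expected` is
     * reloaded with the current value of *acc, so simply retry while
     * it is still smaller than `v`. */
    while (expected < v &&
           !atomic_compare_exchange_weak_explicit(acc, &expected, v,
                                                  memory_order_relaxed,
                                                  memory_order_relaxed,
                                                  memory_scope_device))
        ;
}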
@@ -14886,452 +14646,452 @@ void __ovld atomic_flag_clear_explicit(volatile __local atomic_flag *object, mem
* short8 b;
* b = shuffle(a, mask); <- not valid
*/
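/* For contrast with the invalid call above, a valid use (illustrative):
 * the result takes its width from the mask, and the mask element size
 * must match the result element size, e.g. short4 -> short8 through a
 * ushort8 mask, matching the shuffle(short4, ushort8) overload declared
 * below.
 */
ushort8 mask = (ushort8)(0, 1, 2, 3, 3, 2, 1, 0);
short4 a = (short4)(10, 20, 30, 40);
short8 b = shuffle(a, mask); /* valid: 8 elements, 16-bit element size */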
-char2 __ovld __cnfn shuffle(char2 x, uchar2 mask);
-char2 __ovld __cnfn shuffle(char4 x, uchar2 mask);
-char2 __ovld __cnfn shuffle(char8 x, uchar2 mask);
-char2 __ovld __cnfn shuffle(char16 x, uchar2 mask);
-
-uchar2 __ovld __cnfn shuffle(uchar2 x, uchar2 mask);
-uchar2 __ovld __cnfn shuffle(uchar4 x, uchar2 mask);
-uchar2 __ovld __cnfn shuffle(uchar8 x, uchar2 mask);
-uchar2 __ovld __cnfn shuffle(uchar16 x, uchar2 mask);
-
-short2 __ovld __cnfn shuffle(short2 x, ushort2 mask);
-short2 __ovld __cnfn shuffle(short4 x, ushort2 mask);
-short2 __ovld __cnfn shuffle(short8 x, ushort2 mask);
-short2 __ovld __cnfn shuffle(short16 x, ushort2 mask);
-
-ushort2 __ovld __cnfn shuffle(ushort2 x, ushort2 mask);
-ushort2 __ovld __cnfn shuffle(ushort4 x, ushort2 mask);
-ushort2 __ovld __cnfn shuffle(ushort8 x, ushort2 mask);
-ushort2 __ovld __cnfn shuffle(ushort16 x, ushort2 mask);
-
-int2 __ovld __cnfn shuffle(int2 x, uint2 mask);
-int2 __ovld __cnfn shuffle(int4 x, uint2 mask);
-int2 __ovld __cnfn shuffle(int8 x, uint2 mask);
-int2 __ovld __cnfn shuffle(int16 x, uint2 mask);
-
-uint2 __ovld __cnfn shuffle(uint2 x, uint2 mask);
-uint2 __ovld __cnfn shuffle(uint4 x, uint2 mask);
-uint2 __ovld __cnfn shuffle(uint8 x, uint2 mask);
-uint2 __ovld __cnfn shuffle(uint16 x, uint2 mask);
-
-long2 __ovld __cnfn shuffle(long2 x, ulong2 mask);
-long2 __ovld __cnfn shuffle(long4 x, ulong2 mask);
-long2 __ovld __cnfn shuffle(long8 x, ulong2 mask);
-long2 __ovld __cnfn shuffle(long16 x, ulong2 mask);
-
-ulong2 __ovld __cnfn shuffle(ulong2 x, ulong2 mask);
-ulong2 __ovld __cnfn shuffle(ulong4 x, ulong2 mask);
-ulong2 __ovld __cnfn shuffle(ulong8 x, ulong2 mask);
-ulong2 __ovld __cnfn shuffle(ulong16 x, ulong2 mask);
-
-float2 __ovld __cnfn shuffle(float2 x, uint2 mask);
-float2 __ovld __cnfn shuffle(float4 x, uint2 mask);
-float2 __ovld __cnfn shuffle(float8 x, uint2 mask);
-float2 __ovld __cnfn shuffle(float16 x, uint2 mask);
-
-char4 __ovld __cnfn shuffle(char2 x, uchar4 mask);
-char4 __ovld __cnfn shuffle(char4 x, uchar4 mask);
-char4 __ovld __cnfn shuffle(char8 x, uchar4 mask);
-char4 __ovld __cnfn shuffle(char16 x, uchar4 mask);
-
-uchar4 __ovld __cnfn shuffle(uchar2 x, uchar4 mask);
-uchar4 __ovld __cnfn shuffle(uchar4 x, uchar4 mask);
-uchar4 __ovld __cnfn shuffle(uchar8 x, uchar4 mask);
-uchar4 __ovld __cnfn shuffle(uchar16 x, uchar4 mask);
-
-short4 __ovld __cnfn shuffle(short2 x, ushort4 mask);
-short4 __ovld __cnfn shuffle(short4 x, ushort4 mask);
-short4 __ovld __cnfn shuffle(short8 x, ushort4 mask);
-short4 __ovld __cnfn shuffle(short16 x, ushort4 mask);
-
-ushort4 __ovld __cnfn shuffle(ushort2 x, ushort4 mask);
-ushort4 __ovld __cnfn shuffle(ushort4 x, ushort4 mask);
-ushort4 __ovld __cnfn shuffle(ushort8 x, ushort4 mask);
-ushort4 __ovld __cnfn shuffle(ushort16 x, ushort4 mask);
-
-int4 __ovld __cnfn shuffle(int2 x, uint4 mask);
-int4 __ovld __cnfn shuffle(int4 x, uint4 mask);
-int4 __ovld __cnfn shuffle(int8 x, uint4 mask);
-int4 __ovld __cnfn shuffle(int16 x, uint4 mask);
-
-uint4 __ovld __cnfn shuffle(uint2 x, uint4 mask);
-uint4 __ovld __cnfn shuffle(uint4 x, uint4 mask);
-uint4 __ovld __cnfn shuffle(uint8 x, uint4 mask);
-uint4 __ovld __cnfn shuffle(uint16 x, uint4 mask);
-
-long4 __ovld __cnfn shuffle(long2 x, ulong4 mask);
-long4 __ovld __cnfn shuffle(long4 x, ulong4 mask);
-long4 __ovld __cnfn shuffle(long8 x, ulong4 mask);
-long4 __ovld __cnfn shuffle(long16 x, ulong4 mask);
-
-ulong4 __ovld __cnfn shuffle(ulong2 x, ulong4 mask);
-ulong4 __ovld __cnfn shuffle(ulong4 x, ulong4 mask);
-ulong4 __ovld __cnfn shuffle(ulong8 x, ulong4 mask);
-ulong4 __ovld __cnfn shuffle(ulong16 x, ulong4 mask);
-
-float4 __ovld __cnfn shuffle(float2 x, uint4 mask);
-float4 __ovld __cnfn shuffle(float4 x, uint4 mask);
-float4 __ovld __cnfn shuffle(float8 x, uint4 mask);
-float4 __ovld __cnfn shuffle(float16 x, uint4 mask);
-
-char8 __ovld __cnfn shuffle(char2 x, uchar8 mask);
-char8 __ovld __cnfn shuffle(char4 x, uchar8 mask);
-char8 __ovld __cnfn shuffle(char8 x, uchar8 mask);
-char8 __ovld __cnfn shuffle(char16 x, uchar8 mask);
-
-uchar8 __ovld __cnfn shuffle(uchar2 x, uchar8 mask);
-uchar8 __ovld __cnfn shuffle(uchar4 x, uchar8 mask);
-uchar8 __ovld __cnfn shuffle(uchar8 x, uchar8 mask);
-uchar8 __ovld __cnfn shuffle(uchar16 x, uchar8 mask);
-
-short8 __ovld __cnfn shuffle(short2 x, ushort8 mask);
-short8 __ovld __cnfn shuffle(short4 x, ushort8 mask);
-short8 __ovld __cnfn shuffle(short8 x, ushort8 mask);
-short8 __ovld __cnfn shuffle(short16 x, ushort8 mask);
-
-ushort8 __ovld __cnfn shuffle(ushort2 x, ushort8 mask);
-ushort8 __ovld __cnfn shuffle(ushort4 x, ushort8 mask);
-ushort8 __ovld __cnfn shuffle(ushort8 x, ushort8 mask);
-ushort8 __ovld __cnfn shuffle(ushort16 x, ushort8 mask);
-
-int8 __ovld __cnfn shuffle(int2 x, uint8 mask);
-int8 __ovld __cnfn shuffle(int4 x, uint8 mask);
-int8 __ovld __cnfn shuffle(int8 x, uint8 mask);
-int8 __ovld __cnfn shuffle(int16 x, uint8 mask);
-
-uint8 __ovld __cnfn shuffle(uint2 x, uint8 mask);
-uint8 __ovld __cnfn shuffle(uint4 x, uint8 mask);
-uint8 __ovld __cnfn shuffle(uint8 x, uint8 mask);
-uint8 __ovld __cnfn shuffle(uint16 x, uint8 mask);
-
-long8 __ovld __cnfn shuffle(long2 x, ulong8 mask);
-long8 __ovld __cnfn shuffle(long4 x, ulong8 mask);
-long8 __ovld __cnfn shuffle(long8 x, ulong8 mask);
-long8 __ovld __cnfn shuffle(long16 x, ulong8 mask);
-
-ulong8 __ovld __cnfn shuffle(ulong2 x, ulong8 mask);
-ulong8 __ovld __cnfn shuffle(ulong4 x, ulong8 mask);
-ulong8 __ovld __cnfn shuffle(ulong8 x, ulong8 mask);
-ulong8 __ovld __cnfn shuffle(ulong16 x, ulong8 mask);
-
-float8 __ovld __cnfn shuffle(float2 x, uint8 mask);
-float8 __ovld __cnfn shuffle(float4 x, uint8 mask);
-float8 __ovld __cnfn shuffle(float8 x, uint8 mask);
-float8 __ovld __cnfn shuffle(float16 x, uint8 mask);
-
-char16 __ovld __cnfn shuffle(char2 x, uchar16 mask);
-char16 __ovld __cnfn shuffle(char4 x, uchar16 mask);
-char16 __ovld __cnfn shuffle(char8 x, uchar16 mask);
-char16 __ovld __cnfn shuffle(char16 x, uchar16 mask);
-
-uchar16 __ovld __cnfn shuffle(uchar2 x, uchar16 mask);
-uchar16 __ovld __cnfn shuffle(uchar4 x, uchar16 mask);
-uchar16 __ovld __cnfn shuffle(uchar8 x, uchar16 mask);
-uchar16 __ovld __cnfn shuffle(uchar16 x, uchar16 mask);
-
-short16 __ovld __cnfn shuffle(short2 x, ushort16 mask);
-short16 __ovld __cnfn shuffle(short4 x, ushort16 mask);
-short16 __ovld __cnfn shuffle(short8 x, ushort16 mask);
-short16 __ovld __cnfn shuffle(short16 x, ushort16 mask);
-
-ushort16 __ovld __cnfn shuffle(ushort2 x, ushort16 mask);
-ushort16 __ovld __cnfn shuffle(ushort4 x, ushort16 mask);
-ushort16 __ovld __cnfn shuffle(ushort8 x, ushort16 mask);
-ushort16 __ovld __cnfn shuffle(ushort16 x, ushort16 mask);
-
-int16 __ovld __cnfn shuffle(int2 x, uint16 mask);
-int16 __ovld __cnfn shuffle(int4 x, uint16 mask);
-int16 __ovld __cnfn shuffle(int8 x, uint16 mask);
-int16 __ovld __cnfn shuffle(int16 x, uint16 mask);
-
-uint16 __ovld __cnfn shuffle(uint2 x, uint16 mask);
-uint16 __ovld __cnfn shuffle(uint4 x, uint16 mask);
-uint16 __ovld __cnfn shuffle(uint8 x, uint16 mask);
-uint16 __ovld __cnfn shuffle(uint16 x, uint16 mask);
-
-long16 __ovld __cnfn shuffle(long2 x, ulong16 mask);
-long16 __ovld __cnfn shuffle(long4 x, ulong16 mask);
-long16 __ovld __cnfn shuffle(long8 x, ulong16 mask);
-long16 __ovld __cnfn shuffle(long16 x, ulong16 mask);
-
-ulong16 __ovld __cnfn shuffle(ulong2 x, ulong16 mask);
-ulong16 __ovld __cnfn shuffle(ulong4 x, ulong16 mask);
-ulong16 __ovld __cnfn shuffle(ulong8 x, ulong16 mask);
-ulong16 __ovld __cnfn shuffle(ulong16 x, ulong16 mask);
-
-float16 __ovld __cnfn shuffle(float2 x, uint16 mask);
-float16 __ovld __cnfn shuffle(float4 x, uint16 mask);
-float16 __ovld __cnfn shuffle(float8 x, uint16 mask);
-float16 __ovld __cnfn shuffle(float16 x, uint16 mask);
+char2 __ovld __cnfn shuffle(char2, uchar2);
+char2 __ovld __cnfn shuffle(char4, uchar2);
+char2 __ovld __cnfn shuffle(char8, uchar2);
+char2 __ovld __cnfn shuffle(char16, uchar2);
+
+uchar2 __ovld __cnfn shuffle(uchar2, uchar2);
+uchar2 __ovld __cnfn shuffle(uchar4, uchar2);
+uchar2 __ovld __cnfn shuffle(uchar8, uchar2);
+uchar2 __ovld __cnfn shuffle(uchar16, uchar2);
+
+short2 __ovld __cnfn shuffle(short2, ushort2);
+short2 __ovld __cnfn shuffle(short4, ushort2);
+short2 __ovld __cnfn shuffle(short8, ushort2);
+short2 __ovld __cnfn shuffle(short16, ushort2);
+
+ushort2 __ovld __cnfn shuffle(ushort2, ushort2);
+ushort2 __ovld __cnfn shuffle(ushort4, ushort2);
+ushort2 __ovld __cnfn shuffle(ushort8, ushort2);
+ushort2 __ovld __cnfn shuffle(ushort16, ushort2);
+
+int2 __ovld __cnfn shuffle(int2, uint2);
+int2 __ovld __cnfn shuffle(int4, uint2);
+int2 __ovld __cnfn shuffle(int8, uint2);
+int2 __ovld __cnfn shuffle(int16, uint2);
+
+uint2 __ovld __cnfn shuffle(uint2, uint2);
+uint2 __ovld __cnfn shuffle(uint4, uint2);
+uint2 __ovld __cnfn shuffle(uint8, uint2);
+uint2 __ovld __cnfn shuffle(uint16, uint2);
+
+long2 __ovld __cnfn shuffle(long2, ulong2);
+long2 __ovld __cnfn shuffle(long4, ulong2);
+long2 __ovld __cnfn shuffle(long8, ulong2);
+long2 __ovld __cnfn shuffle(long16, ulong2);
+
+ulong2 __ovld __cnfn shuffle(ulong2, ulong2);
+ulong2 __ovld __cnfn shuffle(ulong4, ulong2);
+ulong2 __ovld __cnfn shuffle(ulong8, ulong2);
+ulong2 __ovld __cnfn shuffle(ulong16, ulong2);
+
+float2 __ovld __cnfn shuffle(float2, uint2);
+float2 __ovld __cnfn shuffle(float4, uint2);
+float2 __ovld __cnfn shuffle(float8, uint2);
+float2 __ovld __cnfn shuffle(float16, uint2);
+
+char4 __ovld __cnfn shuffle(char2, uchar4);
+char4 __ovld __cnfn shuffle(char4, uchar4);
+char4 __ovld __cnfn shuffle(char8, uchar4);
+char4 __ovld __cnfn shuffle(char16, uchar4);
+
+uchar4 __ovld __cnfn shuffle(uchar2, uchar4);
+uchar4 __ovld __cnfn shuffle(uchar4, uchar4);
+uchar4 __ovld __cnfn shuffle(uchar8, uchar4);
+uchar4 __ovld __cnfn shuffle(uchar16, uchar4);
+
+short4 __ovld __cnfn shuffle(short2, ushort4);
+short4 __ovld __cnfn shuffle(short4, ushort4);
+short4 __ovld __cnfn shuffle(short8, ushort4);
+short4 __ovld __cnfn shuffle(short16, ushort4);
+
+ushort4 __ovld __cnfn shuffle(ushort2, ushort4);
+ushort4 __ovld __cnfn shuffle(ushort4, ushort4);
+ushort4 __ovld __cnfn shuffle(ushort8, ushort4);
+ushort4 __ovld __cnfn shuffle(ushort16, ushort4);
+
+int4 __ovld __cnfn shuffle(int2, uint4);
+int4 __ovld __cnfn shuffle(int4, uint4);
+int4 __ovld __cnfn shuffle(int8, uint4);
+int4 __ovld __cnfn shuffle(int16, uint4);
+
+uint4 __ovld __cnfn shuffle(uint2, uint4);
+uint4 __ovld __cnfn shuffle(uint4, uint4);
+uint4 __ovld __cnfn shuffle(uint8, uint4);
+uint4 __ovld __cnfn shuffle(uint16, uint4);
+
+long4 __ovld __cnfn shuffle(long2, ulong4);
+long4 __ovld __cnfn shuffle(long4, ulong4);
+long4 __ovld __cnfn shuffle(long8, ulong4);
+long4 __ovld __cnfn shuffle(long16, ulong4);
+
+ulong4 __ovld __cnfn shuffle(ulong2, ulong4);
+ulong4 __ovld __cnfn shuffle(ulong4, ulong4);
+ulong4 __ovld __cnfn shuffle(ulong8, ulong4);
+ulong4 __ovld __cnfn shuffle(ulong16, ulong4);
+
+float4 __ovld __cnfn shuffle(float2, uint4);
+float4 __ovld __cnfn shuffle(float4, uint4);
+float4 __ovld __cnfn shuffle(float8, uint4);
+float4 __ovld __cnfn shuffle(float16, uint4);
+
+char8 __ovld __cnfn shuffle(char2, uchar8);
+char8 __ovld __cnfn shuffle(char4, uchar8);
+char8 __ovld __cnfn shuffle(char8, uchar8);
+char8 __ovld __cnfn shuffle(char16, uchar8);
+
+uchar8 __ovld __cnfn shuffle(uchar2, uchar8);
+uchar8 __ovld __cnfn shuffle(uchar4, uchar8);
+uchar8 __ovld __cnfn shuffle(uchar8, uchar8);
+uchar8 __ovld __cnfn shuffle(uchar16, uchar8);
+
+short8 __ovld __cnfn shuffle(short2, ushort8);
+short8 __ovld __cnfn shuffle(short4, ushort8);
+short8 __ovld __cnfn shuffle(short8, ushort8);
+short8 __ovld __cnfn shuffle(short16, ushort8);
+
+ushort8 __ovld __cnfn shuffle(ushort2, ushort8);
+ushort8 __ovld __cnfn shuffle(ushort4, ushort8);
+ushort8 __ovld __cnfn shuffle(ushort8, ushort8);
+ushort8 __ovld __cnfn shuffle(ushort16, ushort8);
+
+int8 __ovld __cnfn shuffle(int2, uint8);
+int8 __ovld __cnfn shuffle(int4, uint8);
+int8 __ovld __cnfn shuffle(int8, uint8);
+int8 __ovld __cnfn shuffle(int16, uint8);
+
+uint8 __ovld __cnfn shuffle(uint2, uint8);
+uint8 __ovld __cnfn shuffle(uint4, uint8);
+uint8 __ovld __cnfn shuffle(uint8, uint8);
+uint8 __ovld __cnfn shuffle(uint16, uint8);
+
+long8 __ovld __cnfn shuffle(long2, ulong8);
+long8 __ovld __cnfn shuffle(long4, ulong8);
+long8 __ovld __cnfn shuffle(long8, ulong8);
+long8 __ovld __cnfn shuffle(long16, ulong8);
+
+ulong8 __ovld __cnfn shuffle(ulong2, ulong8);
+ulong8 __ovld __cnfn shuffle(ulong4, ulong8);
+ulong8 __ovld __cnfn shuffle(ulong8, ulong8);
+ulong8 __ovld __cnfn shuffle(ulong16, ulong8);
+
+float8 __ovld __cnfn shuffle(float2, uint8);
+float8 __ovld __cnfn shuffle(float4, uint8);
+float8 __ovld __cnfn shuffle(float8, uint8);
+float8 __ovld __cnfn shuffle(float16, uint8);
+
+char16 __ovld __cnfn shuffle(char2, uchar16);
+char16 __ovld __cnfn shuffle(char4, uchar16);
+char16 __ovld __cnfn shuffle(char8, uchar16);
+char16 __ovld __cnfn shuffle(char16, uchar16);
+
+uchar16 __ovld __cnfn shuffle(uchar2, uchar16);
+uchar16 __ovld __cnfn shuffle(uchar4, uchar16);
+uchar16 __ovld __cnfn shuffle(uchar8, uchar16);
+uchar16 __ovld __cnfn shuffle(uchar16, uchar16);
+
+short16 __ovld __cnfn shuffle(short2, ushort16);
+short16 __ovld __cnfn shuffle(short4, ushort16);
+short16 __ovld __cnfn shuffle(short8, ushort16);
+short16 __ovld __cnfn shuffle(short16, ushort16);
+
+ushort16 __ovld __cnfn shuffle(ushort2, ushort16);
+ushort16 __ovld __cnfn shuffle(ushort4, ushort16);
+ushort16 __ovld __cnfn shuffle(ushort8, ushort16);
+ushort16 __ovld __cnfn shuffle(ushort16, ushort16);
+
+int16 __ovld __cnfn shuffle(int2, uint16);
+int16 __ovld __cnfn shuffle(int4, uint16);
+int16 __ovld __cnfn shuffle(int8, uint16);
+int16 __ovld __cnfn shuffle(int16, uint16);
+
+uint16 __ovld __cnfn shuffle(uint2, uint16);
+uint16 __ovld __cnfn shuffle(uint4, uint16);
+uint16 __ovld __cnfn shuffle(uint8, uint16);
+uint16 __ovld __cnfn shuffle(uint16, uint16);
+
+long16 __ovld __cnfn shuffle(long2, ulong16);
+long16 __ovld __cnfn shuffle(long4, ulong16);
+long16 __ovld __cnfn shuffle(long8, ulong16);
+long16 __ovld __cnfn shuffle(long16, ulong16);
+
+ulong16 __ovld __cnfn shuffle(ulong2, ulong16);
+ulong16 __ovld __cnfn shuffle(ulong4, ulong16);
+ulong16 __ovld __cnfn shuffle(ulong8, ulong16);
+ulong16 __ovld __cnfn shuffle(ulong16, ulong16);
+
+float16 __ovld __cnfn shuffle(float2, uint16);
+float16 __ovld __cnfn shuffle(float4, uint16);
+float16 __ovld __cnfn shuffle(float8, uint16);
+float16 __ovld __cnfn shuffle(float16, uint16);
#ifdef cl_khr_fp64
-double2 __ovld __cnfn shuffle(double2 x, ulong2 mask);
-double2 __ovld __cnfn shuffle(double4 x, ulong2 mask);
-double2 __ovld __cnfn shuffle(double8 x, ulong2 mask);
-double2 __ovld __cnfn shuffle(double16 x, ulong2 mask);
-
-double4 __ovld __cnfn shuffle(double2 x, ulong4 mask);
-double4 __ovld __cnfn shuffle(double4 x, ulong4 mask);
-double4 __ovld __cnfn shuffle(double8 x, ulong4 mask);
-double4 __ovld __cnfn shuffle(double16 x, ulong4 mask);
-
-double8 __ovld __cnfn shuffle(double2 x, ulong8 mask);
-double8 __ovld __cnfn shuffle(double4 x, ulong8 mask);
-double8 __ovld __cnfn shuffle(double8 x, ulong8 mask);
-double8 __ovld __cnfn shuffle(double16 x, ulong8 mask);
-
-double16 __ovld __cnfn shuffle(double2 x, ulong16 mask);
-double16 __ovld __cnfn shuffle(double4 x, ulong16 mask);
-double16 __ovld __cnfn shuffle(double8 x, ulong16 mask);
-double16 __ovld __cnfn shuffle(double16 x, ulong16 mask);
+double2 __ovld __cnfn shuffle(double2, ulong2);
+double2 __ovld __cnfn shuffle(double4, ulong2);
+double2 __ovld __cnfn shuffle(double8, ulong2);
+double2 __ovld __cnfn shuffle(double16, ulong2);
+
+double4 __ovld __cnfn shuffle(double2, ulong4);
+double4 __ovld __cnfn shuffle(double4, ulong4);
+double4 __ovld __cnfn shuffle(double8, ulong4);
+double4 __ovld __cnfn shuffle(double16, ulong4);
+
+double8 __ovld __cnfn shuffle(double2, ulong8);
+double8 __ovld __cnfn shuffle(double4, ulong8);
+double8 __ovld __cnfn shuffle(double8, ulong8);
+double8 __ovld __cnfn shuffle(double16, ulong8);
+
+double16 __ovld __cnfn shuffle(double2, ulong16);
+double16 __ovld __cnfn shuffle(double4, ulong16);
+double16 __ovld __cnfn shuffle(double8, ulong16);
+double16 __ovld __cnfn shuffle(double16, ulong16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half2 __ovld __cnfn shuffle(half2 x, ushort2 mask);
-half2 __ovld __cnfn shuffle(half4 x, ushort2 mask);
-half2 __ovld __cnfn shuffle(half8 x, ushort2 mask);
-half2 __ovld __cnfn shuffle(half16 x, ushort2 mask);
-
-half4 __ovld __cnfn shuffle(half2 x, ushort4 mask);
-half4 __ovld __cnfn shuffle(half4 x, ushort4 mask);
-half4 __ovld __cnfn shuffle(half8 x, ushort4 mask);
-half4 __ovld __cnfn shuffle(half16 x, ushort4 mask);
-
-half8 __ovld __cnfn shuffle(half2 x, ushort8 mask);
-half8 __ovld __cnfn shuffle(half4 x, ushort8 mask);
-half8 __ovld __cnfn shuffle(half8 x, ushort8 mask);
-half8 __ovld __cnfn shuffle(half16 x, ushort8 mask);
-
-half16 __ovld __cnfn shuffle(half2 x, ushort16 mask);
-half16 __ovld __cnfn shuffle(half4 x, ushort16 mask);
-half16 __ovld __cnfn shuffle(half8 x, ushort16 mask);
-half16 __ovld __cnfn shuffle(half16 x, ushort16 mask);
+half2 __ovld __cnfn shuffle(half2, ushort2);
+half2 __ovld __cnfn shuffle(half4, ushort2);
+half2 __ovld __cnfn shuffle(half8, ushort2);
+half2 __ovld __cnfn shuffle(half16, ushort2);
+
+half4 __ovld __cnfn shuffle(half2, ushort4);
+half4 __ovld __cnfn shuffle(half4, ushort4);
+half4 __ovld __cnfn shuffle(half8, ushort4);
+half4 __ovld __cnfn shuffle(half16, ushort4);
+
+half8 __ovld __cnfn shuffle(half2, ushort8);
+half8 __ovld __cnfn shuffle(half4, ushort8);
+half8 __ovld __cnfn shuffle(half8, ushort8);
+half8 __ovld __cnfn shuffle(half16, ushort8);
+
+half16 __ovld __cnfn shuffle(half2, ushort16);
+half16 __ovld __cnfn shuffle(half4, ushort16);
+half16 __ovld __cnfn shuffle(half8, ushort16);
+half16 __ovld __cnfn shuffle(half16, ushort16);
#endif //cl_khr_fp16
-char2 __ovld __cnfn shuffle2(char2 x, char2 y, uchar2 mask);
-char2 __ovld __cnfn shuffle2(char4 x, char4 y, uchar2 mask);
-char2 __ovld __cnfn shuffle2(char8 x, char8 y, uchar2 mask);
-char2 __ovld __cnfn shuffle2(char16 x, char16 y, uchar2 mask);
-
-uchar2 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar2 mask);
-uchar2 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar2 mask);
-uchar2 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar2 mask);
-uchar2 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar2 mask);
-
-short2 __ovld __cnfn shuffle2(short2 x, short2 y, ushort2 mask);
-short2 __ovld __cnfn shuffle2(short4 x, short4 y, ushort2 mask);
-short2 __ovld __cnfn shuffle2(short8 x, short8 y, ushort2 mask);
-short2 __ovld __cnfn shuffle2(short16 x, short16 y, ushort2 mask);
-
-ushort2 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort2 mask);
-ushort2 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort2 mask);
-ushort2 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort2 mask);
-ushort2 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort2 mask);
-
-int2 __ovld __cnfn shuffle2(int2 x, int2 y, uint2 mask);
-int2 __ovld __cnfn shuffle2(int4 x, int4 y, uint2 mask);
-int2 __ovld __cnfn shuffle2(int8 x, int8 y, uint2 mask);
-int2 __ovld __cnfn shuffle2(int16 x, int16 y, uint2 mask);
-
-uint2 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint2 mask);
-uint2 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint2 mask);
-uint2 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint2 mask);
-uint2 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint2 mask);
-
-long2 __ovld __cnfn shuffle2(long2 x, long2 y, ulong2 mask);
-long2 __ovld __cnfn shuffle2(long4 x, long4 y, ulong2 mask);
-long2 __ovld __cnfn shuffle2(long8 x, long8 y, ulong2 mask);
-long2 __ovld __cnfn shuffle2(long16 x, long16 y, ulong2 mask);
-
-ulong2 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong2 mask);
-ulong2 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong2 mask);
-ulong2 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong2 mask);
-ulong2 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong2 mask);
-
-float2 __ovld __cnfn shuffle2(float2 x, float2 y, uint2 mask);
-float2 __ovld __cnfn shuffle2(float4 x, float4 y, uint2 mask);
-float2 __ovld __cnfn shuffle2(float8 x, float8 y, uint2 mask);
-float2 __ovld __cnfn shuffle2(float16 x, float16 y, uint2 mask);
-
-char4 __ovld __cnfn shuffle2(char2 x, char2 y, uchar4 mask);
-char4 __ovld __cnfn shuffle2(char4 x, char4 y, uchar4 mask);
-char4 __ovld __cnfn shuffle2(char8 x, char8 y, uchar4 mask);
-char4 __ovld __cnfn shuffle2(char16 x, char16 y, uchar4 mask);
-
-uchar4 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar4 mask);
-uchar4 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar4 mask);
-uchar4 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar4 mask);
-uchar4 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar4 mask);
-
-short4 __ovld __cnfn shuffle2(short2 x, short2 y, ushort4 mask);
-short4 __ovld __cnfn shuffle2(short4 x, short4 y, ushort4 mask);
-short4 __ovld __cnfn shuffle2(short8 x, short8 y, ushort4 mask);
-short4 __ovld __cnfn shuffle2(short16 x, short16 y, ushort4 mask);
-
-ushort4 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort4 mask);
-ushort4 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort4 mask);
-ushort4 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort4 mask);
-ushort4 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort4 mask);
-
-int4 __ovld __cnfn shuffle2(int2 x, int2 y, uint4 mask);
-int4 __ovld __cnfn shuffle2(int4 x, int4 y, uint4 mask);
-int4 __ovld __cnfn shuffle2(int8 x, int8 y, uint4 mask);
-int4 __ovld __cnfn shuffle2(int16 x, int16 y, uint4 mask);
-
-uint4 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint4 mask);
-uint4 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint4 mask);
-uint4 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint4 mask);
-uint4 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint4 mask);
-
-long4 __ovld __cnfn shuffle2(long2 x, long2 y, ulong4 mask);
-long4 __ovld __cnfn shuffle2(long4 x, long4 y, ulong4 mask);
-long4 __ovld __cnfn shuffle2(long8 x, long8 y, ulong4 mask);
-long4 __ovld __cnfn shuffle2(long16 x, long16 y, ulong4 mask);
-
-ulong4 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong4 mask);
-ulong4 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong4 mask);
-ulong4 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong4 mask);
-ulong4 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong4 mask);
-
-float4 __ovld __cnfn shuffle2(float2 x, float2 y, uint4 mask);
-float4 __ovld __cnfn shuffle2(float4 x, float4 y, uint4 mask);
-float4 __ovld __cnfn shuffle2(float8 x, float8 y, uint4 mask);
-float4 __ovld __cnfn shuffle2(float16 x, float16 y, uint4 mask);
-
-char8 __ovld __cnfn shuffle2(char2 x, char2 y, uchar8 mask);
-char8 __ovld __cnfn shuffle2(char4 x, char4 y, uchar8 mask);
-char8 __ovld __cnfn shuffle2(char8 x, char8 y, uchar8 mask);
-char8 __ovld __cnfn shuffle2(char16 x, char16 y, uchar8 mask);
-
-uchar8 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar8 mask);
-uchar8 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar8 mask);
-uchar8 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar8 mask);
-uchar8 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar8 mask);
-
-short8 __ovld __cnfn shuffle2(short2 x, short2 y, ushort8 mask);
-short8 __ovld __cnfn shuffle2(short4 x, short4 y, ushort8 mask);
-short8 __ovld __cnfn shuffle2(short8 x, short8 y, ushort8 mask);
-short8 __ovld __cnfn shuffle2(short16 x, short16 y, ushort8 mask);
-
-ushort8 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort8 mask);
-ushort8 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort8 mask);
-ushort8 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort8 mask);
-ushort8 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort8 mask);
-
-int8 __ovld __cnfn shuffle2(int2 x, int2 y, uint8 mask);
-int8 __ovld __cnfn shuffle2(int4 x, int4 y, uint8 mask);
-int8 __ovld __cnfn shuffle2(int8 x, int8 y, uint8 mask);
-int8 __ovld __cnfn shuffle2(int16 x, int16 y, uint8 mask);
-
-uint8 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint8 mask);
-uint8 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint8 mask);
-uint8 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint8 mask);
-uint8 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint8 mask);
-
-long8 __ovld __cnfn shuffle2(long2 x, long2 y, ulong8 mask);
-long8 __ovld __cnfn shuffle2(long4 x, long4 y, ulong8 mask);
-long8 __ovld __cnfn shuffle2(long8 x, long8 y, ulong8 mask);
-long8 __ovld __cnfn shuffle2(long16 x, long16 y, ulong8 mask);
-
-ulong8 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong8 mask);
-ulong8 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong8 mask);
-ulong8 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong8 mask);
-ulong8 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong8 mask);
-
-float8 __ovld __cnfn shuffle2(float2 x, float2 y, uint8 mask);
-float8 __ovld __cnfn shuffle2(float4 x, float4 y, uint8 mask);
-float8 __ovld __cnfn shuffle2(float8 x, float8 y, uint8 mask);
-float8 __ovld __cnfn shuffle2(float16 x, float16 y, uint8 mask);
-
-char16 __ovld __cnfn shuffle2(char2 x, char2 y, uchar16 mask);
-char16 __ovld __cnfn shuffle2(char4 x, char4 y, uchar16 mask);
-char16 __ovld __cnfn shuffle2(char8 x, char8 y, uchar16 mask);
-char16 __ovld __cnfn shuffle2(char16 x, char16 y, uchar16 mask);
-
-uchar16 __ovld __cnfn shuffle2(uchar2 x, uchar2 y, uchar16 mask);
-uchar16 __ovld __cnfn shuffle2(uchar4 x, uchar4 y, uchar16 mask);
-uchar16 __ovld __cnfn shuffle2(uchar8 x, uchar8 y, uchar16 mask);
-uchar16 __ovld __cnfn shuffle2(uchar16 x, uchar16 y, uchar16 mask);
-
-short16 __ovld __cnfn shuffle2(short2 x, short2 y, ushort16 mask);
-short16 __ovld __cnfn shuffle2(short4 x, short4 y, ushort16 mask);
-short16 __ovld __cnfn shuffle2(short8 x, short8 y, ushort16 mask);
-short16 __ovld __cnfn shuffle2(short16 x, short16 y, ushort16 mask);
-
-ushort16 __ovld __cnfn shuffle2(ushort2 x, ushort2 y, ushort16 mask);
-ushort16 __ovld __cnfn shuffle2(ushort4 x, ushort4 y, ushort16 mask);
-ushort16 __ovld __cnfn shuffle2(ushort8 x, ushort8 y, ushort16 mask);
-ushort16 __ovld __cnfn shuffle2(ushort16 x, ushort16 y, ushort16 mask);
-
-int16 __ovld __cnfn shuffle2(int2 x, int2 y, uint16 mask);
-int16 __ovld __cnfn shuffle2(int4 x, int4 y, uint16 mask);
-int16 __ovld __cnfn shuffle2(int8 x, int8 y, uint16 mask);
-int16 __ovld __cnfn shuffle2(int16 x, int16 y, uint16 mask);
-
-uint16 __ovld __cnfn shuffle2(uint2 x, uint2 y, uint16 mask);
-uint16 __ovld __cnfn shuffle2(uint4 x, uint4 y, uint16 mask);
-uint16 __ovld __cnfn shuffle2(uint8 x, uint8 y, uint16 mask);
-uint16 __ovld __cnfn shuffle2(uint16 x, uint16 y, uint16 mask);
-
-long16 __ovld __cnfn shuffle2(long2 x, long2 y, ulong16 mask);
-long16 __ovld __cnfn shuffle2(long4 x, long4 y, ulong16 mask);
-long16 __ovld __cnfn shuffle2(long8 x, long8 y, ulong16 mask);
-long16 __ovld __cnfn shuffle2(long16 x, long16 y, ulong16 mask);
-
-ulong16 __ovld __cnfn shuffle2(ulong2 x, ulong2 y, ulong16 mask);
-ulong16 __ovld __cnfn shuffle2(ulong4 x, ulong4 y, ulong16 mask);
-ulong16 __ovld __cnfn shuffle2(ulong8 x, ulong8 y, ulong16 mask);
-ulong16 __ovld __cnfn shuffle2(ulong16 x, ulong16 y, ulong16 mask);
-
-float16 __ovld __cnfn shuffle2(float2 x, float2 y, uint16 mask);
-float16 __ovld __cnfn shuffle2(float4 x, float4 y, uint16 mask);
-float16 __ovld __cnfn shuffle2(float8 x, float8 y, uint16 mask);
-float16 __ovld __cnfn shuffle2(float16 x, float16 y, uint16 mask);
+char2 __ovld __cnfn shuffle2(char2, char2, uchar2);
+char2 __ovld __cnfn shuffle2(char4, char4, uchar2);
+char2 __ovld __cnfn shuffle2(char8, char8, uchar2);
+char2 __ovld __cnfn shuffle2(char16, char16, uchar2);
+
+uchar2 __ovld __cnfn shuffle2(uchar2, uchar2, uchar2);
+uchar2 __ovld __cnfn shuffle2(uchar4, uchar4, uchar2);
+uchar2 __ovld __cnfn shuffle2(uchar8, uchar8, uchar2);
+uchar2 __ovld __cnfn shuffle2(uchar16, uchar16, uchar2);
+
+short2 __ovld __cnfn shuffle2(short2, short2, ushort2);
+short2 __ovld __cnfn shuffle2(short4, short4, ushort2);
+short2 __ovld __cnfn shuffle2(short8, short8, ushort2);
+short2 __ovld __cnfn shuffle2(short16, short16, ushort2);
+
+ushort2 __ovld __cnfn shuffle2(ushort2, ushort2, ushort2);
+ushort2 __ovld __cnfn shuffle2(ushort4, ushort4, ushort2);
+ushort2 __ovld __cnfn shuffle2(ushort8, ushort8, ushort2);
+ushort2 __ovld __cnfn shuffle2(ushort16, ushort16, ushort2);
+
+int2 __ovld __cnfn shuffle2(int2, int2, uint2);
+int2 __ovld __cnfn shuffle2(int4, int4, uint2);
+int2 __ovld __cnfn shuffle2(int8, int8, uint2);
+int2 __ovld __cnfn shuffle2(int16, int16, uint2);
+
+uint2 __ovld __cnfn shuffle2(uint2, uint2, uint2);
+uint2 __ovld __cnfn shuffle2(uint4, uint4, uint2);
+uint2 __ovld __cnfn shuffle2(uint8, uint8, uint2);
+uint2 __ovld __cnfn shuffle2(uint16, uint16, uint2);
+
+long2 __ovld __cnfn shuffle2(long2, long2, ulong2);
+long2 __ovld __cnfn shuffle2(long4, long4, ulong2);
+long2 __ovld __cnfn shuffle2(long8, long8, ulong2);
+long2 __ovld __cnfn shuffle2(long16, long16, ulong2);
+
+ulong2 __ovld __cnfn shuffle2(ulong2, ulong2, ulong2);
+ulong2 __ovld __cnfn shuffle2(ulong4, ulong4, ulong2);
+ulong2 __ovld __cnfn shuffle2(ulong8, ulong8, ulong2);
+ulong2 __ovld __cnfn shuffle2(ulong16, ulong16, ulong2);
+
+float2 __ovld __cnfn shuffle2(float2, float2, uint2);
+float2 __ovld __cnfn shuffle2(float4, float4, uint2);
+float2 __ovld __cnfn shuffle2(float8, float8, uint2);
+float2 __ovld __cnfn shuffle2(float16, float16, uint2);
+
+char4 __ovld __cnfn shuffle2(char2, char2, uchar4);
+char4 __ovld __cnfn shuffle2(char4, char4, uchar4);
+char4 __ovld __cnfn shuffle2(char8, char8, uchar4);
+char4 __ovld __cnfn shuffle2(char16, char16, uchar4);
+
+uchar4 __ovld __cnfn shuffle2(uchar2, uchar2, uchar4);
+uchar4 __ovld __cnfn shuffle2(uchar4, uchar4, uchar4);
+uchar4 __ovld __cnfn shuffle2(uchar8, uchar8, uchar4);
+uchar4 __ovld __cnfn shuffle2(uchar16, uchar16, uchar4);
+
+short4 __ovld __cnfn shuffle2(short2, short2, ushort4);
+short4 __ovld __cnfn shuffle2(short4, short4, ushort4);
+short4 __ovld __cnfn shuffle2(short8, short8, ushort4);
+short4 __ovld __cnfn shuffle2(short16, short16, ushort4);
+
+ushort4 __ovld __cnfn shuffle2(ushort2, ushort2, ushort4);
+ushort4 __ovld __cnfn shuffle2(ushort4, ushort4, ushort4);
+ushort4 __ovld __cnfn shuffle2(ushort8, ushort8, ushort4);
+ushort4 __ovld __cnfn shuffle2(ushort16, ushort16, ushort4);
+
+int4 __ovld __cnfn shuffle2(int2, int2, uint4);
+int4 __ovld __cnfn shuffle2(int4, int4, uint4);
+int4 __ovld __cnfn shuffle2(int8, int8, uint4);
+int4 __ovld __cnfn shuffle2(int16, int16, uint4);
+
+uint4 __ovld __cnfn shuffle2(uint2, uint2, uint4);
+uint4 __ovld __cnfn shuffle2(uint4, uint4, uint4);
+uint4 __ovld __cnfn shuffle2(uint8, uint8, uint4);
+uint4 __ovld __cnfn shuffle2(uint16, uint16, uint4);
+
+long4 __ovld __cnfn shuffle2(long2, long2, ulong4);
+long4 __ovld __cnfn shuffle2(long4, long4, ulong4);
+long4 __ovld __cnfn shuffle2(long8, long8, ulong4);
+long4 __ovld __cnfn shuffle2(long16, long16, ulong4);
+
+ulong4 __ovld __cnfn shuffle2(ulong2, ulong2, ulong4);
+ulong4 __ovld __cnfn shuffle2(ulong4, ulong4, ulong4);
+ulong4 __ovld __cnfn shuffle2(ulong8, ulong8, ulong4);
+ulong4 __ovld __cnfn shuffle2(ulong16, ulong16, ulong4);
+
+float4 __ovld __cnfn shuffle2(float2, float2, uint4);
+float4 __ovld __cnfn shuffle2(float4, float4, uint4);
+float4 __ovld __cnfn shuffle2(float8, float8, uint4);
+float4 __ovld __cnfn shuffle2(float16, float16, uint4);
+
+char8 __ovld __cnfn shuffle2(char2, char2, uchar8);
+char8 __ovld __cnfn shuffle2(char4, char4, uchar8);
+char8 __ovld __cnfn shuffle2(char8, char8, uchar8);
+char8 __ovld __cnfn shuffle2(char16, char16, uchar8);
+
+uchar8 __ovld __cnfn shuffle2(uchar2, uchar2, uchar8);
+uchar8 __ovld __cnfn shuffle2(uchar4, uchar4, uchar8);
+uchar8 __ovld __cnfn shuffle2(uchar8, uchar8, uchar8);
+uchar8 __ovld __cnfn shuffle2(uchar16, uchar16, uchar8);
+
+short8 __ovld __cnfn shuffle2(short2, short2, ushort8);
+short8 __ovld __cnfn shuffle2(short4, short4, ushort8);
+short8 __ovld __cnfn shuffle2(short8, short8, ushort8);
+short8 __ovld __cnfn shuffle2(short16, short16, ushort8);
+
+ushort8 __ovld __cnfn shuffle2(ushort2, ushort2, ushort8);
+ushort8 __ovld __cnfn shuffle2(ushort4, ushort4, ushort8);
+ushort8 __ovld __cnfn shuffle2(ushort8, ushort8, ushort8);
+ushort8 __ovld __cnfn shuffle2(ushort16, ushort16, ushort8);
+
+int8 __ovld __cnfn shuffle2(int2, int2, uint8);
+int8 __ovld __cnfn shuffle2(int4, int4, uint8);
+int8 __ovld __cnfn shuffle2(int8, int8, uint8);
+int8 __ovld __cnfn shuffle2(int16, int16, uint8);
+
+uint8 __ovld __cnfn shuffle2(uint2, uint2, uint8);
+uint8 __ovld __cnfn shuffle2(uint4, uint4, uint8);
+uint8 __ovld __cnfn shuffle2(uint8, uint8, uint8);
+uint8 __ovld __cnfn shuffle2(uint16, uint16, uint8);
+
+long8 __ovld __cnfn shuffle2(long2, long2, ulong8);
+long8 __ovld __cnfn shuffle2(long4, long4, ulong8);
+long8 __ovld __cnfn shuffle2(long8, long8, ulong8);
+long8 __ovld __cnfn shuffle2(long16, long16, ulong8);
+
+ulong8 __ovld __cnfn shuffle2(ulong2, ulong2, ulong8);
+ulong8 __ovld __cnfn shuffle2(ulong4, ulong4, ulong8);
+ulong8 __ovld __cnfn shuffle2(ulong8, ulong8, ulong8);
+ulong8 __ovld __cnfn shuffle2(ulong16, ulong16, ulong8);
+
+float8 __ovld __cnfn shuffle2(float2, float2, uint8);
+float8 __ovld __cnfn shuffle2(float4, float4, uint8);
+float8 __ovld __cnfn shuffle2(float8, float8, uint8);
+float8 __ovld __cnfn shuffle2(float16, float16, uint8);
+
+char16 __ovld __cnfn shuffle2(char2, char2, uchar16);
+char16 __ovld __cnfn shuffle2(char4, char4, uchar16);
+char16 __ovld __cnfn shuffle2(char8, char8, uchar16);
+char16 __ovld __cnfn shuffle2(char16, char16, uchar16);
+
+uchar16 __ovld __cnfn shuffle2(uchar2, uchar2, uchar16);
+uchar16 __ovld __cnfn shuffle2(uchar4, uchar4, uchar16);
+uchar16 __ovld __cnfn shuffle2(uchar8, uchar8, uchar16);
+uchar16 __ovld __cnfn shuffle2(uchar16, uchar16, uchar16);
+
+short16 __ovld __cnfn shuffle2(short2, short2, ushort16);
+short16 __ovld __cnfn shuffle2(short4, short4, ushort16);
+short16 __ovld __cnfn shuffle2(short8, short8, ushort16);
+short16 __ovld __cnfn shuffle2(short16, short16, ushort16);
+
+ushort16 __ovld __cnfn shuffle2(ushort2, ushort2, ushort16);
+ushort16 __ovld __cnfn shuffle2(ushort4, ushort4, ushort16);
+ushort16 __ovld __cnfn shuffle2(ushort8, ushort8, ushort16);
+ushort16 __ovld __cnfn shuffle2(ushort16, ushort16, ushort16);
+
+int16 __ovld __cnfn shuffle2(int2, int2, uint16);
+int16 __ovld __cnfn shuffle2(int4, int4, uint16);
+int16 __ovld __cnfn shuffle2(int8, int8, uint16);
+int16 __ovld __cnfn shuffle2(int16, int16, uint16);
+
+uint16 __ovld __cnfn shuffle2(uint2, uint2, uint16);
+uint16 __ovld __cnfn shuffle2(uint4, uint4, uint16);
+uint16 __ovld __cnfn shuffle2(uint8, uint8, uint16);
+uint16 __ovld __cnfn shuffle2(uint16, uint16, uint16);
+
+long16 __ovld __cnfn shuffle2(long2, long2, ulong16);
+long16 __ovld __cnfn shuffle2(long4, long4, ulong16);
+long16 __ovld __cnfn shuffle2(long8, long8, ulong16);
+long16 __ovld __cnfn shuffle2(long16, long16, ulong16);
+
+ulong16 __ovld __cnfn shuffle2(ulong2, ulong2, ulong16);
+ulong16 __ovld __cnfn shuffle2(ulong4, ulong4, ulong16);
+ulong16 __ovld __cnfn shuffle2(ulong8, ulong8, ulong16);
+ulong16 __ovld __cnfn shuffle2(ulong16, ulong16, ulong16);
+
+float16 __ovld __cnfn shuffle2(float2, float2, uint16);
+float16 __ovld __cnfn shuffle2(float4, float4, uint16);
+float16 __ovld __cnfn shuffle2(float8, float8, uint16);
+float16 __ovld __cnfn shuffle2(float16, float16, uint16);
#ifdef cl_khr_fp64
-double2 __ovld __cnfn shuffle2(double2 x, double2 y, ulong2 mask);
-double2 __ovld __cnfn shuffle2(double4 x, double4 y, ulong2 mask);
-double2 __ovld __cnfn shuffle2(double8 x, double8 y, ulong2 mask);
-double2 __ovld __cnfn shuffle2(double16 x, double16 y, ulong2 mask);
-
-double4 __ovld __cnfn shuffle2(double2 x, double2 y, ulong4 mask);
-double4 __ovld __cnfn shuffle2(double4 x, double4 y, ulong4 mask);
-double4 __ovld __cnfn shuffle2(double8 x, double8 y, ulong4 mask);
-double4 __ovld __cnfn shuffle2(double16 x, double16 y, ulong4 mask);
-
-double8 __ovld __cnfn shuffle2(double2 x, double2 y, ulong8 mask);
-double8 __ovld __cnfn shuffle2(double4 x, double4 y, ulong8 mask);
-double8 __ovld __cnfn shuffle2(double8 x, double8 y, ulong8 mask);
-double8 __ovld __cnfn shuffle2(double16 x, double16 y, ulong8 mask);
-
-double16 __ovld __cnfn shuffle2(double2 x, double2 y, ulong16 mask);
-double16 __ovld __cnfn shuffle2(double4 x, double4 y, ulong16 mask);
-double16 __ovld __cnfn shuffle2(double8 x, double8 y, ulong16 mask);
-double16 __ovld __cnfn shuffle2(double16 x, double16 y, ulong16 mask);
+double2 __ovld __cnfn shuffle2(double2, double2, ulong2);
+double2 __ovld __cnfn shuffle2(double4, double4, ulong2);
+double2 __ovld __cnfn shuffle2(double8, double8, ulong2);
+double2 __ovld __cnfn shuffle2(double16, double16, ulong2);
+
+double4 __ovld __cnfn shuffle2(double2, double2, ulong4);
+double4 __ovld __cnfn shuffle2(double4, double4, ulong4);
+double4 __ovld __cnfn shuffle2(double8, double8, ulong4);
+double4 __ovld __cnfn shuffle2(double16, double16, ulong4);
+
+double8 __ovld __cnfn shuffle2(double2, double2, ulong8);
+double8 __ovld __cnfn shuffle2(double4, double4, ulong8);
+double8 __ovld __cnfn shuffle2(double8, double8, ulong8);
+double8 __ovld __cnfn shuffle2(double16, double16, ulong8);
+
+double16 __ovld __cnfn shuffle2(double2, double2, ulong16);
+double16 __ovld __cnfn shuffle2(double4, double4, ulong16);
+double16 __ovld __cnfn shuffle2(double8, double8, ulong16);
+double16 __ovld __cnfn shuffle2(double16, double16, ulong16);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half2 __ovld __cnfn shuffle2(half2 x, half2 y, ushort2 mask);
-half2 __ovld __cnfn shuffle2(half4 x, half4 y, ushort2 mask);
-half2 __ovld __cnfn shuffle2(half8 x, half8 y, ushort2 mask);
-half2 __ovld __cnfn shuffle2(half16 x, half16 y, ushort2 mask);
-
-half4 __ovld __cnfn shuffle2(half2 x, half2 y, ushort4 mask);
-half4 __ovld __cnfn shuffle2(half4 x, half4 y, ushort4 mask);
-half4 __ovld __cnfn shuffle2(half8 x, half8 y, ushort4 mask);
-half4 __ovld __cnfn shuffle2(half16 x, half16 y, ushort4 mask);
-
-half8 __ovld __cnfn shuffle2(half2 x, half2 y, ushort8 mask);
-half8 __ovld __cnfn shuffle2(half4 x, half4 y, ushort8 mask);
-half8 __ovld __cnfn shuffle2(half8 x, half8 y, ushort8 mask);
-half8 __ovld __cnfn shuffle2(half16 x, half16 y, ushort8 mask);
-
-half16 __ovld __cnfn shuffle2(half2 x, half2 y, ushort16 mask);
-half16 __ovld __cnfn shuffle2(half4 x, half4 y, ushort16 mask);
-half16 __ovld __cnfn shuffle2(half8 x, half8 y, ushort16 mask);
-half16 __ovld __cnfn shuffle2(half16 x, half16 y, ushort16 mask);
+half2 __ovld __cnfn shuffle2(half2, half2, ushort2);
+half2 __ovld __cnfn shuffle2(half4, half4, ushort2);
+half2 __ovld __cnfn shuffle2(half8, half8, ushort2);
+half2 __ovld __cnfn shuffle2(half16, half16, ushort2);
+
+half4 __ovld __cnfn shuffle2(half2, half2, ushort4);
+half4 __ovld __cnfn shuffle2(half4, half4, ushort4);
+half4 __ovld __cnfn shuffle2(half8, half8, ushort4);
+half4 __ovld __cnfn shuffle2(half16, half16, ushort4);
+
+half8 __ovld __cnfn shuffle2(half2, half2, ushort8);
+half8 __ovld __cnfn shuffle2(half4, half4, ushort8);
+half8 __ovld __cnfn shuffle2(half8, half8, ushort8);
+half8 __ovld __cnfn shuffle2(half16, half16, ushort8);
+
+half16 __ovld __cnfn shuffle2(half2, half2, ushort16);
+half16 __ovld __cnfn shuffle2(half4, half4, ushort16);
+half16 __ovld __cnfn shuffle2(half8, half8, ushort16);
+half16 __ovld __cnfn shuffle2(half16, half16, ushort16);
#endif //cl_khr_fp16
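// --- Illustrative usage sketch (editor's addition, not part of this header) ---
// shuffle2 indexes into the concatenation of its two inputs: with 4-element
// arguments, mask components 0-3 select from the first vector and 4-7 from
// the second, and only the low-order index bits of each mask component are
// significant. All names in this hypothetical kernel are illustrative.
__kernel void interleave_xy(__global float4 *out,
                            __global const float4 *a,
                            __global const float4 *b) {
    size_t i = get_global_id(0);
    uint4 mask = (uint4)(0, 4, 1, 5);     /* selects a.x, b.x, a.y, b.y */
    out[i] = shuffle2(a[i], b[i], mask);  /* float4/uint4 overload above */
}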
// OpenCL v1.1 s6.11.3, v1.2 s6.12.14, v2.0 s6.13.14 - Image Read and Write Functions
@@ -15433,123 +15193,123 @@ half16 __ovld __cnfn shuffle2(half16 x, half16 y, ushort16 mask);
* in the description above are undefined.
*/
-float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, int2 coord);
-float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord);
+float4 __ovld __purefn read_imagef(read_only image2d_t, sampler_t, int2);
+float4 __ovld __purefn read_imagef(read_only image2d_t, sampler_t, float2);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, int2 coord);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, int2 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord);
+int4 __ovld __purefn read_imagei(read_only image2d_t, sampler_t, int2);
+int4 __ovld __purefn read_imagei(read_only image2d_t, sampler_t, float2);
+uint4 __ovld __purefn read_imageui(read_only image2d_t, sampler_t, int2);
+uint4 __ovld __purefn read_imageui(read_only image2d_t, sampler_t, float2);
-float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, int4 coord);
-float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord);
+float4 __ovld __purefn read_imagef(read_only image3d_t, sampler_t, int4);
+float4 __ovld __purefn read_imagef(read_only image3d_t, sampler_t, float4);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, int4 coord);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, int4 coord);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord);
+int4 __ovld __purefn read_imagei(read_only image3d_t, sampler_t, int4);
+int4 __ovld __purefn read_imagei(read_only image3d_t, sampler_t, float4);
+uint4 __ovld __purefn read_imageui(read_only image3d_t, sampler_t, int4);
+uint4 __ovld __purefn read_imageui(read_only image3d_t, sampler_t, float4);
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
+float4 __ovld __purefn read_imagef(read_only image2d_array_t, sampler_t, int4);
+float4 __ovld __purefn read_imagef(read_only image2d_array_t, sampler_t, float4);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, int4 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord);
+int4 __ovld __purefn read_imagei(read_only image2d_array_t, sampler_t, int4);
+int4 __ovld __purefn read_imagei(read_only image2d_array_t, sampler_t, float4);
+uint4 __ovld __purefn read_imageui(read_only image2d_array_t, sampler_t, int4);
+uint4 __ovld __purefn read_imageui(read_only image2d_array_t, sampler_t, float4);
#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, int coord);
-float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord);
+float4 __ovld __purefn read_imagef(read_only image1d_t, sampler_t, int);
+float4 __ovld __purefn read_imagef(read_only image1d_t, sampler_t, float);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, int coord);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, int coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord);
+int4 __ovld __purefn read_imagei(read_only image1d_t, sampler_t, int);
+int4 __ovld __purefn read_imagei(read_only image1d_t, sampler_t, float);
+uint4 __ovld __purefn read_imageui(read_only image1d_t, sampler_t, int);
+uint4 __ovld __purefn read_imageui(read_only image1d_t, sampler_t, float);
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
+float4 __ovld __purefn read_imagef(read_only image1d_array_t, sampler_t, int2);
+float4 __ovld __purefn read_imagef(read_only image1d_array_t, sampler_t, float2);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, int2 coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord);
+int4 __ovld __purefn read_imagei(read_only image1d_array_t, sampler_t, int2);
+int4 __ovld __purefn read_imagei(read_only image1d_array_t, sampler_t, float2);
+uint4 __ovld __purefn read_imageui(read_only image1d_array_t, sampler_t, int2);
+uint4 __ovld __purefn read_imageui(read_only image1d_array_t, sampler_t, float2);
#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
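// --- Illustrative usage sketch (editor's addition, not part of this header) ---
// The sampler-based reads pair the image with a sampler_t that fixes
// addressing, filtering, and coordinate normalization; the coordinate type
// (int vs. float, scalar vs. vector) selects the overload. Hypothetical kernel:
__constant sampler_t nearest_smp = CLK_NORMALIZED_COORDS_FALSE |
                                   CLK_ADDRESS_CLAMP_TO_EDGE |
                                   CLK_FILTER_NEAREST;

__kernel void copy_texel(read_only image2d_t src, write_only image2d_t dst) {
    int2 pos = (int2)(get_global_id(0), get_global_id(1));
    float4 px = read_imagef(src, nearest_smp, pos);  /* int2 coordinate overload */
    write_imagef(dst, pos, px);
}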
#ifdef cl_khr_depth_images
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord);
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, int2 coord);
+float __ovld __purefn read_imagef(read_only image2d_depth_t, sampler_t, float2);
+float __ovld __purefn read_imagef(read_only image2d_depth_t, sampler_t, int2);
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord);
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, int4 coord);
+float __ovld __purefn read_imagef(read_only image2d_array_depth_t, sampler_t, float4);
+float __ovld __purefn read_imagef(read_only image2d_array_depth_t, sampler_t, int4);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-float4 __purefn __ovld read_imagef(read_only image2d_msaa_t image, int2 coord, int sample);
-int4 __purefn __ovld read_imagei(read_only image2d_msaa_t image, int2 coord, int sample);
-uint4 __purefn __ovld read_imageui(read_only image2d_msaa_t image, int2 coord, int sample);
+float4 __ovld __purefn read_imagef(read_only image2d_msaa_t, int2, int);
+int4 __ovld __purefn read_imagei(read_only image2d_msaa_t, int2, int);
+uint4 __ovld __purefn read_imageui(read_only image2d_msaa_t, int2, int);
-float __purefn __ovld read_imagef(read_only image2d_msaa_depth_t image, int2 coord, int sample);
+float __ovld __purefn read_imagef(read_only image2d_msaa_depth_t, int2, int);
-float4 __purefn __ovld read_imagef(read_only image2d_array_msaa_t image, int4 coord, int sample);
-int4 __purefn __ovld read_imagei(read_only image2d_array_msaa_t image, int4 coord, int sample);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_msaa_t image, int4 coord, int sample);
+float4 __ovld __purefn read_imagef(read_only image2d_array_msaa_t, int4, int);
+int4 __ovld __purefn read_imagei(read_only image2d_array_msaa_t, int4, int);
+uint4 __ovld __purefn read_imageui(read_only image2d_array_msaa_t, int4, int);
-float __purefn __ovld read_imagef(read_only image2d_array_msaa_depth_t image, int4 coord, int sample);
+float __ovld __purefn read_imagef(read_only image2d_array_msaa_depth_t, int4, int);
#endif //cl_khr_gl_msaa_sharing
// OpenCL Extension v2.0 s9.18 - Mipmaps
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
#ifdef cl_khr_mipmap_image
-float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord, float lod);
+float4 __ovld __purefn read_imagef(read_only image1d_t, sampler_t, float, float);
+int4 __ovld __purefn read_imagei(read_only image1d_t, sampler_t, float, float);
+uint4 __ovld __purefn read_imageui(read_only image1d_t, sampler_t, float, float);
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
+float4 __ovld __purefn read_imagef(read_only image1d_array_t, sampler_t, float2, float);
+int4 __ovld __purefn read_imagei(read_only image1d_array_t, sampler_t, float2, float);
+uint4 __ovld __purefn read_imageui(read_only image1d_array_t, sampler_t, float2, float);
-float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord, float lod);
+float4 __ovld __purefn read_imagef(read_only image2d_t, sampler_t, float2, float);
+int4 __ovld __purefn read_imagei(read_only image2d_t, sampler_t, float2, float);
+uint4 __ovld __purefn read_imageui(read_only image2d_t, sampler_t, float2, float);
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord, float lod);
+float __ovld __purefn read_imagef(read_only image2d_depth_t, sampler_t, float2, float);
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
+float4 __ovld __purefn read_imagef(read_only image2d_array_t, sampler_t, float4, float);
+int4 __ovld __purefn read_imagei(read_only image2d_array_t, sampler_t, float4, float);
+uint4 __ovld __purefn read_imageui(read_only image2d_array_t, sampler_t, float4, float);
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord, float lod);
+float __ovld __purefn read_imagef(read_only image2d_array_depth_t, sampler_t, float4, float);
-float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float lod);
+float4 __ovld __purefn read_imagef(read_only image3d_t, sampler_t, float4, float);
+int4 __ovld __purefn read_imagei(read_only image3d_t, sampler_t, float4, float);
+uint4 __ovld __purefn read_imageui(read_only image3d_t, sampler_t, float4, float);
-float4 __purefn __ovld read_imagef(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
+float4 __ovld __purefn read_imagef(read_only image1d_t, sampler_t, float, float, float);
+int4 __ovld __purefn read_imagei(read_only image1d_t, sampler_t, float, float, float);
+uint4 __ovld __purefn read_imageui(read_only image1d_t, sampler_t, float, float, float);
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
+float4 __ovld __purefn read_imagef(read_only image1d_array_t, sampler_t, float2, float, float);
+int4 __ovld __purefn read_imagei(read_only image1d_array_t, sampler_t, float2, float, float);
+uint4 __ovld __purefn read_imageui(read_only image1d_array_t, sampler_t, float2, float, float);
-float4 __purefn __ovld read_imagef(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
+float4 __ovld __purefn read_imagef(read_only image2d_t, sampler_t, float2, float2, float2);
+int4 __ovld __purefn read_imagei(read_only image2d_t, sampler_t, float2, float2, float2);
+uint4 __ovld __purefn read_imageui(read_only image2d_t, sampler_t, float2, float2, float2);
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
+float __ovld __purefn read_imagef(read_only image2d_depth_t, sampler_t, float2, float2, float2);
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
+float4 __ovld __purefn read_imagef(read_only image2d_array_t, sampler_t, float4, float2, float2);
+int4 __ovld __purefn read_imagei(read_only image2d_array_t, sampler_t, float4, float2, float2);
+uint4 __ovld __purefn read_imageui(read_only image2d_array_t, sampler_t, float4, float2, float2);
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
+float __ovld __purefn read_imagef(read_only image2d_array_depth_t, sampler_t, float4, float2, float2);
-float4 __purefn __ovld read_imagef(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
+float4 __ovld __purefn read_imagef(read_only image3d_t, sampler_t, float4, float4, float4);
+int4 __ovld __purefn read_imagei(read_only image3d_t, sampler_t, float4, float4, float4);
+uint4 __ovld __purefn read_imageui(read_only image3d_t, sampler_t, float4, float4, float4);
#endif //cl_khr_mipmap_image
#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
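// --- Illustrative usage sketch (editor's addition, not part of this header) ---
// cl_khr_mipmap_image adds overloads taking an explicit level of detail
// (trailing float lod) or a pair of gradients from which one is derived.
// A hypothetical explicit-lod sample; the 256x256 image size is assumed.
__constant sampler_t mip_smp = CLK_NORMALIZED_COORDS_TRUE |
                               CLK_ADDRESS_CLAMP_TO_EDGE |
                               CLK_FILTER_LINEAR;

__kernel void sample_lod(read_only image2d_t img, __global float4 *out, float lod) {
    int x = get_global_id(0), y = get_global_id(1);
    float2 uv = (float2)(x, y) / 256.0f;              /* normalized coordinates */
    out[y * 256 + x] = read_imagef(img, mip_smp, uv, lod);
}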
@@ -15560,165 +15320,165 @@ uint4 __purefn __ovld read_imageui(read_only image3d_t image, sampler_t sampler,
* Sampler-less Image Access
*/
-float4 __purefn __ovld read_imagef(read_only image1d_t image, int coord);
-int4 __purefn __ovld read_imagei(read_only image1d_t image, int coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_t image, int coord);
+float4 __ovld __purefn read_imagef(read_only image1d_t, int);
+int4 __ovld __purefn read_imagei(read_only image1d_t, int);
+uint4 __ovld __purefn read_imageui(read_only image1d_t, int);
-float4 __purefn __ovld read_imagef(read_only image1d_buffer_t image, int coord);
-int4 __purefn __ovld read_imagei(read_only image1d_buffer_t image, int coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_buffer_t image, int coord);
+float4 __ovld __purefn read_imagef(read_only image1d_buffer_t, int);
+int4 __ovld __purefn read_imagei(read_only image1d_buffer_t, int);
+uint4 __ovld __purefn read_imageui(read_only image1d_buffer_t, int);
-float4 __purefn __ovld read_imagef(read_only image1d_array_t image, int2 coord);
-int4 __purefn __ovld read_imagei(read_only image1d_array_t image, int2 coord);
-uint4 __purefn __ovld read_imageui(read_only image1d_array_t image, int2 coord);
+float4 __ovld __purefn read_imagef(read_only image1d_array_t, int2);
+int4 __ovld __purefn read_imagei(read_only image1d_array_t, int2);
+uint4 __ovld __purefn read_imageui(read_only image1d_array_t, int2);
-float4 __purefn __ovld read_imagef(read_only image2d_t image, int2 coord);
-int4 __purefn __ovld read_imagei(read_only image2d_t image, int2 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_t image, int2 coord);
+float4 __ovld __purefn read_imagef(read_only image2d_t, int2);
+int4 __ovld __purefn read_imagei(read_only image2d_t, int2);
+uint4 __ovld __purefn read_imageui(read_only image2d_t, int2);
-float4 __purefn __ovld read_imagef(read_only image2d_array_t image, int4 coord);
-int4 __purefn __ovld read_imagei(read_only image2d_array_t image, int4 coord);
-uint4 __purefn __ovld read_imageui(read_only image2d_array_t image, int4 coord);
+float4 __ovld __purefn read_imagef(read_only image2d_array_t, int4);
+int4 __ovld __purefn read_imagei(read_only image2d_array_t, int4);
+uint4 __ovld __purefn read_imageui(read_only image2d_array_t, int4);
#ifdef cl_khr_depth_images
-float __purefn __ovld read_imagef(read_only image2d_depth_t image, int2 coord);
-float __purefn __ovld read_imagef(read_only image2d_array_depth_t image, int4 coord);
+float __ovld __purefn read_imagef(read_only image2d_depth_t, int2);
+float __ovld __purefn read_imagef(read_only image2d_array_depth_t, int4);
#endif //cl_khr_depth_images
-float4 __purefn __ovld read_imagef(read_only image3d_t image, int4 coord);
-int4 __purefn __ovld read_imagei(read_only image3d_t image, int4 coord);
-uint4 __purefn __ovld read_imageui(read_only image3d_t image, int4 coord);
+float4 __ovld __purefn read_imagef(read_only image3d_t, int4);
+int4 __ovld __purefn read_imagei(read_only image3d_t, int4);
+uint4 __ovld __purefn read_imageui(read_only image3d_t, int4);
#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
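// --- Illustrative usage sketch (editor's addition, not part of this header) ---
// The sampler-less overloads read at unnormalized integer coordinates with
// nearest filtering; the coordinate must be in range. Hypothetical row sum:
__kernel void sum_row(read_only image2d_t img, __global float4 *row_sums, int width) {
    int y = get_global_id(0);
    float4 s = (float4)(0.0f);
    for (int x = 0; x < width; ++x)
        s += read_imagef(img, (int2)(x, y));  /* no sampler_t argument */
    row_sums[y] = s;
}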
// Image read functions returning half4 type
#ifdef cl_khr_fp16
-half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, int coord);
-half4 __purefn __ovld read_imageh(read_only image1d_t image, sampler_t sampler, float coord);
-half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, int2 coord);
-half4 __purefn __ovld read_imageh(read_only image2d_t image, sampler_t sampler, float2 coord);
-half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, int4 coord);
-half4 __purefn __ovld read_imageh(read_only image3d_t image, sampler_t sampler, float4 coord);
+half4 __ovld __purefn read_imageh(read_only image1d_t, sampler_t, int);
+half4 __ovld __purefn read_imageh(read_only image1d_t, sampler_t, float);
+half4 __ovld __purefn read_imageh(read_only image2d_t, sampler_t, int2);
+half4 __ovld __purefn read_imageh(read_only image2d_t, sampler_t, float2);
+half4 __ovld __purefn read_imageh(read_only image3d_t, sampler_t, int4);
+half4 __ovld __purefn read_imageh(read_only image3d_t, sampler_t, float4);
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
-half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, int2 coord);
-half4 __purefn __ovld read_imageh(read_only image1d_array_t image, sampler_t sampler, float2 coord);
-half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, int4 coord);
-half4 __purefn __ovld read_imageh(read_only image2d_array_t image, sampler_t sampler, float4 coord);
+half4 __ovld __purefn read_imageh(read_only image1d_array_t, sampler_t, int2);
+half4 __ovld __purefn read_imageh(read_only image1d_array_t, sampler_t, float2);
+half4 __ovld __purefn read_imageh(read_only image2d_array_t, sampler_t, int4);
+half4 __ovld __purefn read_imageh(read_only image2d_array_t, sampler_t, float4);
/**
* Sampler-less Image Access
*/
-half4 __purefn __ovld read_imageh(read_only image1d_t image, int coord);
-half4 __purefn __ovld read_imageh(read_only image2d_t image, int2 coord);
-half4 __purefn __ovld read_imageh(read_only image3d_t image, int4 coord);
-half4 __purefn __ovld read_imageh(read_only image1d_array_t image, int2 coord);
-half4 __purefn __ovld read_imageh(read_only image2d_array_t image, int4 coord);
-half4 __purefn __ovld read_imageh(read_only image1d_buffer_t image, int coord);
+half4 __ovld __purefn read_imageh(read_only image1d_t, int);
+half4 __ovld __purefn read_imageh(read_only image2d_t, int2);
+half4 __ovld __purefn read_imageh(read_only image3d_t, int4);
+half4 __ovld __purefn read_imageh(read_only image1d_array_t, int2);
+half4 __ovld __purefn read_imageh(read_only image2d_array_t, int4);
+half4 __ovld __purefn read_imageh(read_only image1d_buffer_t, int);
#endif // defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_1_2)
#endif //cl_khr_fp16
// Image read functions for read_write images
#if defined(__opencl_c_read_write_images)
-float4 __purefn __ovld read_imagef(read_write image1d_t image, int coord);
-int4 __purefn __ovld read_imagei(read_write image1d_t image, int coord);
-uint4 __purefn __ovld read_imageui(read_write image1d_t image, int coord);
+float4 __ovld __purefn read_imagef(read_write image1d_t, int);
+int4 __ovld __purefn read_imagei(read_write image1d_t, int);
+uint4 __ovld __purefn read_imageui(read_write image1d_t, int);
-float4 __purefn __ovld read_imagef(read_write image1d_buffer_t image, int coord);
-int4 __purefn __ovld read_imagei(read_write image1d_buffer_t image, int coord);
-uint4 __purefn __ovld read_imageui(read_write image1d_buffer_t image, int coord);
+float4 __ovld __purefn read_imagef(read_write image1d_buffer_t, int);
+int4 __ovld __purefn read_imagei(read_write image1d_buffer_t, int);
+uint4 __ovld __purefn read_imageui(read_write image1d_buffer_t, int);
-float4 __purefn __ovld read_imagef(read_write image1d_array_t image, int2 coord);
-int4 __purefn __ovld read_imagei(read_write image1d_array_t image, int2 coord);
-uint4 __purefn __ovld read_imageui(read_write image1d_array_t image, int2 coord);
+float4 __ovld __purefn read_imagef(read_write image1d_array_t, int2);
+int4 __ovld __purefn read_imagei(read_write image1d_array_t, int2);
+uint4 __ovld __purefn read_imageui(read_write image1d_array_t, int2);
-float4 __purefn __ovld read_imagef(read_write image2d_t image, int2 coord);
-int4 __purefn __ovld read_imagei(read_write image2d_t image, int2 coord);
-uint4 __purefn __ovld read_imageui(read_write image2d_t image, int2 coord);
+float4 __ovld __purefn read_imagef(read_write image2d_t, int2);
+int4 __ovld __purefn read_imagei(read_write image2d_t, int2);
+uint4 __ovld __purefn read_imageui(read_write image2d_t, int2);
-float4 __purefn __ovld read_imagef(read_write image2d_array_t image, int4 coord);
-int4 __purefn __ovld read_imagei(read_write image2d_array_t image, int4 coord);
-uint4 __purefn __ovld read_imageui(read_write image2d_array_t image, int4 coord);
+float4 __ovld __purefn read_imagef(read_write image2d_array_t, int4);
+int4 __ovld __purefn read_imagei(read_write image2d_array_t, int4);
+uint4 __ovld __purefn read_imageui(read_write image2d_array_t, int4);
-float4 __purefn __ovld read_imagef(read_write image3d_t image, int4 coord);
-int4 __purefn __ovld read_imagei(read_write image3d_t image, int4 coord);
-uint4 __purefn __ovld read_imageui(read_write image3d_t image, int4 coord);
+float4 __ovld __purefn read_imagef(read_write image3d_t, int4);
+int4 __ovld __purefn read_imagei(read_write image3d_t, int4);
+uint4 __ovld __purefn read_imageui(read_write image3d_t, int4);
#ifdef cl_khr_depth_images
-float __purefn __ovld read_imagef(read_write image2d_depth_t image, int2 coord);
-float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, int4 coord);
+float __ovld __purefn read_imagef(read_write image2d_depth_t, int2);
+float __ovld __purefn read_imagef(read_write image2d_array_depth_t, int4);
#endif //cl_khr_depth_images
#if cl_khr_gl_msaa_sharing
-float4 __purefn __ovld read_imagef(read_write image2d_msaa_t image, int2 coord, int sample);
-int4 __purefn __ovld read_imagei(read_write image2d_msaa_t image, int2 coord, int sample);
-uint4 __purefn __ovld read_imageui(read_write image2d_msaa_t image, int2 coord, int sample);
+float4 __ovld __purefn read_imagef(read_write image2d_msaa_t, int2, int);
+int4 __ovld __purefn read_imagei(read_write image2d_msaa_t, int2, int);
+uint4 __ovld __purefn read_imageui(read_write image2d_msaa_t, int2, int);
-float4 __purefn __ovld read_imagef(read_write image2d_array_msaa_t image, int4 coord, int sample);
-int4 __purefn __ovld read_imagei(read_write image2d_array_msaa_t image, int4 coord, int sample);
-uint4 __purefn __ovld read_imageui(read_write image2d_array_msaa_t image, int4 coord, int sample);
+float4 __ovld __purefn read_imagef(read_write image2d_array_msaa_t, int4, int);
+int4 __ovld __purefn read_imagei(read_write image2d_array_msaa_t, int4, int);
+uint4 __ovld __purefn read_imageui(read_write image2d_array_msaa_t, int4, int);
-float __purefn __ovld read_imagef(read_write image2d_msaa_depth_t image, int2 coord, int sample);
-float __purefn __ovld read_imagef(read_write image2d_array_msaa_depth_t image, int4 coord, int sample);
+float __ovld __purefn read_imagef(read_write image2d_msaa_depth_t, int2, int);
+float __ovld __purefn read_imagef(read_write image2d_array_msaa_depth_t, int4, int);
#endif //cl_khr_gl_msaa_sharing
#ifdef cl_khr_mipmap_image
-float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image1d_t image, sampler_t sampler, float coord, float lod);
+float4 __ovld __purefn read_imagef(read_write image1d_t, sampler_t, float, float);
+int4 __ovld __purefn read_imagei(read_write image1d_t, sampler_t, float, float);
+uint4 __ovld __purefn read_imageui(read_write image1d_t, sampler_t, float, float);
-float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float lod);
+float4 __ovld __purefn read_imagef(read_write image1d_array_t, sampler_t, float2, float);
+int4 __ovld __purefn read_imagei(read_write image1d_array_t, sampler_t, float2, float);
+uint4 __ovld __purefn read_imageui(read_write image1d_array_t, sampler_t, float2, float);
-float4 __purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float lod);
+float4 __ovld __purefn read_imagef(read_write image2d_t, sampler_t, float2, float);
+int4 __ovld __purefn read_imagei(read_write image2d_t, sampler_t, float2, float);
+uint4 __ovld __purefn read_imageui(read_write image2d_t, sampler_t, float2, float);
-float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float lod);
+float __ovld __purefn read_imagef(read_write image2d_depth_t, sampler_t, float2, float);
-float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float lod);
+float4 __ovld __purefn read_imagef(read_write image2d_array_t, sampler_t, float4, float);
+int4 __ovld __purefn read_imagei(read_write image2d_array_t, sampler_t, float4, float);
+uint4 __ovld __purefn read_imageui(read_write image2d_array_t, sampler_t, float4, float);
-float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float lod);
+float __ovld __purefn read_imagef(read_write image2d_array_depth_t, sampler_t, float4, float);
-float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
-int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
-uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float lod);
+float4 __ovld __purefn read_imagef(read_write image3d_t, sampler_t, float4, float);
+int4 __ovld __purefn read_imagei(read_write image3d_t, sampler_t, float4, float);
+uint4 __ovld __purefn read_imageui(read_write image3d_t, sampler_t, float4, float);
-float4 __purefn __ovld read_imagef(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-int4 __purefn __ovld read_imagei(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
-uint4 __purefn __ovld read_imageui(read_write image1d_t image, sampler_t sampler, float coord, float gradientX, float gradientY);
+float4 __ovld __purefn read_imagef(read_write image1d_t, sampler_t, float, float, float);
+int4 __ovld __purefn read_imagei(read_write image1d_t, sampler_t, float, float, float);
+uint4 __ovld __purefn read_imageui(read_write image1d_t, sampler_t, float, float, float);
-float4 __purefn __ovld read_imagef(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-int4 __purefn __ovld read_imagei(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
-uint4 __purefn __ovld read_imageui(read_write image1d_array_t image_array, sampler_t sampler, float2 coord, float gradientX, float gradientY);
+float4 __ovld __purefn read_imagef(read_write image1d_array_t, sampler_t, float2, float, float);
+int4 __ovld __purefn read_imagei(read_write image1d_array_t, sampler_t, float2, float, float);
+uint4 __ovld __purefn read_imageui(read_write image1d_array_t, sampler_t, float2, float, float);
-float4 __purefn __ovld read_imagef(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-int4 __purefn __ovld read_imagei(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
-uint4 __purefn __ovld read_imageui(read_write image2d_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
+float4 __ovld __purefn read_imagef(read_write image2d_t, sampler_t, float2, float2, float2);
+int4 __ovld __purefn read_imagei(read_write image2d_t, sampler_t, float2, float2, float2);
+uint4 __ovld __purefn read_imageui(read_write image2d_t, sampler_t, float2, float2, float2);
-float __purefn __ovld read_imagef(read_write image2d_depth_t image, sampler_t sampler, float2 coord, float2 gradientX, float2 gradientY);
+float __ovld __purefn read_imagef(read_write image2d_depth_t, sampler_t, float2, float2, float2);
-float4 __purefn __ovld read_imagef(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-int4 __purefn __ovld read_imagei(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
-uint4 __purefn __ovld read_imageui(read_write image2d_array_t image_array, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
+float4 __ovld __purefn read_imagef(read_write image2d_array_t, sampler_t, float4, float2, float2);
+int4 __ovld __purefn read_imagei(read_write image2d_array_t, sampler_t, float4, float2, float2);
+uint4 __ovld __purefn read_imageui(read_write image2d_array_t, sampler_t, float4, float2, float2);
-float __purefn __ovld read_imagef(read_write image2d_array_depth_t image, sampler_t sampler, float4 coord, float2 gradientX, float2 gradientY);
+float __ovld __purefn read_imagef(read_write image2d_array_depth_t, sampler_t, float4, float2, float2);
-float4 __purefn __ovld read_imagef(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-int4 __purefn __ovld read_imagei(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
-uint4 __purefn __ovld read_imageui(read_write image3d_t image, sampler_t sampler, float4 coord, float4 gradientX, float4 gradientY);
+float4 __ovld __purefn read_imagef(read_write image3d_t, sampler_t, float4, float4, float4);
+int4 __ovld __purefn read_imagei(read_write image3d_t, sampler_t, float4, float4, float4);
+uint4 __ovld __purefn read_imageui(read_write image3d_t, sampler_t, float4, float4, float4);
#endif //cl_khr_mipmap_image
// Image read functions returning half4 type
#ifdef cl_khr_fp16
-half4 __purefn __ovld read_imageh(read_write image1d_t image, int coord);
-half4 __purefn __ovld read_imageh(read_write image2d_t image, int2 coord);
-half4 __purefn __ovld read_imageh(read_write image3d_t image, int4 coord);
-half4 __purefn __ovld read_imageh(read_write image1d_array_t image, int2 coord);
-half4 __purefn __ovld read_imageh(read_write image2d_array_t image, int4 coord);
-half4 __purefn __ovld read_imageh(read_write image1d_buffer_t image, int coord);
+half4 __ovld __purefn read_imageh(read_write image1d_t, int);
+half4 __ovld __purefn read_imageh(read_write image2d_t, int2);
+half4 __ovld __purefn read_imageh(read_write image3d_t, int4);
+half4 __ovld __purefn read_imageh(read_write image1d_array_t, int2);
+half4 __ovld __purefn read_imageh(read_write image2d_array_t, int4);
+half4 __ovld __purefn read_imageh(read_write image1d_buffer_t, int);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_read_write_images)
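// --- Illustrative usage sketch (editor's addition, not part of this header) ---
// With __opencl_c_read_write_images the same image argument may be both read
// and written in one kernel. Each work-item below touches only its own texel,
// so no fence is needed between the read and the write. Hypothetical:
__kernel void scale_inplace(read_write image2d_t img, float gain) {
    int2 pos = (int2)(get_global_id(0), get_global_id(1));
    float4 px = read_imagef(img, pos);   /* sampler-less read overload */
    write_imagef(img, pos, px * gain);   /* write back the scaled texel */
}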
@@ -15789,63 +15549,63 @@ half4 __purefn __ovld read_imageh(read_write image1d_buffer_t image, int coord);
* values that are not in the range (0 ... image width -1,
* 0 ... image height - 1), respectively, is undefined.
*/
-void __ovld write_imagef(write_only image2d_t image, int2 coord, float4 color);
-void __ovld write_imagei(write_only image2d_t image, int2 coord, int4 color);
-void __ovld write_imageui(write_only image2d_t image, int2 coord, uint4 color);
+void __ovld write_imagef(write_only image2d_t, int2, float4);
+void __ovld write_imagei(write_only image2d_t, int2, int4);
+void __ovld write_imageui(write_only image2d_t, int2, uint4);
-void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, float4 color);
-void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int4 color);
-void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, uint4 color);
+void __ovld write_imagef(write_only image2d_array_t, int4, float4);
+void __ovld write_imagei(write_only image2d_array_t, int4, int4);
+void __ovld write_imageui(write_only image2d_array_t, int4, uint4);
-void __ovld write_imagef(write_only image1d_t image, int coord, float4 color);
-void __ovld write_imagei(write_only image1d_t image, int coord, int4 color);
-void __ovld write_imageui(write_only image1d_t image, int coord, uint4 color);
+void __ovld write_imagef(write_only image1d_t, int, float4);
+void __ovld write_imagei(write_only image1d_t, int, int4);
+void __ovld write_imageui(write_only image1d_t, int, uint4);
-void __ovld write_imagef(write_only image1d_buffer_t image, int coord, float4 color);
-void __ovld write_imagei(write_only image1d_buffer_t image, int coord, int4 color);
-void __ovld write_imageui(write_only image1d_buffer_t image, int coord, uint4 color);
+void __ovld write_imagef(write_only image1d_buffer_t, int, float4);
+void __ovld write_imagei(write_only image1d_buffer_t, int, int4);
+void __ovld write_imageui(write_only image1d_buffer_t, int, uint4);
-void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord, float4 color);
-void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord, int4 color);
-void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord, uint4 color);
+void __ovld write_imagef(write_only image1d_array_t, int2, float4);
+void __ovld write_imagei(write_only image1d_array_t, int2, int4);
+void __ovld write_imageui(write_only image1d_array_t, int2, uint4);
#ifdef cl_khr_3d_image_writes
-void __ovld write_imagef(write_only image3d_t image, int4 coord, float4 color);
-void __ovld write_imagei(write_only image3d_t image, int4 coord, int4 color);
-void __ovld write_imageui(write_only image3d_t image, int4 coord, uint4 color);
+void __ovld write_imagef(write_only image3d_t, int4, float4);
+void __ovld write_imagei(write_only image3d_t, int4, int4);
+void __ovld write_imageui(write_only image3d_t, int4, uint4);
#endif
#ifdef cl_khr_depth_images
-void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, float color);
-void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, float color);
+void __ovld write_imagef(write_only image2d_depth_t, int2, float);
+void __ovld write_imagef(write_only image2d_array_depth_t, int4, float);
#endif //cl_khr_depth_images
// OpenCL Extension v2.0 s9.18 - Mipmaps
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
#if defined(cl_khr_mipmap_image_writes)
-void __ovld write_imagef(write_only image1d_t image, int coord, int lod, float4 color);
-void __ovld write_imagei(write_only image1d_t image, int coord, int lod, int4 color);
-void __ovld write_imageui(write_only image1d_t image, int coord, int lod, uint4 color);
+void __ovld write_imagef(write_only image1d_t, int, int, float4);
+void __ovld write_imagei(write_only image1d_t, int, int, int4);
+void __ovld write_imageui(write_only image1d_t, int, int, uint4);
-void __ovld write_imagef(write_only image1d_array_t image_array, int2 coord, int lod, float4 color);
-void __ovld write_imagei(write_only image1d_array_t image_array, int2 coord, int lod, int4 color);
-void __ovld write_imageui(write_only image1d_array_t image_array, int2 coord, int lod, uint4 color);
+void __ovld write_imagef(write_only image1d_array_t, int2, int, float4);
+void __ovld write_imagei(write_only image1d_array_t, int2, int, int4);
+void __ovld write_imageui(write_only image1d_array_t, int2, int, uint4);
-void __ovld write_imagef(write_only image2d_t image, int2 coord, int lod, float4 color);
-void __ovld write_imagei(write_only image2d_t image, int2 coord, int lod, int4 color);
-void __ovld write_imageui(write_only image2d_t image, int2 coord, int lod, uint4 color);
+void __ovld write_imagef(write_only image2d_t, int2, int, float4);
+void __ovld write_imagei(write_only image2d_t, int2, int, int4);
+void __ovld write_imageui(write_only image2d_t, int2, int, uint4);
-void __ovld write_imagef(write_only image2d_array_t image_array, int4 coord, int lod, float4 color);
-void __ovld write_imagei(write_only image2d_array_t image_array, int4 coord, int lod, int4 color);
-void __ovld write_imageui(write_only image2d_array_t image_array, int4 coord, int lod, uint4 color);
+void __ovld write_imagef(write_only image2d_array_t, int4, int, float4);
+void __ovld write_imagei(write_only image2d_array_t, int4, int, int4);
+void __ovld write_imageui(write_only image2d_array_t, int4, int, uint4);
-void __ovld write_imagef(write_only image2d_depth_t image, int2 coord, int lod, float depth);
-void __ovld write_imagef(write_only image2d_array_depth_t image, int4 coord, int lod, float depth);
+void __ovld write_imagef(write_only image2d_depth_t, int2, int, float);
+void __ovld write_imagef(write_only image2d_array_depth_t, int4, int, float);
#ifdef cl_khr_3d_image_writes
-void __ovld write_imagef(write_only image3d_t image, int4 coord, int lod, float4 color);
-void __ovld write_imagei(write_only image3d_t image, int4 coord, int lod, int4 color);
-void __ovld write_imageui(write_only image3d_t image, int4 coord, int lod, uint4 color);
+void __ovld write_imagef(write_only image3d_t, int4, int, float4);
+void __ovld write_imagei(write_only image3d_t, int4, int, int4);
+void __ovld write_imageui(write_only image3d_t, int4, int, uint4);
#endif //cl_khr_3d_image_writes
#endif //defined(cl_khr_mipmap_image_writes)
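/*
 * Illustrative sketch (not part of the diff): when
 * cl_khr_mipmap_image_writes is supported, the overloads above insert an
 * int lod parameter between the coordinate and the color, selecting the
 * mip level to write. Hypothetical usage, assuming the extension is
 * enabled:
 */
#ifdef cl_khr_mipmap_image_writes
__kernel void clear_mip1(write_only image2d_t dst) {
  int2 coord = (int2)((int)get_global_id(0), (int)get_global_id(1));
  write_imagef(dst, coord, 1, (float4)(0.0f)); /* write to mip level 1 */
}
#endif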
@@ -15853,87 +15613,87 @@ void __ovld write_imageui(write_only image3d_t image, int4 coord, int lod, uint4
// Image write functions for half4 type
#ifdef cl_khr_fp16
-void __ovld write_imageh(write_only image1d_t image, int coord, half4 color);
-void __ovld write_imageh(write_only image2d_t image, int2 coord, half4 color);
+void __ovld write_imageh(write_only image1d_t, int, half4);
+void __ovld write_imageh(write_only image2d_t, int2, half4);
#ifdef cl_khr_3d_image_writes
-void __ovld write_imageh(write_only image3d_t image, int4 coord, half4 color);
+void __ovld write_imageh(write_only image3d_t, int4, half4);
#endif
-void __ovld write_imageh(write_only image1d_array_t image, int2 coord, half4 color);
-void __ovld write_imageh(write_only image2d_array_t image, int4 coord, half4 color);
-void __ovld write_imageh(write_only image1d_buffer_t image, int coord, half4 color);
+void __ovld write_imageh(write_only image1d_array_t, int2, half4);
+void __ovld write_imageh(write_only image2d_array_t, int4, half4);
+void __ovld write_imageh(write_only image1d_buffer_t, int, half4);
#endif //cl_khr_fp16
// Image write functions for read_write images
#if defined(__opencl_c_read_write_images)
-void __ovld write_imagef(read_write image2d_t image, int2 coord, float4 color);
-void __ovld write_imagei(read_write image2d_t image, int2 coord, int4 color);
-void __ovld write_imageui(read_write image2d_t image, int2 coord, uint4 color);
+void __ovld write_imagef(read_write image2d_t, int2, float4);
+void __ovld write_imagei(read_write image2d_t, int2, int4);
+void __ovld write_imageui(read_write image2d_t, int2, uint4);
-void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord, float4 color);
-void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord, int4 color);
-void __ovld write_imageui(read_write image2d_array_t image_array, int4 coord, uint4 color);
+void __ovld write_imagef(read_write image2d_array_t, int4, float4);
+void __ovld write_imagei(read_write image2d_array_t, int4, int4);
+void __ovld write_imageui(read_write image2d_array_t, int4, uint4);
-void __ovld write_imagef(read_write image1d_t image, int coord, float4 color);
-void __ovld write_imagei(read_write image1d_t image, int coord, int4 color);
-void __ovld write_imageui(read_write image1d_t image, int coord, uint4 color);
+void __ovld write_imagef(read_write image1d_t, int, float4);
+void __ovld write_imagei(read_write image1d_t, int, int4);
+void __ovld write_imageui(read_write image1d_t, int, uint4);
-void __ovld write_imagef(read_write image1d_buffer_t image, int coord, float4 color);
-void __ovld write_imagei(read_write image1d_buffer_t image, int coord, int4 color);
-void __ovld write_imageui(read_write image1d_buffer_t image, int coord, uint4 color);
+void __ovld write_imagef(read_write image1d_buffer_t, int, float4);
+void __ovld write_imagei(read_write image1d_buffer_t, int, int4);
+void __ovld write_imageui(read_write image1d_buffer_t, int, uint4);
-void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord, float4 color);
-void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord, int4 color);
-void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord, uint4 color);
+void __ovld write_imagef(read_write image1d_array_t, int2, float4);
+void __ovld write_imagei(read_write image1d_array_t, int2, int4);
+void __ovld write_imageui(read_write image1d_array_t, int2, uint4);
#ifdef cl_khr_3d_image_writes
-void __ovld write_imagef(read_write image3d_t image, int4 coord, float4 color);
-void __ovld write_imagei(read_write image3d_t image, int4 coord, int4 color);
-void __ovld write_imageui(read_write image3d_t image, int4 coord, uint4 color);
+void __ovld write_imagef(read_write image3d_t, int4, float4);
+void __ovld write_imagei(read_write image3d_t, int4, int4);
+void __ovld write_imageui(read_write image3d_t, int4, uint4);
#endif
#ifdef cl_khr_depth_images
-void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, float color);
-void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, float color);
+void __ovld write_imagef(read_write image2d_depth_t, int2, float);
+void __ovld write_imagef(read_write image2d_array_depth_t, int4, float);
#endif //cl_khr_depth_images
#if defined(cl_khr_mipmap_image_writes)
-void __ovld write_imagef(read_write image1d_t image, int coord, int lod, float4 color);
-void __ovld write_imagei(read_write image1d_t image, int coord, int lod, int4 color);
-void __ovld write_imageui(read_write image1d_t image, int coord, int lod, uint4 color);
+void __ovld write_imagef(read_write image1d_t, int, int, float4);
+void __ovld write_imagei(read_write image1d_t, int, int, int4);
+void __ovld write_imageui(read_write image1d_t, int, int, uint4);
-void __ovld write_imagef(read_write image1d_array_t image_array, int2 coord, int lod, float4 color);
-void __ovld write_imagei(read_write image1d_array_t image_array, int2 coord, int lod, int4 color);
-void __ovld write_imageui(read_write image1d_array_t image_array, int2 coord, int lod, uint4 color);
+void __ovld write_imagef(read_write image1d_array_t, int2, int, float4);
+void __ovld write_imagei(read_write image1d_array_t, int2, int, int4);
+void __ovld write_imageui(read_write image1d_array_t, int2, int, uint4);
-void __ovld write_imagef(read_write image2d_t image, int2 coord, int lod, float4 color);
-void __ovld write_imagei(read_write image2d_t image, int2 coord, int lod, int4 color);
-void __ovld write_imageui(read_write image2d_t image, int2 coord, int lod, uint4 color);
+void __ovld write_imagef(read_write image2d_t, int2, int, float4);
+void __ovld write_imagei(read_write image2d_t, int2, int, int4);
+void __ovld write_imageui(read_write image2d_t, int2, int, uint4);
-void __ovld write_imagef(read_write image2d_array_t image_array, int4 coord, int lod, float4 color);
-void __ovld write_imagei(read_write image2d_array_t image_array, int4 coord, int lod, int4 color);
-void __ovld write_imageui(read_write image2d_array_t image_array, int4 coord, int lod, uint4 color);
+void __ovld write_imagef(read_write image2d_array_t, int4, int, float4);
+void __ovld write_imagei(read_write image2d_array_t, int4, int, int4);
+void __ovld write_imageui(read_write image2d_array_t, int4, int, uint4);
-void __ovld write_imagef(read_write image2d_depth_t image, int2 coord, int lod, float color);
-void __ovld write_imagef(read_write image2d_array_depth_t image, int4 coord, int lod, float color);
+void __ovld write_imagef(read_write image2d_depth_t, int2, int, float);
+void __ovld write_imagef(read_write image2d_array_depth_t, int4, int, float);
#ifdef cl_khr_3d_image_writes
-void __ovld write_imagef(read_write image3d_t image, int4 coord, int lod, float4 color);
-void __ovld write_imagei(read_write image3d_t image, int4 coord, int lod, int4 color);
-void __ovld write_imageui(read_write image3d_t image, int4 coord, int lod, uint4 color);
+void __ovld write_imagef(read_write image3d_t, int4, int, float4);
+void __ovld write_imagei(read_write image3d_t, int4, int, int4);
+void __ovld write_imageui(read_write image3d_t, int4, int, uint4);
#endif //cl_khr_3d_image_writes
#endif //cl_khr_mipmap_image_writes
// Image write functions for half4 type
#ifdef cl_khr_fp16
-void __ovld write_imageh(read_write image1d_t image, int coord, half4 color);
-void __ovld write_imageh(read_write image2d_t image, int2 coord, half4 color);
+void __ovld write_imageh(read_write image1d_t, int, half4);
+void __ovld write_imageh(read_write image2d_t, int2, half4);
#ifdef cl_khr_3d_image_writes
-void __ovld write_imageh(read_write image3d_t image, int4 coord, half4 color);
+void __ovld write_imageh(read_write image3d_t, int4, half4);
#endif
-void __ovld write_imageh(read_write image1d_array_t image, int2 coord, half4 color);
-void __ovld write_imageh(read_write image2d_array_t image, int4 coord, half4 color);
-void __ovld write_imageh(read_write image1d_buffer_t image, int coord, half4 color);
+void __ovld write_imageh(read_write image1d_array_t, int2, half4);
+void __ovld write_imageh(read_write image2d_array_t, int4, half4);
+void __ovld write_imageh(read_write image1d_buffer_t, int, half4);
#endif //cl_khr_fp16
#endif //defined(__opencl_c_read_write_images)
@@ -15945,123 +15705,121 @@ void __ovld write_imageh(read_write image1d_buffer_t image, int coord, half4 col
* Return the image width in pixels.
*
*/
-int __ovld __cnfn get_image_width(read_only image1d_t image);
-int __ovld __cnfn get_image_width(read_only image1d_buffer_t image);
-int __ovld __cnfn get_image_width(read_only image2d_t image);
-#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_width(read_only image3d_t image);
-#endif
-int __ovld __cnfn get_image_width(read_only image1d_array_t image);
-int __ovld __cnfn get_image_width(read_only image2d_array_t image);
+int __ovld __cnfn get_image_width(read_only image1d_t);
+int __ovld __cnfn get_image_width(read_only image1d_buffer_t);
+int __ovld __cnfn get_image_width(read_only image2d_t);
+int __ovld __cnfn get_image_width(read_only image3d_t);
+int __ovld __cnfn get_image_width(read_only image1d_array_t);
+int __ovld __cnfn get_image_width(read_only image2d_array_t);
#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_width(read_only image2d_depth_t image);
-int __ovld __cnfn get_image_width(read_only image2d_array_depth_t image);
+int __ovld __cnfn get_image_width(read_only image2d_depth_t);
+int __ovld __cnfn get_image_width(read_only image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_width(read_only image2d_msaa_t image);
-int __ovld __cnfn get_image_width(read_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_width(read_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_width(read_only image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_width(read_only image2d_msaa_t);
+int __ovld __cnfn get_image_width(read_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_width(read_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_width(read_only image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
-int __ovld __cnfn get_image_width(write_only image1d_t image);
-int __ovld __cnfn get_image_width(write_only image1d_buffer_t image);
-int __ovld __cnfn get_image_width(write_only image2d_t image);
+int __ovld __cnfn get_image_width(write_only image1d_t);
+int __ovld __cnfn get_image_width(write_only image1d_buffer_t);
+int __ovld __cnfn get_image_width(write_only image2d_t);
#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_width(write_only image3d_t image);
+int __ovld __cnfn get_image_width(write_only image3d_t);
#endif
-int __ovld __cnfn get_image_width(write_only image1d_array_t image);
-int __ovld __cnfn get_image_width(write_only image2d_array_t image);
+int __ovld __cnfn get_image_width(write_only image1d_array_t);
+int __ovld __cnfn get_image_width(write_only image2d_array_t);
#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_width(write_only image2d_depth_t image);
-int __ovld __cnfn get_image_width(write_only image2d_array_depth_t image);
+int __ovld __cnfn get_image_width(write_only image2d_depth_t);
+int __ovld __cnfn get_image_width(write_only image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_width(write_only image2d_msaa_t image);
-int __ovld __cnfn get_image_width(write_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_width(write_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_width(write_only image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_width(write_only image2d_msaa_t);
+int __ovld __cnfn get_image_width(write_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_width(write_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_width(write_only image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
#if defined(__opencl_c_read_write_images)
-int __ovld __cnfn get_image_width(read_write image1d_t image);
-int __ovld __cnfn get_image_width(read_write image1d_buffer_t image);
-int __ovld __cnfn get_image_width(read_write image2d_t image);
-int __ovld __cnfn get_image_width(read_write image3d_t image);
-int __ovld __cnfn get_image_width(read_write image1d_array_t image);
-int __ovld __cnfn get_image_width(read_write image2d_array_t image);
+int __ovld __cnfn get_image_width(read_write image1d_t);
+int __ovld __cnfn get_image_width(read_write image1d_buffer_t);
+int __ovld __cnfn get_image_width(read_write image2d_t);
+int __ovld __cnfn get_image_width(read_write image3d_t);
+int __ovld __cnfn get_image_width(read_write image1d_array_t);
+int __ovld __cnfn get_image_width(read_write image2d_array_t);
#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_width(read_write image2d_depth_t image);
-int __ovld __cnfn get_image_width(read_write image2d_array_depth_t image);
+int __ovld __cnfn get_image_width(read_write image2d_depth_t);
+int __ovld __cnfn get_image_width(read_write image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_width(read_write image2d_msaa_t image);
-int __ovld __cnfn get_image_width(read_write image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_width(read_write image2d_array_msaa_t image);
-int __ovld __cnfn get_image_width(read_write image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_width(read_write image2d_msaa_t);
+int __ovld __cnfn get_image_width(read_write image2d_msaa_depth_t);
+int __ovld __cnfn get_image_width(read_write image2d_array_msaa_t);
+int __ovld __cnfn get_image_width(read_write image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
#endif //defined(__opencl_c_read_write_images)
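/*
 * Illustrative sketch (not part of the diff): get_image_width accepts
 * every access qualifier, so a kernel can bounds-check its own accesses.
 * Kernel name and arguments are hypothetical; the sampler-less
 * read_imagef(image1d_t, int) overload is declared earlier in this header.
 */
__kernel void copy_row(read_only image1d_t src, write_only image1d_t dst) {
  int x = (int)get_global_id(0);
  if (x < get_image_width(dst))
    write_imagef(dst, x, read_imagef(src, x));
}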
/**
* Return the image height in pixels.
*/
-int __ovld __cnfn get_image_height(read_only image2d_t image);
-int __ovld __cnfn get_image_height(read_only image3d_t image);
-int __ovld __cnfn get_image_height(read_only image2d_array_t image);
+int __ovld __cnfn get_image_height(read_only image2d_t);
+int __ovld __cnfn get_image_height(read_only image3d_t);
+int __ovld __cnfn get_image_height(read_only image2d_array_t);
#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_height(read_only image2d_depth_t image);
-int __ovld __cnfn get_image_height(read_only image2d_array_depth_t image);
+int __ovld __cnfn get_image_height(read_only image2d_depth_t);
+int __ovld __cnfn get_image_height(read_only image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_height(read_only image2d_msaa_t image);
-int __ovld __cnfn get_image_height(read_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_height(read_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_height(read_only image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_height(read_only image2d_msaa_t);
+int __ovld __cnfn get_image_height(read_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_height(read_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_height(read_only image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
-int __ovld __cnfn get_image_height(write_only image2d_t image);
+int __ovld __cnfn get_image_height(write_only image2d_t);
#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_height(write_only image3d_t image);
+int __ovld __cnfn get_image_height(write_only image3d_t);
#endif
-int __ovld __cnfn get_image_height(write_only image2d_array_t image);
+int __ovld __cnfn get_image_height(write_only image2d_array_t);
#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_height(write_only image2d_depth_t image);
-int __ovld __cnfn get_image_height(write_only image2d_array_depth_t image);
+int __ovld __cnfn get_image_height(write_only image2d_depth_t);
+int __ovld __cnfn get_image_height(write_only image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_height(write_only image2d_msaa_t image);
-int __ovld __cnfn get_image_height(write_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_height(write_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_height(write_only image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_height(write_only image2d_msaa_t);
+int __ovld __cnfn get_image_height(write_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_height(write_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_height(write_only image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
#if defined(__opencl_c_read_write_images)
-int __ovld __cnfn get_image_height(read_write image2d_t image);
-int __ovld __cnfn get_image_height(read_write image3d_t image);
-int __ovld __cnfn get_image_height(read_write image2d_array_t image);
+int __ovld __cnfn get_image_height(read_write image2d_t);
+int __ovld __cnfn get_image_height(read_write image3d_t);
+int __ovld __cnfn get_image_height(read_write image2d_array_t);
#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_height(read_write image2d_depth_t image);
-int __ovld __cnfn get_image_height(read_write image2d_array_depth_t image);
+int __ovld __cnfn get_image_height(read_write image2d_depth_t);
+int __ovld __cnfn get_image_height(read_write image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_height(read_write image2d_msaa_t image);
-int __ovld __cnfn get_image_height(read_write image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_height(read_write image2d_array_msaa_t image);
-int __ovld __cnfn get_image_height(read_write image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_height(read_write image2d_msaa_t);
+int __ovld __cnfn get_image_height(read_write image2d_msaa_depth_t);
+int __ovld __cnfn get_image_height(read_write image2d_array_msaa_t);
+int __ovld __cnfn get_image_height(read_write image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
#endif //defined(__opencl_c_read_write_images)
/**
* Return the image depth in pixels.
*/
-int __ovld __cnfn get_image_depth(read_only image3d_t image);
+int __ovld __cnfn get_image_depth(read_only image3d_t);
#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_depth(write_only image3d_t image);
+int __ovld __cnfn get_image_depth(write_only image3d_t);
#endif
#if defined(__opencl_c_read_write_images)
-int __ovld __cnfn get_image_depth(read_write image3d_t image);
+int __ovld __cnfn get_image_depth(read_write image3d_t);
#endif //defined(__opencl_c_read_write_images)
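/*
 * Illustrative sketch (not part of the diff): combining the width, height,
 * and depth queries above to guard a 3D read; names are hypothetical and
 * the output is x-indexed purely for brevity.
 */
__kernel void clamp_read(read_only image3d_t vol, __global float4 *out) {
  int4 p = (int4)((int)get_global_id(0), (int)get_global_id(1),
                  (int)get_global_id(2), 0);
  if (p.x >= get_image_width(vol) || p.y >= get_image_height(vol) ||
      p.z >= get_image_depth(vol))
    return;
  out[p.x] = read_imagef(vol, p);
}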
// OpenCL Extension v2.0 s9.18 - Mipmaps
@@ -16071,37 +15829,37 @@ int __ovld __cnfn get_image_depth(read_write image3d_t image);
* Return the image miplevels.
*/
-int __ovld get_image_num_mip_levels(read_only image1d_t image);
-int __ovld get_image_num_mip_levels(read_only image2d_t image);
-int __ovld get_image_num_mip_levels(read_only image3d_t image);
+int __ovld get_image_num_mip_levels(read_only image1d_t);
+int __ovld get_image_num_mip_levels(read_only image2d_t);
+int __ovld get_image_num_mip_levels(read_only image3d_t);
-int __ovld get_image_num_mip_levels(write_only image1d_t image);
-int __ovld get_image_num_mip_levels(write_only image2d_t image);
+int __ovld get_image_num_mip_levels(write_only image1d_t);
+int __ovld get_image_num_mip_levels(write_only image2d_t);
#ifdef cl_khr_3d_image_writes
-int __ovld get_image_num_mip_levels(write_only image3d_t image);
+int __ovld get_image_num_mip_levels(write_only image3d_t);
#endif
#if defined(__opencl_c_read_write_images)
-int __ovld get_image_num_mip_levels(read_write image1d_t image);
-int __ovld get_image_num_mip_levels(read_write image2d_t image);
-int __ovld get_image_num_mip_levels(read_write image3d_t image);
+int __ovld get_image_num_mip_levels(read_write image1d_t);
+int __ovld get_image_num_mip_levels(read_write image2d_t);
+int __ovld get_image_num_mip_levels(read_write image3d_t);
#endif //defined(__opencl_c_read_write_images)
-int __ovld get_image_num_mip_levels(read_only image1d_array_t image);
-int __ovld get_image_num_mip_levels(read_only image2d_array_t image);
-int __ovld get_image_num_mip_levels(read_only image2d_array_depth_t image);
-int __ovld get_image_num_mip_levels(read_only image2d_depth_t image);
+int __ovld get_image_num_mip_levels(read_only image1d_array_t);
+int __ovld get_image_num_mip_levels(read_only image2d_array_t);
+int __ovld get_image_num_mip_levels(read_only image2d_array_depth_t);
+int __ovld get_image_num_mip_levels(read_only image2d_depth_t);
-int __ovld get_image_num_mip_levels(write_only image1d_array_t image);
-int __ovld get_image_num_mip_levels(write_only image2d_array_t image);
-int __ovld get_image_num_mip_levels(write_only image2d_array_depth_t image);
-int __ovld get_image_num_mip_levels(write_only image2d_depth_t image);
+int __ovld get_image_num_mip_levels(write_only image1d_array_t);
+int __ovld get_image_num_mip_levels(write_only image2d_array_t);
+int __ovld get_image_num_mip_levels(write_only image2d_array_depth_t);
+int __ovld get_image_num_mip_levels(write_only image2d_depth_t);
#if defined(__opencl_c_read_write_images)
-int __ovld get_image_num_mip_levels(read_write image1d_array_t image);
-int __ovld get_image_num_mip_levels(read_write image2d_array_t image);
-int __ovld get_image_num_mip_levels(read_write image2d_array_depth_t image);
-int __ovld get_image_num_mip_levels(read_write image2d_depth_t image);
+int __ovld get_image_num_mip_levels(read_write image1d_array_t);
+int __ovld get_image_num_mip_levels(read_write image2d_array_t);
+int __ovld get_image_num_mip_levels(read_write image2d_array_depth_t);
+int __ovld get_image_num_mip_levels(read_write image2d_depth_t);
#endif //defined(__opencl_c_read_write_images)
#endif //cl_khr_mipmap_image
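/*
 * Illustrative sketch (not part of the diff): reading back the mip-chain
 * length reported by get_image_num_mip_levels. Assumes cl_khr_mipmap_image
 * and a single-work-item launch; the output buffer is hypothetical.
 */
#ifdef cl_khr_mipmap_image
__kernel void count_levels(read_only image2d_t img, __global int *levels) {
  *levels = get_image_num_mip_levels(img);
}
#endif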
@@ -16126,58 +15884,58 @@ int __ovld get_image_num_mip_levels(read_write image2d_depth_t image);
* CLK_FLOAT
*/
-int __ovld __cnfn get_image_channel_data_type(read_only image1d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image3d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image1d_array_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_t image);
+int __ovld __cnfn get_image_channel_data_type(read_only image1d_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image1d_buffer_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image3d_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image1d_array_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_t);
#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_depth_t image);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_depth_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_channel_data_type(read_only image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
-int __ovld __cnfn get_image_channel_data_type(write_only image1d_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image1d_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image1d_buffer_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_t);
#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_channel_data_type(write_only image3d_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image3d_t);
#endif
-int __ovld __cnfn get_image_channel_data_type(write_only image1d_array_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image1d_array_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_t);
#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_depth_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_depth_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_channel_data_type(write_only image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
#if defined(__opencl_c_read_write_images)
-int __ovld __cnfn get_image_channel_data_type(read_write image1d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image3d_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image1d_array_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_t image);
+int __ovld __cnfn get_image_channel_data_type(read_write image1d_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image1d_buffer_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image3d_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image1d_array_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_t);
#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_depth_t image);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_depth_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_msaa_depth_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_t);
+int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
#endif //defined(__opencl_c_read_write_images)
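/*
 * Illustrative sketch (not part of the diff): branching on the channel
 * data type, e.g. to decide between read_imagef and read_imageui. Every
 * work-item stores the same value, so the race is benign; names are
 * hypothetical.
 */
__kernel void classify(read_only image2d_t img, __global int *is_float) {
  *is_float = (get_image_channel_data_type(img) == CLK_FLOAT);
}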
@@ -16198,58 +15956,58 @@ int __ovld __cnfn get_image_channel_data_type(read_write image2d_array_msaa_dept
* CLK_LUMINANCE
*/
-int __ovld __cnfn get_image_channel_order(read_only image1d_t image);
-int __ovld __cnfn get_image_channel_order(read_only image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_t image);
-int __ovld __cnfn get_image_channel_order(read_only image3d_t image);
-int __ovld __cnfn get_image_channel_order(read_only image1d_array_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_array_t image);
+int __ovld __cnfn get_image_channel_order(read_only image1d_t);
+int __ovld __cnfn get_image_channel_order(read_only image1d_buffer_t);
+int __ovld __cnfn get_image_channel_order(read_only image2d_t);
+int __ovld __cnfn get_image_channel_order(read_only image3d_t);
+int __ovld __cnfn get_image_channel_order(read_only image1d_array_t);
+int __ovld __cnfn get_image_channel_order(read_only image2d_array_t);
#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_order(read_only image2d_depth_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_array_depth_t image);
+int __ovld __cnfn get_image_channel_order(read_only image2d_depth_t);
+int __ovld __cnfn get_image_channel_order(read_only image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_t);
+int __ovld __cnfn get_image_channel_order(read_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_channel_order(read_only image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
-int __ovld __cnfn get_image_channel_order(write_only image1d_t image);
-int __ovld __cnfn get_image_channel_order(write_only image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_t image);
+int __ovld __cnfn get_image_channel_order(write_only image1d_t);
+int __ovld __cnfn get_image_channel_order(write_only image1d_buffer_t);
+int __ovld __cnfn get_image_channel_order(write_only image2d_t);
#ifdef cl_khr_3d_image_writes
-int __ovld __cnfn get_image_channel_order(write_only image3d_t image);
+int __ovld __cnfn get_image_channel_order(write_only image3d_t);
#endif
-int __ovld __cnfn get_image_channel_order(write_only image1d_array_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_array_t image);
+int __ovld __cnfn get_image_channel_order(write_only image1d_array_t);
+int __ovld __cnfn get_image_channel_order(write_only image2d_array_t);
#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_order(write_only image2d_depth_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_array_depth_t image);
+int __ovld __cnfn get_image_channel_order(write_only image2d_depth_t);
+int __ovld __cnfn get_image_channel_order(write_only image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_t);
+int __ovld __cnfn get_image_channel_order(write_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_channel_order(write_only image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
#if defined(__opencl_c_read_write_images)
-int __ovld __cnfn get_image_channel_order(read_write image1d_t image);
-int __ovld __cnfn get_image_channel_order(read_write image1d_buffer_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_t image);
-int __ovld __cnfn get_image_channel_order(read_write image3d_t image);
-int __ovld __cnfn get_image_channel_order(read_write image1d_array_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_array_t image);
+int __ovld __cnfn get_image_channel_order(read_write image1d_t);
+int __ovld __cnfn get_image_channel_order(read_write image1d_buffer_t);
+int __ovld __cnfn get_image_channel_order(read_write image2d_t);
+int __ovld __cnfn get_image_channel_order(read_write image3d_t);
+int __ovld __cnfn get_image_channel_order(read_write image1d_array_t);
+int __ovld __cnfn get_image_channel_order(read_write image2d_array_t);
#ifdef cl_khr_depth_images
-int __ovld __cnfn get_image_channel_order(read_write image2d_depth_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_array_depth_t image);
+int __ovld __cnfn get_image_channel_order(read_write image2d_depth_t);
+int __ovld __cnfn get_image_channel_order(read_write image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_depth_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_t image);
-int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_t);
+int __ovld __cnfn get_image_channel_order(read_write image2d_msaa_depth_t);
+int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_t);
+int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
#endif //defined(__opencl_c_read_write_images)
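/*
 * Illustrative sketch (not part of the diff): checking the channel order,
 * e.g. four-channel RGBA versus a single-channel layout such as
 * CLK_LUMINANCE. Names are hypothetical.
 */
__kernel void order_check(read_only image2d_t img, __global int *is_rgba) {
  *is_rgba = (get_image_channel_order(img) == CLK_RGBA);
}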
@@ -16258,44 +16016,44 @@ int __ovld __cnfn get_image_channel_order(read_write image2d_array_msaa_depth_t
* type. The width is returned in the x component, and
* the height in the y component.
*/
-int2 __ovld __cnfn get_image_dim(read_only image2d_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_array_t image);
+int2 __ovld __cnfn get_image_dim(read_only image2d_t);
+int2 __ovld __cnfn get_image_dim(read_only image2d_array_t);
#ifdef cl_khr_depth_images
-int2 __ovld __cnfn get_image_dim(read_only image2d_array_depth_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_depth_t image);
+int2 __ovld __cnfn get_image_dim(read_only image2d_array_depth_t);
+int2 __ovld __cnfn get_image_dim(read_only image2d_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_depth_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_t image);
-int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_depth_t image);
+int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_t);
+int2 __ovld __cnfn get_image_dim(read_only image2d_msaa_depth_t);
+int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_t);
+int2 __ovld __cnfn get_image_dim(read_only image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
-int2 __ovld __cnfn get_image_dim(write_only image2d_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_array_t image);
+int2 __ovld __cnfn get_image_dim(write_only image2d_t);
+int2 __ovld __cnfn get_image_dim(write_only image2d_array_t);
#ifdef cl_khr_depth_images
-int2 __ovld __cnfn get_image_dim(write_only image2d_array_depth_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_depth_t image);
+int2 __ovld __cnfn get_image_dim(write_only image2d_array_depth_t);
+int2 __ovld __cnfn get_image_dim(write_only image2d_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_depth_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_t image);
-int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_depth_t image);
+int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_t);
+int2 __ovld __cnfn get_image_dim(write_only image2d_msaa_depth_t);
+int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_t);
+int2 __ovld __cnfn get_image_dim(write_only image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
#if defined(__opencl_c_read_write_images)
-int2 __ovld __cnfn get_image_dim(read_write image2d_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_array_t image);
+int2 __ovld __cnfn get_image_dim(read_write image2d_t);
+int2 __ovld __cnfn get_image_dim(read_write image2d_array_t);
#ifdef cl_khr_depth_images
-int2 __ovld __cnfn get_image_dim(read_write image2d_array_depth_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_depth_t image);
+int2 __ovld __cnfn get_image_dim(read_write image2d_array_depth_t);
+int2 __ovld __cnfn get_image_dim(read_write image2d_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_depth_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_t image);
-int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_depth_t image);
+int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_t);
+int2 __ovld __cnfn get_image_dim(read_write image2d_msaa_depth_t);
+int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_t);
+int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
#endif //defined(__opencl_c_read_write_images)
@@ -16305,47 +16063,47 @@ int2 __ovld __cnfn get_image_dim(read_write image2d_array_msaa_depth_t image);
* component, height in the y component, depth in the z
* component and the w component is 0.
*/
-int4 __ovld __cnfn get_image_dim(read_only image3d_t image);
+int4 __ovld __cnfn get_image_dim(read_only image3d_t);
#ifdef cl_khr_3d_image_writes
-int4 __ovld __cnfn get_image_dim(write_only image3d_t image);
+int4 __ovld __cnfn get_image_dim(write_only image3d_t);
#endif
#if defined(__opencl_c_read_write_images)
-int4 __ovld __cnfn get_image_dim(read_write image3d_t image);
+int4 __ovld __cnfn get_image_dim(read_write image3d_t);
#endif //defined(__opencl_c_read_write_images)
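/*
 * Illustrative sketch (not part of the diff): get_image_dim packs the
 * per-axis queries into one call, int2 for 2D images and int4 for 3D
 * images. Names are hypothetical; the flag array is x-indexed for brevity.
 */
__kernel void in_bounds(read_only image2d_t img, __global int *ok) {
  int2 dim = get_image_dim(img); /* dim.x = width, dim.y = height */
  int2 p = (int2)((int)get_global_id(0), (int)get_global_id(1));
  ok[p.x] = (p.x < dim.x && p.y < dim.y);
}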
/**
* Return the image array size.
*/
-size_t __ovld __cnfn get_image_array_size(read_only image1d_array_t image_array);
-size_t __ovld __cnfn get_image_array_size(read_only image2d_array_t image_array);
+size_t __ovld __cnfn get_image_array_size(read_only image1d_array_t);
+size_t __ovld __cnfn get_image_array_size(read_only image2d_array_t);
#ifdef cl_khr_depth_images
-size_t __ovld __cnfn get_image_array_size(read_only image2d_array_depth_t image_array);
+size_t __ovld __cnfn get_image_array_size(read_only image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_t image_array);
-size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_depth_t image_array);
+size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_t);
+size_t __ovld __cnfn get_image_array_size(read_only image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
-size_t __ovld __cnfn get_image_array_size(write_only image1d_array_t image_array);
-size_t __ovld __cnfn get_image_array_size(write_only image2d_array_t image_array);
+size_t __ovld __cnfn get_image_array_size(write_only image1d_array_t);
+size_t __ovld __cnfn get_image_array_size(write_only image2d_array_t);
#ifdef cl_khr_depth_images
-size_t __ovld __cnfn get_image_array_size(write_only image2d_array_depth_t image_array);
+size_t __ovld __cnfn get_image_array_size(write_only image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_t image_array);
-size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_depth_t image_array);
+size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_t);
+size_t __ovld __cnfn get_image_array_size(write_only image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
#if defined(__opencl_c_read_write_images)
-size_t __ovld __cnfn get_image_array_size(read_write image1d_array_t image_array);
-size_t __ovld __cnfn get_image_array_size(read_write image2d_array_t image_array);
+size_t __ovld __cnfn get_image_array_size(read_write image1d_array_t);
+size_t __ovld __cnfn get_image_array_size(read_write image2d_array_t);
#ifdef cl_khr_depth_images
-size_t __ovld __cnfn get_image_array_size(read_write image2d_array_depth_t image_array);
+size_t __ovld __cnfn get_image_array_size(read_write image2d_array_depth_t);
#endif //cl_khr_depth_images
#if defined(cl_khr_gl_msaa_sharing)
-size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_t image_array);
-size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_depth_t image_array);
+size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_t);
+size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_depth_t);
#endif //cl_khr_gl_msaa_sharing
#endif //defined(__opencl_c_read_write_images)
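/*
 * Illustrative sketch (not part of the diff): using get_image_array_size
 * to pick the last slice of a 1D image array (the second coordinate
 * component selects the slice). Names are hypothetical.
 */
__kernel void last_slice(read_only image1d_array_t arr,
                         __global float4 *out) {
  int x = (int)get_global_id(0);
  int slice = (int)get_image_array_size(arr) - 1;
  out[x] = read_imagef(arr, (int2)(x, slice));
}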
@@ -16353,21 +16111,21 @@ size_t __ovld __cnfn get_image_array_size(read_write image2d_array_msaa_depth_t
 * Return the number of samples associated with the image.
*/
#if defined(cl_khr_gl_msaa_sharing)
-int __ovld get_image_num_samples(read_only image2d_msaa_t image);
-int __ovld get_image_num_samples(read_only image2d_msaa_depth_t image);
-int __ovld get_image_num_samples(read_only image2d_array_msaa_t image);
-int __ovld get_image_num_samples(read_only image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_num_samples(read_only image2d_msaa_t);
+int __ovld __cnfn get_image_num_samples(read_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_num_samples(read_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_num_samples(read_only image2d_array_msaa_depth_t);
-int __ovld get_image_num_samples(write_only image2d_msaa_t image);
-int __ovld get_image_num_samples(write_only image2d_msaa_depth_t image);
-int __ovld get_image_num_samples(write_only image2d_array_msaa_t image);
-int __ovld get_image_num_samples(write_only image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_num_samples(write_only image2d_msaa_t);
+int __ovld __cnfn get_image_num_samples(write_only image2d_msaa_depth_t);
+int __ovld __cnfn get_image_num_samples(write_only image2d_array_msaa_t);
+int __ovld __cnfn get_image_num_samples(write_only image2d_array_msaa_depth_t);
#if defined(__opencl_c_read_write_images)
-int __ovld get_image_num_samples(read_write image2d_msaa_t image);
-int __ovld get_image_num_samples(read_write image2d_msaa_depth_t image);
-int __ovld get_image_num_samples(read_write image2d_array_msaa_t image);
-int __ovld get_image_num_samples(read_write image2d_array_msaa_depth_t image);
+int __ovld __cnfn get_image_num_samples(read_write image2d_msaa_t);
+int __ovld __cnfn get_image_num_samples(read_write image2d_msaa_depth_t);
+int __ovld __cnfn get_image_num_samples(read_write image2d_array_msaa_t);
+int __ovld __cnfn get_image_num_samples(read_write image2d_array_msaa_depth_t);
#endif //defined(__opencl_c_read_write_images)
#endif
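/*
 * Illustrative sketch (not part of the diff): MSAA reads take a sample
 * index, so get_image_num_samples bounds the resolve loop. Assumes
 * cl_khr_gl_msaa_sharing and uses the read_imagef(image2d_msaa_t, int2,
 * int) overload declared earlier in this header; names are hypothetical.
 */
#if defined(cl_khr_gl_msaa_sharing)
__kernel void resolve(read_only image2d_msaa_t src,
                      write_only image2d_t dst) {
  int2 p = (int2)((int)get_global_id(0), (int)get_global_id(1));
  int n = get_image_num_samples(src);
  float4 acc = (float4)(0.0f);
  for (int s = 0; s < n; ++s)
    acc += read_imagef(src, p, s); /* accumulate all samples */
  write_imagef(dst, p, acc / (float)n);
}
#endif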
@@ -16378,97 +16136,97 @@ int __ovld __conv work_group_all(int predicate);
int __ovld __conv work_group_any(int predicate);
#ifdef cl_khr_fp16
-half __ovld __conv work_group_broadcast(half a, size_t local_id);
-half __ovld __conv work_group_broadcast(half a, size_t x, size_t y);
-half __ovld __conv work_group_broadcast(half a, size_t x, size_t y, size_t z);
+half __ovld __conv work_group_broadcast(half, size_t local_id);
+half __ovld __conv work_group_broadcast(half, size_t, size_t);
+half __ovld __conv work_group_broadcast(half, size_t, size_t, size_t);
#endif
-int __ovld __conv work_group_broadcast(int a, size_t local_id);
-int __ovld __conv work_group_broadcast(int a, size_t x, size_t y);
-int __ovld __conv work_group_broadcast(int a, size_t x, size_t y, size_t z);
-uint __ovld __conv work_group_broadcast(uint a, size_t local_id);
-uint __ovld __conv work_group_broadcast(uint a, size_t x, size_t y);
-uint __ovld __conv work_group_broadcast(uint a, size_t x, size_t y, size_t z);
-long __ovld __conv work_group_broadcast(long a, size_t local_id);
-long __ovld __conv work_group_broadcast(long a, size_t x, size_t y);
-long __ovld __conv work_group_broadcast(long a, size_t x, size_t y, size_t z);
-ulong __ovld __conv work_group_broadcast(ulong a, size_t local_id);
-ulong __ovld __conv work_group_broadcast(ulong a, size_t x, size_t y);
-ulong __ovld __conv work_group_broadcast(ulong a, size_t x, size_t y, size_t z);
-float __ovld __conv work_group_broadcast(float a, size_t local_id);
-float __ovld __conv work_group_broadcast(float a, size_t x, size_t y);
-float __ovld __conv work_group_broadcast(float a, size_t x, size_t y, size_t z);
+int __ovld __conv work_group_broadcast(int, size_t local_id);
+int __ovld __conv work_group_broadcast(int, size_t, size_t);
+int __ovld __conv work_group_broadcast(int, size_t, size_t, size_t);
+uint __ovld __conv work_group_broadcast(uint, size_t local_id);
+uint __ovld __conv work_group_broadcast(uint, size_t, size_t);
+uint __ovld __conv work_group_broadcast(uint, size_t, size_t, size_t);
+long __ovld __conv work_group_broadcast(long, size_t local_id);
+long __ovld __conv work_group_broadcast(long, size_t, size_t);
+long __ovld __conv work_group_broadcast(long, size_t, size_t, size_t);
+ulong __ovld __conv work_group_broadcast(ulong, size_t local_id);
+ulong __ovld __conv work_group_broadcast(ulong, size_t, size_t);
+ulong __ovld __conv work_group_broadcast(ulong, size_t, size_t, size_t);
+float __ovld __conv work_group_broadcast(float, size_t local_id);
+float __ovld __conv work_group_broadcast(float, size_t, size_t);
+float __ovld __conv work_group_broadcast(float, size_t, size_t, size_t);
#ifdef cl_khr_fp64
-double __ovld __conv work_group_broadcast(double a, size_t local_id);
-double __ovld __conv work_group_broadcast(double a, size_t x, size_t y);
-double __ovld __conv work_group_broadcast(double a, size_t x, size_t y, size_t z);
+double __ovld __conv work_group_broadcast(double, size_t local_id);
+double __ovld __conv work_group_broadcast(double, size_t, size_t);
+double __ovld __conv work_group_broadcast(double, size_t, size_t, size_t);
#endif //cl_khr_fp64
#ifdef cl_khr_fp16
-half __ovld __conv work_group_reduce_add(half x);
-half __ovld __conv work_group_reduce_min(half x);
-half __ovld __conv work_group_reduce_max(half x);
-half __ovld __conv work_group_scan_exclusive_add(half x);
-half __ovld __conv work_group_scan_exclusive_min(half x);
-half __ovld __conv work_group_scan_exclusive_max(half x);
-half __ovld __conv work_group_scan_inclusive_add(half x);
-half __ovld __conv work_group_scan_inclusive_min(half x);
-half __ovld __conv work_group_scan_inclusive_max(half x);
+half __ovld __conv work_group_reduce_add(half);
+half __ovld __conv work_group_reduce_min(half);
+half __ovld __conv work_group_reduce_max(half);
+half __ovld __conv work_group_scan_exclusive_add(half);
+half __ovld __conv work_group_scan_exclusive_min(half);
+half __ovld __conv work_group_scan_exclusive_max(half);
+half __ovld __conv work_group_scan_inclusive_add(half);
+half __ovld __conv work_group_scan_inclusive_min(half);
+half __ovld __conv work_group_scan_inclusive_max(half);
#endif
-int __ovld __conv work_group_reduce_add(int x);
-int __ovld __conv work_group_reduce_min(int x);
-int __ovld __conv work_group_reduce_max(int x);
-int __ovld __conv work_group_scan_exclusive_add(int x);
-int __ovld __conv work_group_scan_exclusive_min(int x);
-int __ovld __conv work_group_scan_exclusive_max(int x);
-int __ovld __conv work_group_scan_inclusive_add(int x);
-int __ovld __conv work_group_scan_inclusive_min(int x);
-int __ovld __conv work_group_scan_inclusive_max(int x);
-uint __ovld __conv work_group_reduce_add(uint x);
-uint __ovld __conv work_group_reduce_min(uint x);
-uint __ovld __conv work_group_reduce_max(uint x);
-uint __ovld __conv work_group_scan_exclusive_add(uint x);
-uint __ovld __conv work_group_scan_exclusive_min(uint x);
-uint __ovld __conv work_group_scan_exclusive_max(uint x);
-uint __ovld __conv work_group_scan_inclusive_add(uint x);
-uint __ovld __conv work_group_scan_inclusive_min(uint x);
-uint __ovld __conv work_group_scan_inclusive_max(uint x);
-long __ovld __conv work_group_reduce_add(long x);
-long __ovld __conv work_group_reduce_min(long x);
-long __ovld __conv work_group_reduce_max(long x);
-long __ovld __conv work_group_scan_exclusive_add(long x);
-long __ovld __conv work_group_scan_exclusive_min(long x);
-long __ovld __conv work_group_scan_exclusive_max(long x);
-long __ovld __conv work_group_scan_inclusive_add(long x);
-long __ovld __conv work_group_scan_inclusive_min(long x);
-long __ovld __conv work_group_scan_inclusive_max(long x);
-ulong __ovld __conv work_group_reduce_add(ulong x);
-ulong __ovld __conv work_group_reduce_min(ulong x);
-ulong __ovld __conv work_group_reduce_max(ulong x);
-ulong __ovld __conv work_group_scan_exclusive_add(ulong x);
-ulong __ovld __conv work_group_scan_exclusive_min(ulong x);
-ulong __ovld __conv work_group_scan_exclusive_max(ulong x);
-ulong __ovld __conv work_group_scan_inclusive_add(ulong x);
-ulong __ovld __conv work_group_scan_inclusive_min(ulong x);
-ulong __ovld __conv work_group_scan_inclusive_max(ulong x);
-float __ovld __conv work_group_reduce_add(float x);
-float __ovld __conv work_group_reduce_min(float x);
-float __ovld __conv work_group_reduce_max(float x);
-float __ovld __conv work_group_scan_exclusive_add(float x);
-float __ovld __conv work_group_scan_exclusive_min(float x);
-float __ovld __conv work_group_scan_exclusive_max(float x);
-float __ovld __conv work_group_scan_inclusive_add(float x);
-float __ovld __conv work_group_scan_inclusive_min(float x);
-float __ovld __conv work_group_scan_inclusive_max(float x);
+int __ovld __conv work_group_reduce_add(int);
+int __ovld __conv work_group_reduce_min(int);
+int __ovld __conv work_group_reduce_max(int);
+int __ovld __conv work_group_scan_exclusive_add(int);
+int __ovld __conv work_group_scan_exclusive_min(int);
+int __ovld __conv work_group_scan_exclusive_max(int);
+int __ovld __conv work_group_scan_inclusive_add(int);
+int __ovld __conv work_group_scan_inclusive_min(int);
+int __ovld __conv work_group_scan_inclusive_max(int);
+uint __ovld __conv work_group_reduce_add(uint);
+uint __ovld __conv work_group_reduce_min(uint);
+uint __ovld __conv work_group_reduce_max(uint);
+uint __ovld __conv work_group_scan_exclusive_add(uint);
+uint __ovld __conv work_group_scan_exclusive_min(uint);
+uint __ovld __conv work_group_scan_exclusive_max(uint);
+uint __ovld __conv work_group_scan_inclusive_add(uint);
+uint __ovld __conv work_group_scan_inclusive_min(uint);
+uint __ovld __conv work_group_scan_inclusive_max(uint);
+long __ovld __conv work_group_reduce_add(long);
+long __ovld __conv work_group_reduce_min(long);
+long __ovld __conv work_group_reduce_max(long);
+long __ovld __conv work_group_scan_exclusive_add(long);
+long __ovld __conv work_group_scan_exclusive_min(long);
+long __ovld __conv work_group_scan_exclusive_max(long);
+long __ovld __conv work_group_scan_inclusive_add(long);
+long __ovld __conv work_group_scan_inclusive_min(long);
+long __ovld __conv work_group_scan_inclusive_max(long);
+ulong __ovld __conv work_group_reduce_add(ulong);
+ulong __ovld __conv work_group_reduce_min(ulong);
+ulong __ovld __conv work_group_reduce_max(ulong);
+ulong __ovld __conv work_group_scan_exclusive_add(ulong);
+ulong __ovld __conv work_group_scan_exclusive_min(ulong);
+ulong __ovld __conv work_group_scan_exclusive_max(ulong);
+ulong __ovld __conv work_group_scan_inclusive_add(ulong);
+ulong __ovld __conv work_group_scan_inclusive_min(ulong);
+ulong __ovld __conv work_group_scan_inclusive_max(ulong);
+float __ovld __conv work_group_reduce_add(float);
+float __ovld __conv work_group_reduce_min(float);
+float __ovld __conv work_group_reduce_max(float);
+float __ovld __conv work_group_scan_exclusive_add(float);
+float __ovld __conv work_group_scan_exclusive_min(float);
+float __ovld __conv work_group_scan_exclusive_max(float);
+float __ovld __conv work_group_scan_inclusive_add(float);
+float __ovld __conv work_group_scan_inclusive_min(float);
+float __ovld __conv work_group_scan_inclusive_max(float);
#ifdef cl_khr_fp64
-double __ovld __conv work_group_reduce_add(double x);
-double __ovld __conv work_group_reduce_min(double x);
-double __ovld __conv work_group_reduce_max(double x);
-double __ovld __conv work_group_scan_exclusive_add(double x);
-double __ovld __conv work_group_scan_exclusive_min(double x);
-double __ovld __conv work_group_scan_exclusive_max(double x);
-double __ovld __conv work_group_scan_inclusive_add(double x);
-double __ovld __conv work_group_scan_inclusive_min(double x);
-double __ovld __conv work_group_scan_inclusive_max(double x);
+double __ovld __conv work_group_reduce_add(double);
+double __ovld __conv work_group_reduce_min(double);
+double __ovld __conv work_group_reduce_max(double);
+double __ovld __conv work_group_scan_exclusive_add(double);
+double __ovld __conv work_group_scan_exclusive_min(double);
+double __ovld __conv work_group_scan_exclusive_max(double);
+double __ovld __conv work_group_scan_inclusive_add(double);
+double __ovld __conv work_group_scan_inclusive_min(double);
+double __ovld __conv work_group_scan_inclusive_max(double);
#endif //cl_khr_fp64
#endif //defined(__opencl_c_work_group_collective_functions)
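
For reference, the scan variants declared above compute running reductions across the work-group; only the parameter names are dropped by this change, the semantics are untouched. A minimal OpenCL C sketch of what the add scans produce (illustrative only, not part of the header; requires __opencl_c_work_group_collective_functions):

    // For a work-group whose items hold {3, 1, 4} in ascending local-id order:
    //   inclusive add scan -> {3, 4, 8}   (sum up to and including this item)
    //   exclusive add scan -> {0, 3, 4}   (sum of all strictly earlier items)
    __kernel void scan_demo(__global const int *in, __global int *out) {
        int v = in[get_local_id(0)];
        out[get_local_id(0)] = work_group_scan_inclusive_add(v);
    }
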
@@ -16507,7 +16265,7 @@ void __ovld set_user_event_status(clk_event_t e, int state);
bool __ovld is_valid_event (clk_event_t event);
-void __ovld capture_event_profiling_info(clk_event_t, clk_profiling_info, __global void* value);
+void __ovld capture_event_profiling_info(clk_event_t, clk_profiling_info, __global void*);
queue_t __ovld get_default_queue(void);
#endif //__opencl_c_device_enqueue
@@ -16515,7 +16273,7 @@ queue_t __ovld get_default_queue(void);
// OpenCL Extension v2.0 s9.17 - Sub-groups
-#if defined(cl_intel_subgroups) || defined(cl_khr_subgroups) || defined(__opencl_c_subgroups)
+#if defined(__opencl_subgroup_builtins)
// Shared Sub Group Functions
uint __ovld get_sub_group_size(void);
uint __ovld get_max_sub_group_size(void);
@@ -16528,93 +16286,93 @@ uint __ovld get_sub_group_local_id(void);
void __ovld __conv sub_group_barrier(cl_mem_fence_flags flags);
#if defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
-void __ovld __conv sub_group_barrier(cl_mem_fence_flags flags, memory_scope scope);
+void __ovld __conv sub_group_barrier(cl_mem_fence_flags flags, memory_scope);
#endif //defined(__OPENCL_CPP_VERSION__) || (__OPENCL_C_VERSION__ >= CL_VERSION_2_0)
int __ovld __conv sub_group_all(int predicate);
int __ovld __conv sub_group_any(int predicate);
-int __ovld __conv sub_group_broadcast(int x, uint sub_group_local_id);
-uint __ovld __conv sub_group_broadcast(uint x, uint sub_group_local_id);
-long __ovld __conv sub_group_broadcast(long x, uint sub_group_local_id);
-ulong __ovld __conv sub_group_broadcast(ulong x, uint sub_group_local_id);
-float __ovld __conv sub_group_broadcast(float x, uint sub_group_local_id);
-
-int __ovld __conv sub_group_reduce_add(int x);
-uint __ovld __conv sub_group_reduce_add(uint x);
-long __ovld __conv sub_group_reduce_add(long x);
-ulong __ovld __conv sub_group_reduce_add(ulong x);
-float __ovld __conv sub_group_reduce_add(float x);
-int __ovld __conv sub_group_reduce_min(int x);
-uint __ovld __conv sub_group_reduce_min(uint x);
-long __ovld __conv sub_group_reduce_min(long x);
-ulong __ovld __conv sub_group_reduce_min(ulong x);
-float __ovld __conv sub_group_reduce_min(float x);
-int __ovld __conv sub_group_reduce_max(int x);
-uint __ovld __conv sub_group_reduce_max(uint x);
-long __ovld __conv sub_group_reduce_max(long x);
-ulong __ovld __conv sub_group_reduce_max(ulong x);
-float __ovld __conv sub_group_reduce_max(float x);
-
-int __ovld __conv sub_group_scan_exclusive_add(int x);
-uint __ovld __conv sub_group_scan_exclusive_add(uint x);
-long __ovld __conv sub_group_scan_exclusive_add(long x);
-ulong __ovld __conv sub_group_scan_exclusive_add(ulong x);
-float __ovld __conv sub_group_scan_exclusive_add(float x);
-int __ovld __conv sub_group_scan_exclusive_min(int x);
-uint __ovld __conv sub_group_scan_exclusive_min(uint x);
-long __ovld __conv sub_group_scan_exclusive_min(long x);
-ulong __ovld __conv sub_group_scan_exclusive_min(ulong x);
-float __ovld __conv sub_group_scan_exclusive_min(float x);
-int __ovld __conv sub_group_scan_exclusive_max(int x);
-uint __ovld __conv sub_group_scan_exclusive_max(uint x);
-long __ovld __conv sub_group_scan_exclusive_max(long x);
-ulong __ovld __conv sub_group_scan_exclusive_max(ulong x);
-float __ovld __conv sub_group_scan_exclusive_max(float x);
-
-int __ovld __conv sub_group_scan_inclusive_add(int x);
-uint __ovld __conv sub_group_scan_inclusive_add(uint x);
-long __ovld __conv sub_group_scan_inclusive_add(long x);
-ulong __ovld __conv sub_group_scan_inclusive_add(ulong x);
-float __ovld __conv sub_group_scan_inclusive_add(float x);
-int __ovld __conv sub_group_scan_inclusive_min(int x);
-uint __ovld __conv sub_group_scan_inclusive_min(uint x);
-long __ovld __conv sub_group_scan_inclusive_min(long x);
-ulong __ovld __conv sub_group_scan_inclusive_min(ulong x);
-float __ovld __conv sub_group_scan_inclusive_min(float x);
-int __ovld __conv sub_group_scan_inclusive_max(int x);
-uint __ovld __conv sub_group_scan_inclusive_max(uint x);
-long __ovld __conv sub_group_scan_inclusive_max(long x);
-ulong __ovld __conv sub_group_scan_inclusive_max(ulong x);
-float __ovld __conv sub_group_scan_inclusive_max(float x);
+int __ovld __conv sub_group_broadcast(int , uint sub_group_local_id);
+uint __ovld __conv sub_group_broadcast(uint , uint sub_group_local_id);
+long __ovld __conv sub_group_broadcast(long , uint sub_group_local_id);
+ulong __ovld __conv sub_group_broadcast(ulong, uint sub_group_local_id);
+float __ovld __conv sub_group_broadcast(float, uint sub_group_local_id);
+
+int __ovld __conv sub_group_reduce_add(int );
+uint __ovld __conv sub_group_reduce_add(uint );
+long __ovld __conv sub_group_reduce_add(long );
+ulong __ovld __conv sub_group_reduce_add(ulong);
+float __ovld __conv sub_group_reduce_add(float);
+int __ovld __conv sub_group_reduce_min(int );
+uint __ovld __conv sub_group_reduce_min(uint );
+long __ovld __conv sub_group_reduce_min(long );
+ulong __ovld __conv sub_group_reduce_min(ulong);
+float __ovld __conv sub_group_reduce_min(float);
+int __ovld __conv sub_group_reduce_max(int );
+uint __ovld __conv sub_group_reduce_max(uint );
+long __ovld __conv sub_group_reduce_max(long );
+ulong __ovld __conv sub_group_reduce_max(ulong);
+float __ovld __conv sub_group_reduce_max(float);
+
+int __ovld __conv sub_group_scan_exclusive_add(int );
+uint __ovld __conv sub_group_scan_exclusive_add(uint );
+long __ovld __conv sub_group_scan_exclusive_add(long );
+ulong __ovld __conv sub_group_scan_exclusive_add(ulong);
+float __ovld __conv sub_group_scan_exclusive_add(float);
+int __ovld __conv sub_group_scan_exclusive_min(int );
+uint __ovld __conv sub_group_scan_exclusive_min(uint );
+long __ovld __conv sub_group_scan_exclusive_min(long );
+ulong __ovld __conv sub_group_scan_exclusive_min(ulong);
+float __ovld __conv sub_group_scan_exclusive_min(float);
+int __ovld __conv sub_group_scan_exclusive_max(int );
+uint __ovld __conv sub_group_scan_exclusive_max(uint );
+long __ovld __conv sub_group_scan_exclusive_max(long );
+ulong __ovld __conv sub_group_scan_exclusive_max(ulong);
+float __ovld __conv sub_group_scan_exclusive_max(float);
+
+int __ovld __conv sub_group_scan_inclusive_add(int );
+uint __ovld __conv sub_group_scan_inclusive_add(uint );
+long __ovld __conv sub_group_scan_inclusive_add(long );
+ulong __ovld __conv sub_group_scan_inclusive_add(ulong);
+float __ovld __conv sub_group_scan_inclusive_add(float);
+int __ovld __conv sub_group_scan_inclusive_min(int );
+uint __ovld __conv sub_group_scan_inclusive_min(uint );
+long __ovld __conv sub_group_scan_inclusive_min(long );
+ulong __ovld __conv sub_group_scan_inclusive_min(ulong);
+float __ovld __conv sub_group_scan_inclusive_min(float);
+int __ovld __conv sub_group_scan_inclusive_max(int );
+uint __ovld __conv sub_group_scan_inclusive_max(uint );
+long __ovld __conv sub_group_scan_inclusive_max(long );
+ulong __ovld __conv sub_group_scan_inclusive_max(ulong);
+float __ovld __conv sub_group_scan_inclusive_max(float);
#ifdef cl_khr_fp16
-half __ovld __conv sub_group_broadcast(half x, uint sub_group_local_id);
-half __ovld __conv sub_group_reduce_add(half x);
-half __ovld __conv sub_group_reduce_min(half x);
-half __ovld __conv sub_group_reduce_max(half x);
-half __ovld __conv sub_group_scan_exclusive_add(half x);
-half __ovld __conv sub_group_scan_exclusive_min(half x);
-half __ovld __conv sub_group_scan_exclusive_max(half x);
-half __ovld __conv sub_group_scan_inclusive_add(half x);
-half __ovld __conv sub_group_scan_inclusive_min(half x);
-half __ovld __conv sub_group_scan_inclusive_max(half x);
+half __ovld __conv sub_group_broadcast(half, uint sub_group_local_id);
+half __ovld __conv sub_group_reduce_add(half);
+half __ovld __conv sub_group_reduce_min(half);
+half __ovld __conv sub_group_reduce_max(half);
+half __ovld __conv sub_group_scan_exclusive_add(half);
+half __ovld __conv sub_group_scan_exclusive_min(half);
+half __ovld __conv sub_group_scan_exclusive_max(half);
+half __ovld __conv sub_group_scan_inclusive_add(half);
+half __ovld __conv sub_group_scan_inclusive_min(half);
+half __ovld __conv sub_group_scan_inclusive_max(half);
#endif //cl_khr_fp16
#ifdef cl_khr_fp64
-double __ovld __conv sub_group_broadcast(double x, uint sub_group_local_id);
-double __ovld __conv sub_group_reduce_add(double x);
-double __ovld __conv sub_group_reduce_min(double x);
-double __ovld __conv sub_group_reduce_max(double x);
-double __ovld __conv sub_group_scan_exclusive_add(double x);
-double __ovld __conv sub_group_scan_exclusive_min(double x);
-double __ovld __conv sub_group_scan_exclusive_max(double x);
-double __ovld __conv sub_group_scan_inclusive_add(double x);
-double __ovld __conv sub_group_scan_inclusive_min(double x);
-double __ovld __conv sub_group_scan_inclusive_max(double x);
+double __ovld __conv sub_group_broadcast(double, uint sub_group_local_id);
+double __ovld __conv sub_group_reduce_add(double);
+double __ovld __conv sub_group_reduce_min(double);
+double __ovld __conv sub_group_reduce_max(double);
+double __ovld __conv sub_group_scan_exclusive_add(double);
+double __ovld __conv sub_group_scan_exclusive_min(double);
+double __ovld __conv sub_group_scan_exclusive_max(double);
+double __ovld __conv sub_group_scan_inclusive_add(double);
+double __ovld __conv sub_group_scan_inclusive_min(double);
+double __ovld __conv sub_group_scan_inclusive_max(double);
#endif //cl_khr_fp64
-#endif //cl_khr_subgroups cl_intel_subgroups __opencl_c_subgroups
+#endif // __opencl_subgroup_builtins
#if defined(cl_khr_subgroup_extended_types)
char __ovld __conv sub_group_broadcast( char value, uint index );
@@ -17508,116 +17266,150 @@ int __ovld __cnfn dot_acc_sat_4x8packed_us_int(uint, uint, int);
int __ovld __cnfn dot_acc_sat_4x8packed_su_int(uint, uint, int);
#endif // __opencl_c_integer_dot_product_input_4x8bit_packed
+#if defined(cl_khr_subgroup_rotate)
+char __ovld __conv sub_group_rotate(char, int);
+uchar __ovld __conv sub_group_rotate(uchar, int);
+short __ovld __conv sub_group_rotate(short, int);
+ushort __ovld __conv sub_group_rotate(ushort, int);
+int __ovld __conv sub_group_rotate(int, int);
+uint __ovld __conv sub_group_rotate(uint, int);
+long __ovld __conv sub_group_rotate(long, int);
+ulong __ovld __conv sub_group_rotate(ulong, int);
+float __ovld __conv sub_group_rotate(float, int);
+#if defined(cl_khr_fp64)
+double __ovld __conv sub_group_rotate(double, int);
+#endif // cl_khr_fp64
+#if defined(cl_khr_fp16)
+half __ovld __conv sub_group_rotate(half, int);
+#endif // cl_khr_fp16
+
+char __ovld __conv sub_group_clustered_rotate(char, int, uint);
+uchar __ovld __conv sub_group_clustered_rotate(uchar, int, uint);
+short __ovld __conv sub_group_clustered_rotate(short, int, uint);
+ushort __ovld __conv sub_group_clustered_rotate(ushort, int, uint);
+int __ovld __conv sub_group_clustered_rotate(int, int, uint);
+uint __ovld __conv sub_group_clustered_rotate(uint, int, uint);
+long __ovld __conv sub_group_clustered_rotate(long, int, uint);
+ulong __ovld __conv sub_group_clustered_rotate(ulong, int, uint);
+float __ovld __conv sub_group_clustered_rotate(float, int, uint);
+#if defined(cl_khr_fp64)
+double __ovld __conv sub_group_clustered_rotate(double, int, uint);
+#endif // cl_khr_fp64
+#if defined(cl_khr_fp16)
+half __ovld __conv sub_group_clustered_rotate(half, int, uint);
+#endif // cl_khr_fp16
+#endif // cl_khr_subgroup_rotate
+
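
The lane mapping performed by sub_group_rotate can be modelled in plain C. This is a sketch under the assumption that the extension follows its published description; rotate_source is a hypothetical helper, not part of any header:

    // Lane i of a sub-group of size n receives the value held by lane
    // (i + delta) mod n; delta may be negative, hence the extra + n.
    unsigned rotate_source(unsigned i, int delta, unsigned n) {
        int j = ((int)i + (delta % (int)n) + (int)n) % (int)n;
        return (unsigned)j;
    }
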
#if defined(cl_intel_subgroups)
// Intel-Specific Sub Group Functions
-float __ovld __conv intel_sub_group_shuffle( float x, uint c );
-float2 __ovld __conv intel_sub_group_shuffle( float2 x, uint c );
-float3 __ovld __conv intel_sub_group_shuffle( float3 x, uint c );
-float4 __ovld __conv intel_sub_group_shuffle( float4 x, uint c );
-float8 __ovld __conv intel_sub_group_shuffle( float8 x, uint c );
-float16 __ovld __conv intel_sub_group_shuffle( float16 x, uint c );
-
-int __ovld __conv intel_sub_group_shuffle( int x, uint c );
-int2 __ovld __conv intel_sub_group_shuffle( int2 x, uint c );
-int3 __ovld __conv intel_sub_group_shuffle( int3 x, uint c );
-int4 __ovld __conv intel_sub_group_shuffle( int4 x, uint c );
-int8 __ovld __conv intel_sub_group_shuffle( int8 x, uint c );
-int16 __ovld __conv intel_sub_group_shuffle( int16 x, uint c );
-
-uint __ovld __conv intel_sub_group_shuffle( uint x, uint c );
-uint2 __ovld __conv intel_sub_group_shuffle( uint2 x, uint c );
-uint3 __ovld __conv intel_sub_group_shuffle( uint3 x, uint c );
-uint4 __ovld __conv intel_sub_group_shuffle( uint4 x, uint c );
-uint8 __ovld __conv intel_sub_group_shuffle( uint8 x, uint c );
-uint16 __ovld __conv intel_sub_group_shuffle( uint16 x, uint c );
-
-long __ovld __conv intel_sub_group_shuffle( long x, uint c );
-ulong __ovld __conv intel_sub_group_shuffle( ulong x, uint c );
-
-float __ovld __conv intel_sub_group_shuffle_down( float cur, float next, uint c );
-float2 __ovld __conv intel_sub_group_shuffle_down( float2 cur, float2 next, uint c );
-float3 __ovld __conv intel_sub_group_shuffle_down( float3 cur, float3 next, uint c );
-float4 __ovld __conv intel_sub_group_shuffle_down( float4 cur, float4 next, uint c );
-float8 __ovld __conv intel_sub_group_shuffle_down( float8 cur, float8 next, uint c );
-float16 __ovld __conv intel_sub_group_shuffle_down( float16 cur, float16 next, uint c );
-
-int __ovld __conv intel_sub_group_shuffle_down( int cur, int next, uint c );
-int2 __ovld __conv intel_sub_group_shuffle_down( int2 cur, int2 next, uint c );
-int3 __ovld __conv intel_sub_group_shuffle_down( int3 cur, int3 next, uint c );
-int4 __ovld __conv intel_sub_group_shuffle_down( int4 cur, int4 next, uint c );
-int8 __ovld __conv intel_sub_group_shuffle_down( int8 cur, int8 next, uint c );
-int16 __ovld __conv intel_sub_group_shuffle_down( int16 cur, int16 next, uint c );
-
-uint __ovld __conv intel_sub_group_shuffle_down( uint cur, uint next, uint c );
-uint2 __ovld __conv intel_sub_group_shuffle_down( uint2 cur, uint2 next, uint c );
-uint3 __ovld __conv intel_sub_group_shuffle_down( uint3 cur, uint3 next, uint c );
-uint4 __ovld __conv intel_sub_group_shuffle_down( uint4 cur, uint4 next, uint c );
-uint8 __ovld __conv intel_sub_group_shuffle_down( uint8 cur, uint8 next, uint c );
-uint16 __ovld __conv intel_sub_group_shuffle_down( uint16 cur, uint16 next, uint c );
-
-long __ovld __conv intel_sub_group_shuffle_down( long prev, long cur, uint c );
-ulong __ovld __conv intel_sub_group_shuffle_down( ulong prev, ulong cur, uint c );
-
-float __ovld __conv intel_sub_group_shuffle_up( float prev, float cur, uint c );
-float2 __ovld __conv intel_sub_group_shuffle_up( float2 prev, float2 cur, uint c );
-float3 __ovld __conv intel_sub_group_shuffle_up( float3 prev, float3 cur, uint c );
-float4 __ovld __conv intel_sub_group_shuffle_up( float4 prev, float4 cur, uint c );
-float8 __ovld __conv intel_sub_group_shuffle_up( float8 prev, float8 cur, uint c );
-float16 __ovld __conv intel_sub_group_shuffle_up( float16 prev, float16 cur, uint c );
-
-int __ovld __conv intel_sub_group_shuffle_up( int prev, int cur, uint c );
-int2 __ovld __conv intel_sub_group_shuffle_up( int2 prev, int2 cur, uint c );
-int3 __ovld __conv intel_sub_group_shuffle_up( int3 prev, int3 cur, uint c );
-int4 __ovld __conv intel_sub_group_shuffle_up( int4 prev, int4 cur, uint c );
-int8 __ovld __conv intel_sub_group_shuffle_up( int8 prev, int8 cur, uint c );
-int16 __ovld __conv intel_sub_group_shuffle_up( int16 prev, int16 cur, uint c );
-
-uint __ovld __conv intel_sub_group_shuffle_up( uint prev, uint cur, uint c );
-uint2 __ovld __conv intel_sub_group_shuffle_up( uint2 prev, uint2 cur, uint c );
-uint3 __ovld __conv intel_sub_group_shuffle_up( uint3 prev, uint3 cur, uint c );
-uint4 __ovld __conv intel_sub_group_shuffle_up( uint4 prev, uint4 cur, uint c );
-uint8 __ovld __conv intel_sub_group_shuffle_up( uint8 prev, uint8 cur, uint c );
-uint16 __ovld __conv intel_sub_group_shuffle_up( uint16 prev, uint16 cur, uint c );
-
-long __ovld __conv intel_sub_group_shuffle_up( long prev, long cur, uint c );
-ulong __ovld __conv intel_sub_group_shuffle_up( ulong prev, ulong cur, uint c );
-
-float __ovld __conv intel_sub_group_shuffle_xor( float x, uint c );
-float2 __ovld __conv intel_sub_group_shuffle_xor( float2 x, uint c );
-float3 __ovld __conv intel_sub_group_shuffle_xor( float3 x, uint c );
-float4 __ovld __conv intel_sub_group_shuffle_xor( float4 x, uint c );
-float8 __ovld __conv intel_sub_group_shuffle_xor( float8 x, uint c );
-float16 __ovld __conv intel_sub_group_shuffle_xor( float16 x, uint c );
-
-int __ovld __conv intel_sub_group_shuffle_xor( int x, uint c );
-int2 __ovld __conv intel_sub_group_shuffle_xor( int2 x, uint c );
-int3 __ovld __conv intel_sub_group_shuffle_xor( int3 x, uint c );
-int4 __ovld __conv intel_sub_group_shuffle_xor( int4 x, uint c );
-int8 __ovld __conv intel_sub_group_shuffle_xor( int8 x, uint c );
-int16 __ovld __conv intel_sub_group_shuffle_xor( int16 x, uint c );
-
-uint __ovld __conv intel_sub_group_shuffle_xor( uint x, uint c );
-uint2 __ovld __conv intel_sub_group_shuffle_xor( uint2 x, uint c );
-uint3 __ovld __conv intel_sub_group_shuffle_xor( uint3 x, uint c );
-uint4 __ovld __conv intel_sub_group_shuffle_xor( uint4 x, uint c );
-uint8 __ovld __conv intel_sub_group_shuffle_xor( uint8 x, uint c );
-uint16 __ovld __conv intel_sub_group_shuffle_xor( uint16 x, uint c );
-
-long __ovld __conv intel_sub_group_shuffle_xor( long x, uint c );
-ulong __ovld __conv intel_sub_group_shuffle_xor( ulong x, uint c );
+float __ovld __conv intel_sub_group_shuffle( float , uint );
+float2 __ovld __conv intel_sub_group_shuffle( float2, uint );
+float3 __ovld __conv intel_sub_group_shuffle( float3, uint );
+float4 __ovld __conv intel_sub_group_shuffle( float4, uint );
+float8 __ovld __conv intel_sub_group_shuffle( float8, uint );
+float16 __ovld __conv intel_sub_group_shuffle( float16, uint );
+
+int __ovld __conv intel_sub_group_shuffle( int , uint );
+int2 __ovld __conv intel_sub_group_shuffle( int2, uint );
+int3 __ovld __conv intel_sub_group_shuffle( int3, uint );
+int4 __ovld __conv intel_sub_group_shuffle( int4, uint );
+int8 __ovld __conv intel_sub_group_shuffle( int8, uint );
+int16 __ovld __conv intel_sub_group_shuffle( int16, uint );
+
+uint __ovld __conv intel_sub_group_shuffle( uint , uint );
+uint2 __ovld __conv intel_sub_group_shuffle( uint2, uint );
+uint3 __ovld __conv intel_sub_group_shuffle( uint3, uint );
+uint4 __ovld __conv intel_sub_group_shuffle( uint4, uint );
+uint8 __ovld __conv intel_sub_group_shuffle( uint8, uint );
+uint16 __ovld __conv intel_sub_group_shuffle( uint16, uint );
+
+long __ovld __conv intel_sub_group_shuffle( long, uint );
+ulong __ovld __conv intel_sub_group_shuffle( ulong, uint );
+
+float __ovld __conv intel_sub_group_shuffle_down( float cur, float next, uint );
+float2 __ovld __conv intel_sub_group_shuffle_down( float2 cur, float2 next, uint );
+float3 __ovld __conv intel_sub_group_shuffle_down( float3 cur, float3 next, uint );
+float4 __ovld __conv intel_sub_group_shuffle_down( float4 cur, float4 next, uint );
+float8 __ovld __conv intel_sub_group_shuffle_down( float8 cur, float8 next, uint );
+float16 __ovld __conv intel_sub_group_shuffle_down( float16 cur, float16 next, uint );
+
+int __ovld __conv intel_sub_group_shuffle_down( int cur, int next, uint );
+int2 __ovld __conv intel_sub_group_shuffle_down( int2 cur, int2 next, uint );
+int3 __ovld __conv intel_sub_group_shuffle_down( int3 cur, int3 next, uint );
+int4 __ovld __conv intel_sub_group_shuffle_down( int4 cur, int4 next, uint );
+int8 __ovld __conv intel_sub_group_shuffle_down( int8 cur, int8 next, uint );
+int16 __ovld __conv intel_sub_group_shuffle_down( int16 cur, int16 next, uint );
+
+uint __ovld __conv intel_sub_group_shuffle_down( uint cur, uint next, uint );
+uint2 __ovld __conv intel_sub_group_shuffle_down( uint2 cur, uint2 next, uint );
+uint3 __ovld __conv intel_sub_group_shuffle_down( uint3 cur, uint3 next, uint );
+uint4 __ovld __conv intel_sub_group_shuffle_down( uint4 cur, uint4 next, uint );
+uint8 __ovld __conv intel_sub_group_shuffle_down( uint8 cur, uint8 next, uint );
+uint16 __ovld __conv intel_sub_group_shuffle_down( uint16 cur, uint16 next, uint );
+
+long __ovld __conv intel_sub_group_shuffle_down( long prev, long cur, uint );
+ulong __ovld __conv intel_sub_group_shuffle_down( ulong prev, ulong cur, uint );
+
+float __ovld __conv intel_sub_group_shuffle_up( float prev, float cur, uint );
+float2 __ovld __conv intel_sub_group_shuffle_up( float2 prev, float2 cur, uint );
+float3 __ovld __conv intel_sub_group_shuffle_up( float3 prev, float3 cur, uint );
+float4 __ovld __conv intel_sub_group_shuffle_up( float4 prev, float4 cur, uint );
+float8 __ovld __conv intel_sub_group_shuffle_up( float8 prev, float8 cur, uint );
+float16 __ovld __conv intel_sub_group_shuffle_up( float16 prev, float16 cur, uint );
+
+int __ovld __conv intel_sub_group_shuffle_up( int prev, int cur, uint );
+int2 __ovld __conv intel_sub_group_shuffle_up( int2 prev, int2 cur, uint );
+int3 __ovld __conv intel_sub_group_shuffle_up( int3 prev, int3 cur, uint );
+int4 __ovld __conv intel_sub_group_shuffle_up( int4 prev, int4 cur, uint );
+int8 __ovld __conv intel_sub_group_shuffle_up( int8 prev, int8 cur, uint );
+int16 __ovld __conv intel_sub_group_shuffle_up( int16 prev, int16 cur, uint );
+
+uint __ovld __conv intel_sub_group_shuffle_up( uint prev, uint cur, uint );
+uint2 __ovld __conv intel_sub_group_shuffle_up( uint2 prev, uint2 cur, uint );
+uint3 __ovld __conv intel_sub_group_shuffle_up( uint3 prev, uint3 cur, uint );
+uint4 __ovld __conv intel_sub_group_shuffle_up( uint4 prev, uint4 cur, uint );
+uint8 __ovld __conv intel_sub_group_shuffle_up( uint8 prev, uint8 cur, uint );
+uint16 __ovld __conv intel_sub_group_shuffle_up( uint16 prev, uint16 cur, uint );
+
+long __ovld __conv intel_sub_group_shuffle_up( long prev, long cur, uint );
+ulong __ovld __conv intel_sub_group_shuffle_up( ulong prev, ulong cur, uint );
+
+float __ovld __conv intel_sub_group_shuffle_xor( float , uint );
+float2 __ovld __conv intel_sub_group_shuffle_xor( float2, uint );
+float3 __ovld __conv intel_sub_group_shuffle_xor( float3, uint );
+float4 __ovld __conv intel_sub_group_shuffle_xor( float4, uint );
+float8 __ovld __conv intel_sub_group_shuffle_xor( float8, uint );
+float16 __ovld __conv intel_sub_group_shuffle_xor( float16, uint );
+
+int __ovld __conv intel_sub_group_shuffle_xor( int , uint );
+int2 __ovld __conv intel_sub_group_shuffle_xor( int2, uint );
+int3 __ovld __conv intel_sub_group_shuffle_xor( int3, uint );
+int4 __ovld __conv intel_sub_group_shuffle_xor( int4, uint );
+int8 __ovld __conv intel_sub_group_shuffle_xor( int8, uint );
+int16 __ovld __conv intel_sub_group_shuffle_xor( int16, uint );
+
+uint __ovld __conv intel_sub_group_shuffle_xor( uint , uint );
+uint2 __ovld __conv intel_sub_group_shuffle_xor( uint2, uint );
+uint3 __ovld __conv intel_sub_group_shuffle_xor( uint3, uint );
+uint4 __ovld __conv intel_sub_group_shuffle_xor( uint4, uint );
+uint8 __ovld __conv intel_sub_group_shuffle_xor( uint8, uint );
+uint16 __ovld __conv intel_sub_group_shuffle_xor( uint16, uint );
+
+long __ovld __conv intel_sub_group_shuffle_xor( long, uint );
+ulong __ovld __conv intel_sub_group_shuffle_xor( ulong, uint );
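
The two-operand shuffle_down/up variants above index a virtual concatenation of their first two arguments. A hypothetical per-lane model, assuming the behaviour described for cl_intel_subgroups (results past the second operand are undefined in the extension):

    // Lane i returns cur[i + delta] while that stays inside the sub-group
    // of size n; the overflow spills into next[].
    float shuffle_down_model(const float *cur, const float *next,
                             unsigned i, unsigned delta, unsigned n) {
        unsigned j = i + delta;
        return (j < n) ? cur[j] : next[j - n];
    }
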
#if defined(__opencl_c_images)
-uint __ovld __conv intel_sub_group_block_read( read_only image2d_t image, int2 coord );
-uint2 __ovld __conv intel_sub_group_block_read2( read_only image2d_t image, int2 coord );
-uint4 __ovld __conv intel_sub_group_block_read4( read_only image2d_t image, int2 coord );
-uint8 __ovld __conv intel_sub_group_block_read8( read_only image2d_t image, int2 coord );
+uint __ovld __conv intel_sub_group_block_read(read_only image2d_t, int2);
+uint2 __ovld __conv intel_sub_group_block_read2(read_only image2d_t, int2);
+uint4 __ovld __conv intel_sub_group_block_read4(read_only image2d_t, int2);
+uint8 __ovld __conv intel_sub_group_block_read8(read_only image2d_t, int2);
#endif
#if defined(__opencl_c_read_write_images)
-uint __ovld __conv intel_sub_group_block_read(read_write image2d_t image, int2 coord);
-uint2 __ovld __conv intel_sub_group_block_read2(read_write image2d_t image, int2 coord);
-uint4 __ovld __conv intel_sub_group_block_read4(read_write image2d_t image, int2 coord);
-uint8 __ovld __conv intel_sub_group_block_read8(read_write image2d_t image, int2 coord);
+uint __ovld __conv intel_sub_group_block_read(read_write image2d_t, int2);
+uint2 __ovld __conv intel_sub_group_block_read2(read_write image2d_t, int2);
+uint4 __ovld __conv intel_sub_group_block_read4(read_write image2d_t, int2);
+uint8 __ovld __conv intel_sub_group_block_read8(read_write image2d_t, int2);
#endif // defined(__opencl_c_read_write_images)
uint __ovld __conv intel_sub_group_block_read( const __global uint* p );
@@ -17626,17 +17418,17 @@ uint4 __ovld __conv intel_sub_group_block_read4( const __global uint* p );
uint8 __ovld __conv intel_sub_group_block_read8( const __global uint* p );
#if defined(__opencl_c_images)
-void __ovld __conv intel_sub_group_block_write(write_only image2d_t image, int2 coord, uint data);
-void __ovld __conv intel_sub_group_block_write2(write_only image2d_t image, int2 coord, uint2 data);
-void __ovld __conv intel_sub_group_block_write4(write_only image2d_t image, int2 coord, uint4 data);
-void __ovld __conv intel_sub_group_block_write8(write_only image2d_t image, int2 coord, uint8 data);
+void __ovld __conv intel_sub_group_block_write(write_only image2d_t, int2, uint);
+void __ovld __conv intel_sub_group_block_write2(write_only image2d_t, int2, uint2);
+void __ovld __conv intel_sub_group_block_write4(write_only image2d_t, int2, uint4);
+void __ovld __conv intel_sub_group_block_write8(write_only image2d_t, int2, uint8);
#endif // defined(__opencl_c_images)
#if defined(__opencl_c_read_write_images)
-void __ovld __conv intel_sub_group_block_write(read_write image2d_t image, int2 coord, uint data);
-void __ovld __conv intel_sub_group_block_write2(read_write image2d_t image, int2 coord, uint2 data);
-void __ovld __conv intel_sub_group_block_write4(read_write image2d_t image, int2 coord, uint4 data);
-void __ovld __conv intel_sub_group_block_write8(read_write image2d_t image, int2 coord, uint8 data);
+void __ovld __conv intel_sub_group_block_write(read_write image2d_t, int2, uint);
+void __ovld __conv intel_sub_group_block_write2(read_write image2d_t, int2, uint2);
+void __ovld __conv intel_sub_group_block_write4(read_write image2d_t, int2, uint4);
+void __ovld __conv intel_sub_group_block_write8(read_write image2d_t, int2, uint8);
#endif // defined(__opencl_c_read_write_images)
void __ovld __conv intel_sub_group_block_write( __global uint* p, uint data );
@@ -17645,89 +17437,89 @@ void __ovld __conv intel_sub_group_block_write4( __global uint* p, uint4 data
void __ovld __conv intel_sub_group_block_write8( __global uint* p, uint8 data );
#ifdef cl_khr_fp16
-half __ovld __conv intel_sub_group_shuffle( half x, uint c );
-half __ovld __conv intel_sub_group_shuffle_down( half prev, half cur, uint c );
-half __ovld __conv intel_sub_group_shuffle_up( half prev, half cur, uint c );
-half __ovld __conv intel_sub_group_shuffle_xor( half x, uint c );
+half __ovld __conv intel_sub_group_shuffle( half, uint );
+half __ovld __conv intel_sub_group_shuffle_down( half prev, half cur, uint );
+half __ovld __conv intel_sub_group_shuffle_up( half prev, half cur, uint );
+half __ovld __conv intel_sub_group_shuffle_xor( half, uint );
#endif
#if defined(cl_khr_fp64)
-double __ovld __conv intel_sub_group_shuffle( double x, uint c );
-double __ovld __conv intel_sub_group_shuffle_down( double prev, double cur, uint c );
-double __ovld __conv intel_sub_group_shuffle_up( double prev, double cur, uint c );
-double __ovld __conv intel_sub_group_shuffle_xor( double x, uint c );
+double __ovld __conv intel_sub_group_shuffle( double, uint );
+double __ovld __conv intel_sub_group_shuffle_down( double prev, double cur, uint );
+double __ovld __conv intel_sub_group_shuffle_up( double prev, double cur, uint );
+double __ovld __conv intel_sub_group_shuffle_xor( double, uint );
#endif
#endif //cl_intel_subgroups
#if defined(cl_intel_subgroups_short)
-short __ovld __conv intel_sub_group_broadcast( short x, uint sub_group_local_id );
-short2 __ovld __conv intel_sub_group_broadcast( short2 x, uint sub_group_local_id );
-short3 __ovld __conv intel_sub_group_broadcast( short3 x, uint sub_group_local_id );
-short4 __ovld __conv intel_sub_group_broadcast( short4 x, uint sub_group_local_id );
-short8 __ovld __conv intel_sub_group_broadcast( short8 x, uint sub_group_local_id );
-
-ushort __ovld __conv intel_sub_group_broadcast( ushort x, uint sub_group_local_id );
-ushort2 __ovld __conv intel_sub_group_broadcast( ushort2 x, uint sub_group_local_id );
-ushort3 __ovld __conv intel_sub_group_broadcast( ushort3 x, uint sub_group_local_id );
-ushort4 __ovld __conv intel_sub_group_broadcast( ushort4 x, uint sub_group_local_id );
-ushort8 __ovld __conv intel_sub_group_broadcast( ushort8 x, uint sub_group_local_id );
-
-short __ovld __conv intel_sub_group_shuffle( short x, uint c );
-short2 __ovld __conv intel_sub_group_shuffle( short2 x, uint c );
-short3 __ovld __conv intel_sub_group_shuffle( short3 x, uint c );
-short4 __ovld __conv intel_sub_group_shuffle( short4 x, uint c );
-short8 __ovld __conv intel_sub_group_shuffle( short8 x, uint c );
-short16 __ovld __conv intel_sub_group_shuffle( short16 x, uint c);
-
-ushort __ovld __conv intel_sub_group_shuffle( ushort x, uint c );
-ushort2 __ovld __conv intel_sub_group_shuffle( ushort2 x, uint c );
-ushort3 __ovld __conv intel_sub_group_shuffle( ushort3 x, uint c );
-ushort4 __ovld __conv intel_sub_group_shuffle( ushort4 x, uint c );
-ushort8 __ovld __conv intel_sub_group_shuffle( ushort8 x, uint c );
-ushort16 __ovld __conv intel_sub_group_shuffle( ushort16 x, uint c );
-
-short __ovld __conv intel_sub_group_shuffle_down( short cur, short next, uint c );
-short2 __ovld __conv intel_sub_group_shuffle_down( short2 cur, short2 next, uint c );
-short3 __ovld __conv intel_sub_group_shuffle_down( short3 cur, short3 next, uint c );
-short4 __ovld __conv intel_sub_group_shuffle_down( short4 cur, short4 next, uint c );
-short8 __ovld __conv intel_sub_group_shuffle_down( short8 cur, short8 next, uint c );
-short16 __ovld __conv intel_sub_group_shuffle_down( short16 cur, short16 next, uint c );
-
-ushort __ovld __conv intel_sub_group_shuffle_down( ushort cur, ushort next, uint c );
-ushort2 __ovld __conv intel_sub_group_shuffle_down( ushort2 cur, ushort2 next, uint c );
-ushort3 __ovld __conv intel_sub_group_shuffle_down( ushort3 cur, ushort3 next, uint c );
-ushort4 __ovld __conv intel_sub_group_shuffle_down( ushort4 cur, ushort4 next, uint c );
-ushort8 __ovld __conv intel_sub_group_shuffle_down( ushort8 cur, ushort8 next, uint c );
-ushort16 __ovld __conv intel_sub_group_shuffle_down( ushort16 cur, ushort16 next, uint c );
-
-short __ovld __conv intel_sub_group_shuffle_up( short cur, short next, uint c );
-short2 __ovld __conv intel_sub_group_shuffle_up( short2 cur, short2 next, uint c );
-short3 __ovld __conv intel_sub_group_shuffle_up( short3 cur, short3 next, uint c );
-short4 __ovld __conv intel_sub_group_shuffle_up( short4 cur, short4 next, uint c );
-short8 __ovld __conv intel_sub_group_shuffle_up( short8 cur, short8 next, uint c );
-short16 __ovld __conv intel_sub_group_shuffle_up( short16 cur, short16 next, uint c );
-
-ushort __ovld __conv intel_sub_group_shuffle_up( ushort cur, ushort next, uint c );
-ushort2 __ovld __conv intel_sub_group_shuffle_up( ushort2 cur, ushort2 next, uint c );
-ushort3 __ovld __conv intel_sub_group_shuffle_up( ushort3 cur, ushort3 next, uint c );
-ushort4 __ovld __conv intel_sub_group_shuffle_up( ushort4 cur, ushort4 next, uint c );
-ushort8 __ovld __conv intel_sub_group_shuffle_up( ushort8 cur, ushort8 next, uint c );
-ushort16 __ovld __conv intel_sub_group_shuffle_up( ushort16 cur, ushort16 next, uint c );
-
-short __ovld __conv intel_sub_group_shuffle_xor( short x, uint c );
-short2 __ovld __conv intel_sub_group_shuffle_xor( short2 x, uint c );
-short3 __ovld __conv intel_sub_group_shuffle_xor( short3 x, uint c );
-short4 __ovld __conv intel_sub_group_shuffle_xor( short4 x, uint c );
-short8 __ovld __conv intel_sub_group_shuffle_xor( short8 x, uint c );
-short16 __ovld __conv intel_sub_group_shuffle_xor( short16 x, uint c );
-
-ushort __ovld __conv intel_sub_group_shuffle_xor( ushort x, uint c );
-ushort2 __ovld __conv intel_sub_group_shuffle_xor( ushort2 x, uint c );
-ushort3 __ovld __conv intel_sub_group_shuffle_xor( ushort3 x, uint c );
-ushort4 __ovld __conv intel_sub_group_shuffle_xor( ushort4 x, uint c );
-ushort8 __ovld __conv intel_sub_group_shuffle_xor( ushort8 x, uint c );
-ushort16 __ovld __conv intel_sub_group_shuffle_xor( ushort16 x, uint c );
+short __ovld __conv intel_sub_group_broadcast( short , uint sub_group_local_id );
+short2 __ovld __conv intel_sub_group_broadcast( short2, uint sub_group_local_id );
+short3 __ovld __conv intel_sub_group_broadcast( short3, uint sub_group_local_id );
+short4 __ovld __conv intel_sub_group_broadcast( short4, uint sub_group_local_id );
+short8 __ovld __conv intel_sub_group_broadcast( short8, uint sub_group_local_id );
+
+ushort __ovld __conv intel_sub_group_broadcast( ushort , uint sub_group_local_id );
+ushort2 __ovld __conv intel_sub_group_broadcast( ushort2, uint sub_group_local_id );
+ushort3 __ovld __conv intel_sub_group_broadcast( ushort3, uint sub_group_local_id );
+ushort4 __ovld __conv intel_sub_group_broadcast( ushort4, uint sub_group_local_id );
+ushort8 __ovld __conv intel_sub_group_broadcast( ushort8, uint sub_group_local_id );
+
+short __ovld __conv intel_sub_group_shuffle( short , uint );
+short2 __ovld __conv intel_sub_group_shuffle( short2 , uint );
+short3 __ovld __conv intel_sub_group_shuffle( short3 , uint );
+short4 __ovld __conv intel_sub_group_shuffle( short4 , uint );
+short8 __ovld __conv intel_sub_group_shuffle( short8 , uint );
+short16 __ovld __conv intel_sub_group_shuffle( short16, uint );
+
+ushort __ovld __conv intel_sub_group_shuffle( ushort , uint );
+ushort2 __ovld __conv intel_sub_group_shuffle( ushort2 , uint );
+ushort3 __ovld __conv intel_sub_group_shuffle( ushort3 , uint );
+ushort4 __ovld __conv intel_sub_group_shuffle( ushort4 , uint );
+ushort8 __ovld __conv intel_sub_group_shuffle( ushort8 , uint );
+ushort16 __ovld __conv intel_sub_group_shuffle( ushort16, uint );
+
+short __ovld __conv intel_sub_group_shuffle_down( short cur, short next, uint );
+short2 __ovld __conv intel_sub_group_shuffle_down( short2 cur, short2 next, uint );
+short3 __ovld __conv intel_sub_group_shuffle_down( short3 cur, short3 next, uint );
+short4 __ovld __conv intel_sub_group_shuffle_down( short4 cur, short4 next, uint );
+short8 __ovld __conv intel_sub_group_shuffle_down( short8 cur, short8 next, uint );
+short16 __ovld __conv intel_sub_group_shuffle_down( short16 cur, short16 next, uint );
+
+ushort __ovld __conv intel_sub_group_shuffle_down( ushort cur, ushort next, uint );
+ushort2 __ovld __conv intel_sub_group_shuffle_down( ushort2 cur, ushort2 next, uint );
+ushort3 __ovld __conv intel_sub_group_shuffle_down( ushort3 cur, ushort3 next, uint );
+ushort4 __ovld __conv intel_sub_group_shuffle_down( ushort4 cur, ushort4 next, uint );
+ushort8 __ovld __conv intel_sub_group_shuffle_down( ushort8 cur, ushort8 next, uint );
+ushort16 __ovld __conv intel_sub_group_shuffle_down( ushort16 cur, ushort16 next, uint );
+
+short __ovld __conv intel_sub_group_shuffle_up( short cur, short next, uint );
+short2 __ovld __conv intel_sub_group_shuffle_up( short2 cur, short2 next, uint );
+short3 __ovld __conv intel_sub_group_shuffle_up( short3 cur, short3 next, uint );
+short4 __ovld __conv intel_sub_group_shuffle_up( short4 cur, short4 next, uint );
+short8 __ovld __conv intel_sub_group_shuffle_up( short8 cur, short8 next, uint );
+short16 __ovld __conv intel_sub_group_shuffle_up( short16 cur, short16 next, uint );
+
+ushort __ovld __conv intel_sub_group_shuffle_up( ushort cur, ushort next, uint );
+ushort2 __ovld __conv intel_sub_group_shuffle_up( ushort2 cur, ushort2 next, uint );
+ushort3 __ovld __conv intel_sub_group_shuffle_up( ushort3 cur, ushort3 next, uint );
+ushort4 __ovld __conv intel_sub_group_shuffle_up( ushort4 cur, ushort4 next, uint );
+ushort8 __ovld __conv intel_sub_group_shuffle_up( ushort8 cur, ushort8 next, uint );
+ushort16 __ovld __conv intel_sub_group_shuffle_up( ushort16 cur, ushort16 next, uint );
+
+short __ovld __conv intel_sub_group_shuffle_xor( short , uint );
+short2 __ovld __conv intel_sub_group_shuffle_xor( short2 , uint );
+short3 __ovld __conv intel_sub_group_shuffle_xor( short3 , uint );
+short4 __ovld __conv intel_sub_group_shuffle_xor( short4 , uint );
+short8 __ovld __conv intel_sub_group_shuffle_xor( short8 , uint );
+short16 __ovld __conv intel_sub_group_shuffle_xor( short16, uint );
+
+ushort __ovld __conv intel_sub_group_shuffle_xor( ushort , uint );
+ushort2 __ovld __conv intel_sub_group_shuffle_xor( ushort2 , uint );
+ushort3 __ovld __conv intel_sub_group_shuffle_xor( ushort3 , uint );
+ushort4 __ovld __conv intel_sub_group_shuffle_xor( ushort4 , uint );
+ushort8 __ovld __conv intel_sub_group_shuffle_xor( ushort8 , uint );
+ushort16 __ovld __conv intel_sub_group_shuffle_xor( ushort16, uint );
short __ovld __conv intel_sub_group_reduce_add( short x );
ushort __ovld __conv intel_sub_group_reduce_add( ushort x );
@@ -17751,17 +17543,17 @@ short __ovld __conv intel_sub_group_scan_inclusive_max( short x );
ushort __ovld __conv intel_sub_group_scan_inclusive_max( ushort x );
#if defined(__opencl_c_images)
-uint __ovld __conv intel_sub_group_block_read_ui( read_only image2d_t image, int2 byte_coord );
-uint2 __ovld __conv intel_sub_group_block_read_ui2( read_only image2d_t image, int2 byte_coord );
-uint4 __ovld __conv intel_sub_group_block_read_ui4( read_only image2d_t image, int2 byte_coord );
-uint8 __ovld __conv intel_sub_group_block_read_ui8( read_only image2d_t image, int2 byte_coord );
+uint __ovld __conv intel_sub_group_block_read_ui(read_only image2d_t, int2);
+uint2 __ovld __conv intel_sub_group_block_read_ui2(read_only image2d_t, int2);
+uint4 __ovld __conv intel_sub_group_block_read_ui4(read_only image2d_t, int2);
+uint8 __ovld __conv intel_sub_group_block_read_ui8(read_only image2d_t, int2);
#endif // defined(__opencl_c_images)
#if defined(__opencl_c_read_write_images)
-uint __ovld __conv intel_sub_group_block_read_ui( read_write image2d_t image, int2 byte_coord );
-uint2 __ovld __conv intel_sub_group_block_read_ui2( read_write image2d_t image, int2 byte_coord );
-uint4 __ovld __conv intel_sub_group_block_read_ui4( read_write image2d_t image, int2 byte_coord );
-uint8 __ovld __conv intel_sub_group_block_read_ui8( read_write image2d_t image, int2 byte_coord );
+uint __ovld __conv intel_sub_group_block_read_ui(read_write image2d_t, int2);
+uint2 __ovld __conv intel_sub_group_block_read_ui2(read_write image2d_t, int2);
+uint4 __ovld __conv intel_sub_group_block_read_ui4(read_write image2d_t, int2);
+uint8 __ovld __conv intel_sub_group_block_read_ui8(read_write image2d_t, int2);
#endif // defined(__opencl_c_read_write_images)
uint __ovld __conv intel_sub_group_block_read_ui( const __global uint* p );
@@ -17770,17 +17562,17 @@ uint4 __ovld __conv intel_sub_group_block_read_ui4( const __global uint* p
uint8 __ovld __conv intel_sub_group_block_read_ui8( const __global uint* p );
#if defined(__opencl_c_images)
-void __ovld __conv intel_sub_group_block_write_ui( read_only image2d_t image, int2 byte_coord, uint data );
-void __ovld __conv intel_sub_group_block_write_ui2( read_only image2d_t image, int2 byte_coord, uint2 data );
-void __ovld __conv intel_sub_group_block_write_ui4( read_only image2d_t image, int2 byte_coord, uint4 data );
-void __ovld __conv intel_sub_group_block_write_ui8( read_only image2d_t image, int2 byte_coord, uint8 data );
+void __ovld __conv intel_sub_group_block_write_ui(read_only image2d_t, int2, uint);
+void __ovld __conv intel_sub_group_block_write_ui2(read_only image2d_t, int2, uint2);
+void __ovld __conv intel_sub_group_block_write_ui4(read_only image2d_t, int2, uint4);
+void __ovld __conv intel_sub_group_block_write_ui8(read_only image2d_t, int2, uint8);
#endif //defined(__opencl_c_images)
#if defined(__opencl_c_read_write_images)
-void __ovld __conv intel_sub_group_block_write_ui( read_write image2d_t image, int2 byte_coord, uint data );
-void __ovld __conv intel_sub_group_block_write_ui2( read_write image2d_t image, int2 byte_coord, uint2 data );
-void __ovld __conv intel_sub_group_block_write_ui4( read_write image2d_t image, int2 byte_coord, uint4 data );
-void __ovld __conv intel_sub_group_block_write_ui8( read_write image2d_t image, int2 byte_coord, uint8 data );
+void __ovld __conv intel_sub_group_block_write_ui(read_write image2d_t, int2, uint);
+void __ovld __conv intel_sub_group_block_write_ui2(read_write image2d_t, int2, uint2);
+void __ovld __conv intel_sub_group_block_write_ui4(read_write image2d_t, int2, uint4);
+void __ovld __conv intel_sub_group_block_write_ui8(read_write image2d_t, int2, uint8);
#endif // defined(__opencl_c_read_write_images)
void __ovld __conv intel_sub_group_block_write_ui( __global uint* p, uint data );
@@ -17789,17 +17581,17 @@ void __ovld __conv intel_sub_group_block_write_ui4( __global uint* p, uint
void __ovld __conv intel_sub_group_block_write_ui8( __global uint* p, uint8 data );
#if defined(__opencl_c_images)
-ushort __ovld __conv intel_sub_group_block_read_us( read_only image2d_t image, int2 coord );
-ushort2 __ovld __conv intel_sub_group_block_read_us2( read_only image2d_t image, int2 coord );
-ushort4 __ovld __conv intel_sub_group_block_read_us4( read_only image2d_t image, int2 coord );
-ushort8 __ovld __conv intel_sub_group_block_read_us8( read_only image2d_t image, int2 coord );
+ushort __ovld __conv intel_sub_group_block_read_us(read_only image2d_t, int2);
+ushort2 __ovld __conv intel_sub_group_block_read_us2(read_only image2d_t, int2);
+ushort4 __ovld __conv intel_sub_group_block_read_us4(read_only image2d_t, int2);
+ushort8 __ovld __conv intel_sub_group_block_read_us8(read_only image2d_t, int2);
#endif // defined(__opencl_c_images)
#if defined(__opencl_c_read_write_images)
-ushort __ovld __conv intel_sub_group_block_read_us(read_write image2d_t image, int2 coord);
-ushort2 __ovld __conv intel_sub_group_block_read_us2(read_write image2d_t image, int2 coord);
-ushort4 __ovld __conv intel_sub_group_block_read_us4(read_write image2d_t image, int2 coord);
-ushort8 __ovld __conv intel_sub_group_block_read_us8(read_write image2d_t image, int2 coord);
+ushort __ovld __conv intel_sub_group_block_read_us(read_write image2d_t, int2);
+ushort2 __ovld __conv intel_sub_group_block_read_us2(read_write image2d_t, int2);
+ushort4 __ovld __conv intel_sub_group_block_read_us4(read_write image2d_t, int2);
+ushort8 __ovld __conv intel_sub_group_block_read_us8(read_write image2d_t, int2);
#endif // defined(__opencl_c_read_write_images)
ushort __ovld __conv intel_sub_group_block_read_us( const __global ushort* p );
@@ -17808,17 +17600,17 @@ ushort4 __ovld __conv intel_sub_group_block_read_us4( const __global ushort*
ushort8 __ovld __conv intel_sub_group_block_read_us8( const __global ushort* p );
#if defined(__opencl_c_images)
-void __ovld __conv intel_sub_group_block_write_us(write_only image2d_t image, int2 coord, ushort data);
-void __ovld __conv intel_sub_group_block_write_us2(write_only image2d_t image, int2 coord, ushort2 data);
-void __ovld __conv intel_sub_group_block_write_us4(write_only image2d_t image, int2 coord, ushort4 data);
-void __ovld __conv intel_sub_group_block_write_us8(write_only image2d_t image, int2 coord, ushort8 data);
+void __ovld __conv intel_sub_group_block_write_us(write_only image2d_t, int2, ushort);
+void __ovld __conv intel_sub_group_block_write_us2(write_only image2d_t, int2, ushort2);
+void __ovld __conv intel_sub_group_block_write_us4(write_only image2d_t, int2, ushort4);
+void __ovld __conv intel_sub_group_block_write_us8(write_only image2d_t, int2, ushort8);
#endif // defined(__opencl_c_images)
#if defined(__opencl_c_read_write_images)
-void __ovld __conv intel_sub_group_block_write_us(read_write image2d_t image, int2 coord, ushort data);
-void __ovld __conv intel_sub_group_block_write_us2(read_write image2d_t image, int2 coord, ushort2 data);
-void __ovld __conv intel_sub_group_block_write_us4(read_write image2d_t image, int2 coord, ushort4 data);
-void __ovld __conv intel_sub_group_block_write_us8(read_write image2d_t image, int2 coord, ushort8 data);
+void __ovld __conv intel_sub_group_block_write_us(read_write image2d_t, int2, ushort);
+void __ovld __conv intel_sub_group_block_write_us2(read_write image2d_t, int2, ushort2);
+void __ovld __conv intel_sub_group_block_write_us4(read_write image2d_t, int2, ushort4);
+void __ovld __conv intel_sub_group_block_write_us8(read_write image2d_t, int2, ushort8);
#endif // defined(__opencl_c_read_write_images)
void __ovld __conv intel_sub_group_block_write_us( __global ushort* p, ushort data );
@@ -18333,72 +18125,72 @@ intel_sub_group_avc_mce_convert_to_sic_result(
#endif // cl_intel_device_side_avc_motion_estimation
#ifdef cl_amd_media_ops
-uint __ovld amd_bitalign(uint a, uint b, uint c);
-uint2 __ovld amd_bitalign(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_bitalign(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_bitalign(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_bitalign(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_bitalign(uint16 a, uint16 b, uint16 c);
-
-uint __ovld amd_bytealign(uint a, uint b, uint c);
-uint2 __ovld amd_bytealign(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_bytealign(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_bytealign(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_bytealign(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_bytealign(uint16 a, uint16 b, uint16 c);
-
-uint __ovld amd_lerp(uint a, uint b, uint c);
-uint2 __ovld amd_lerp(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_lerp(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_lerp(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_lerp(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_lerp(uint16 a, uint16 b, uint16 c);
+uint __ovld amd_bitalign(uint, uint, uint);
+uint2 __ovld amd_bitalign(uint2, uint2, uint2);
+uint3 __ovld amd_bitalign(uint3, uint3, uint3);
+uint4 __ovld amd_bitalign(uint4, uint4, uint4);
+uint8 __ovld amd_bitalign(uint8, uint8, uint8);
+uint16 __ovld amd_bitalign(uint16, uint16, uint16);
+
+uint __ovld amd_bytealign(uint, uint, uint);
+uint2 __ovld amd_bytealign(uint2, uint2, uint2);
+uint3 __ovld amd_bytealign(uint3, uint3, uint3);
+uint4 __ovld amd_bytealign(uint4, uint4, uint4);
+uint8 __ovld amd_bytealign(uint8, uint8, uint8);
+uint16 __ovld amd_bytealign(uint16, uint16, uint16);
+
+uint __ovld amd_lerp(uint, uint, uint);
+uint2 __ovld amd_lerp(uint2, uint2, uint2);
+uint3 __ovld amd_lerp(uint3, uint3, uint3);
+uint4 __ovld amd_lerp(uint4, uint4, uint4);
+uint8 __ovld amd_lerp(uint8, uint8, uint8);
+uint16 __ovld amd_lerp(uint16, uint16, uint16);
uint __ovld amd_pack(float4 v);
-uint __ovld amd_sad4(uint4 x, uint4 y, uint z);
-
-uint __ovld amd_sadhi(uint a, uint b, uint c);
-uint2 __ovld amd_sadhi(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_sadhi(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_sadhi(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_sadhi(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_sadhi(uint16 a, uint16 b, uint16 c);
-
-uint __ovld amd_sad(uint a, uint b, uint c);
-uint2 __ovld amd_sad(uint2 a, uint2 b, uint2 c);
-uint3 __ovld amd_sad(uint3 a, uint3 b, uint3 c);
-uint4 __ovld amd_sad(uint4 a, uint4 b, uint4 c);
-uint8 __ovld amd_sad(uint8 a, uint8 b, uint8 c);
-uint16 __ovld amd_sad(uint16 a, uint16 b, uint16 c);
-
-float __ovld amd_unpack0(uint a);
-float2 __ovld amd_unpack0(uint2 a);
-float3 __ovld amd_unpack0(uint3 a);
-float4 __ovld amd_unpack0(uint4 a);
-float8 __ovld amd_unpack0(uint8 a);
-float16 __ovld amd_unpack0(uint16 a);
-
-float __ovld amd_unpack1(uint a);
-float2 __ovld amd_unpack1(uint2 a);
-float3 __ovld amd_unpack1(uint3 a);
-float4 __ovld amd_unpack1(uint4 a);
-float8 __ovld amd_unpack1(uint8 a);
-float16 __ovld amd_unpack1(uint16 a);
-
-float __ovld amd_unpack2(uint a);
-float2 __ovld amd_unpack2(uint2 a);
-float3 __ovld amd_unpack2(uint3 a);
-float4 __ovld amd_unpack2(uint4 a);
-float8 __ovld amd_unpack2(uint8 a);
-float16 __ovld amd_unpack2(uint16 a);
-
-float __ovld amd_unpack3(uint a);
-float2 __ovld amd_unpack3(uint2 a);
-float3 __ovld amd_unpack3(uint3 a);
-float4 __ovld amd_unpack3(uint4 a);
-float8 __ovld amd_unpack3(uint8 a);
-float16 __ovld amd_unpack3(uint16 a);
+uint __ovld amd_sad4(uint4, uint4, uint);
+
+uint __ovld amd_sadhi(uint, uint, uint);
+uint2 __ovld amd_sadhi(uint2, uint2, uint2);
+uint3 __ovld amd_sadhi(uint3, uint3, uint3);
+uint4 __ovld amd_sadhi(uint4, uint4, uint4);
+uint8 __ovld amd_sadhi(uint8, uint8, uint8);
+uint16 __ovld amd_sadhi(uint16, uint16, uint16);
+
+uint __ovld amd_sad(uint, uint, uint);
+uint2 __ovld amd_sad(uint2, uint2, uint2);
+uint3 __ovld amd_sad(uint3, uint3, uint3);
+uint4 __ovld amd_sad(uint4, uint4, uint4);
+uint8 __ovld amd_sad(uint8, uint8, uint8);
+uint16 __ovld amd_sad(uint16, uint16, uint16);
+
+float __ovld amd_unpack0(uint);
+float2 __ovld amd_unpack0(uint2);
+float3 __ovld amd_unpack0(uint3);
+float4 __ovld amd_unpack0(uint4);
+float8 __ovld amd_unpack0(uint8);
+float16 __ovld amd_unpack0(uint16);
+
+float __ovld amd_unpack1(uint);
+float2 __ovld amd_unpack1(uint2);
+float3 __ovld amd_unpack1(uint3);
+float4 __ovld amd_unpack1(uint4);
+float8 __ovld amd_unpack1(uint8);
+float16 __ovld amd_unpack1(uint16);
+
+float __ovld amd_unpack2(uint);
+float2 __ovld amd_unpack2(uint2);
+float3 __ovld amd_unpack2(uint3);
+float4 __ovld amd_unpack2(uint4);
+float8 __ovld amd_unpack2(uint8);
+float16 __ovld amd_unpack2(uint16);
+
+float __ovld amd_unpack3(uint);
+float2 __ovld amd_unpack3(uint2);
+float3 __ovld amd_unpack3(uint3);
+float4 __ovld amd_unpack3(uint4);
+float8 __ovld amd_unpack3(uint8);
+float16 __ovld amd_unpack3(uint16);
#endif // cl_amd_media_ops
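
amd_bitalign and amd_bytealign both extract a 32-bit window from the 64-bit concatenation a:b. A scalar model of the two (an assumption based on the cl_amd_media_ops extension text, not taken from this header):

    unsigned int bitalign_model(unsigned int a, unsigned int b, unsigned int c) {
        unsigned long long ab = ((unsigned long long)a << 32) | b;
        return (unsigned int)(ab >> (c & 31));      /* shift by c mod 32 bits */
    }
    unsigned int bytealign_model(unsigned int a, unsigned int b, unsigned int c) {
        unsigned long long ab = ((unsigned long long)a << 32) | b;
        return (unsigned int)(ab >> ((c & 3) * 8)); /* shift by c mod 4 bytes */
    }
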
#ifdef cl_amd_media_ops2
@@ -18523,23 +18315,23 @@ uint16 __ovld amd_sadw(uint16 src0, uint16 src1, uint16 src2);
#endif // cl_amd_media_ops2
#if defined(cl_arm_integer_dot_product_int8)
-uint __ovld arm_dot(uchar4 a, uchar4 b);
-int __ovld arm_dot(char4 a, char4 b);
+uint __ovld arm_dot(uchar4, uchar4);
+int __ovld arm_dot(char4, char4);
#endif // defined(cl_arm_integer_dot_product_int8)
#if defined(cl_arm_integer_dot_product_accumulate_int8)
-uint __ovld arm_dot_acc(uchar4 a, uchar4 b, uint c);
-int __ovld arm_dot_acc(char4 a, char4 b, int c);
+uint __ovld arm_dot_acc(uchar4, uchar4, uint);
+int __ovld arm_dot_acc(char4, char4, int);
#endif // defined(cl_arm_integer_dot_product_accumulate_int8)
#if defined(cl_arm_integer_dot_product_accumulate_int16)
-uint __ovld arm_dot_acc(ushort2 a, ushort2 b, uint c);
-int __ovld arm_dot_acc(short2 a, short2 b, int c);
+uint __ovld arm_dot_acc(ushort2, ushort2, uint);
+int __ovld arm_dot_acc(short2, short2, int);
#endif // defined(cl_arm_integer_dot_product_accumulate_int16)
#if defined(cl_arm_integer_dot_product_accumulate_saturate_int8)
-uint __ovld arm_dot_acc_sat(uchar4 a, uchar4 b, uint c);
-int __ovld arm_dot_acc_sat(char4 a, char4 b, int c);
+uint __ovld arm_dot_acc_sat(uchar4, uchar4, uint);
+int __ovld arm_dot_acc_sat(char4, char4, int);
#endif // defined(cl_arm_integer_dot_product_accumulate_saturate_int8)
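
arm_dot is a 4-way 8-bit dot product; the _acc variants add a scalar accumulator and _acc_sat saturates the result. A scalar sketch of the unsigned form (assumption: plain element-wise multiply-and-sum, per the extension's description):

    unsigned int arm_dot_model(const unsigned char a[4], const unsigned char b[4]) {
        unsigned int acc = 0;
        for (int i = 0; i < 4; ++i)
            acc += (unsigned int)a[i] * b[i];
        return acc;
    }
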
// Disable any extensions we may have enabled previously.
diff --git a/contrib/llvm-project/clang/lib/Headers/pmmintrin.h b/contrib/llvm-project/clang/lib/Headers/pmmintrin.h
index eda83567cd05..ee660e95d274 100644
--- a/contrib/llvm-project/clang/lib/Headers/pmmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/pmmintrin.h
@@ -35,7 +35,7 @@
/// A pointer to a 128-bit integer vector containing integer values.
/// \returns A 128-bit vector containing the moved values.
static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_lddqu_si128(__m128i const *__p)
+_mm_lddqu_si128(__m128i_u const *__p)
{
return (__m128i)__builtin_ia32_lddqu((char const *)__p);
}
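
The only change here is the pointer type: __m128i_u is Clang's unaligned-qualified vector type (declared with __aligned__(1)), so the signature now matches lddqu's contract of accepting any alignment, and callers passing unaligned pointers no longer form an over-aligned pointer. A minimal caller (illustrative; buf + 1 is deliberately misaligned):

    #include <pmmintrin.h>

    __m128i load_any_alignment(const unsigned char *buf) {
        return _mm_lddqu_si128((const __m128i_u *)(buf + 1));
    }
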
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/bmi2intrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/bmi2intrin.h
new file mode 100644
index 000000000000..0dc0d14ad480
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/bmi2intrin.h
@@ -0,0 +1,134 @@
+/*===---- bmi2intrin.h - Implementation of BMI2 intrinsics on PowerPC ------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined X86GPRINTRIN_H_
+#error "Never use <bmi2intrin.h> directly; include <x86gprintrin.h> instead."
+#endif
+
+#ifndef BMI2INTRIN_H_
+#define BMI2INTRIN_H_
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _bzhi_u32(unsigned int __X, unsigned int __Y) {
+ return ((__X << (32 - __Y)) >> (32 - __Y));
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mulx_u32(unsigned int __X, unsigned int __Y, unsigned int *__P) {
+ unsigned long long __res = (unsigned long long)__X * __Y;
+ *__P = (unsigned int)(__res >> 32);
+ return (unsigned int)__res;
+}
+
+#ifdef __PPC64__
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _bzhi_u64(unsigned long long __X, unsigned long long __Y) {
+ return ((__X << (64 - __Y)) >> (64 - __Y));
+}
+
+/* __int128 requires a 64-bit base architecture. */
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mulx_u64(unsigned long long __X, unsigned long long __Y,
+ unsigned long long *__P) {
+ unsigned __int128 __res = (unsigned __int128)__X * __Y;
+ *__P = (unsigned long long)(__res >> 64);
+ return (unsigned long long)__res;
+}
+
+#ifdef _ARCH_PWR7
+/* popcount and bpermd require POWER7 at minimum. */
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _pdep_u64(unsigned long long __X, unsigned long long __M) {
+ unsigned long __result = 0x0UL;
+ const unsigned long __mask = 0x8000000000000000UL;
+ unsigned long __m = __M;
+ unsigned long __c, __t;
+ unsigned long __p;
+
+  /* The pop-count of the mask gives the number of bits from the
+     source to process. This is also needed to shift bits from the
+     source into the correct position for the result. */
+ __p = 64 - __builtin_popcountl(__M);
+
+ /* The loop is for the number of '1' bits in the mask and clearing
+ each mask bit as it is processed. */
+ while (__m != 0) {
+ __c = __builtin_clzl(__m);
+ __t = __X << (__p - __c);
+ __m ^= (__mask >> __c);
+ __result |= (__t & (__mask >> __c));
+ __p++;
+ }
+ return __result;
+}
+
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _pext_u64(unsigned long long __X, unsigned long long __M) {
+ unsigned long __p = 0x4040404040404040UL; // initial bit permute control
+ const unsigned long __mask = 0x8000000000000000UL;
+ unsigned long __m = __M;
+ unsigned long __c;
+ unsigned long __result;
+
+  /* If the mask is constant and selects 8 bits or fewer, we can use
+     the Power8 bit-permute instruction. */
+ if (__builtin_constant_p(__M) && (__builtin_popcountl(__M) <= 8)) {
+    /* Also, if the pext mask is constant, the popcount is constant
+       too, so we can evaluate the following loop at compile time
+       and use a constant bit-permute vector. */
+ long __i;
+ for (__i = 0; __i < __builtin_popcountl(__M); __i++) {
+ __c = __builtin_clzl(__m);
+ __p = (__p << 8) | __c;
+ __m ^= (__mask >> __c);
+ }
+ __result = __builtin_bpermd(__p, __X);
+ } else {
+ __p = 64 - __builtin_popcountl(__M);
+ __result = 0;
+    /* We could use a for loop here, but that combined with
+ -funroll-loops can expand to a lot of code. The while
+ loop avoids unrolling and the compiler commons the xor
+ from clearing the mask bit with the (m != 0) test. The
+ result is a more compact loop setup and body. */
+ while (__m != 0) {
+ unsigned long __t;
+ __c = __builtin_clzl(__m);
+ __t = (__X & (__mask >> __c)) >> (__p - __c);
+ __m ^= (__mask >> __c);
+ __result |= (__t);
+ __p++;
+ }
+ }
+ return __result;
+}
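And the inverse gather, plus the round-trip identity that ties the two together (same assumptions as the pdep sketch above):

/* _pext_u64 packs the source bits found at the mask's '1' positions
   into the low-order bits of the result. */
#include <assert.h>
#include <x86gprintrin.h>

int main(void) {
  assert(_pext_u64(0x050AULL, 0x0F0FULL) == 0x5AULL);
  /* pext then pdep with the same mask keeps exactly the masked bits. */
  unsigned long long m = 0xF0F0ULL, x = 0x1234ULL;
  assert(_pdep_u64(_pext_u64(x, m), m) == (x & m));
  return 0;
}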
+
+/* These 32-bit implementations depend on the 64-bit pdep/pext,
+ which in turn depend on _ARCH_PWR7. */
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _pdep_u32(unsigned int __X, unsigned int __Y) {
+ return _pdep_u64(__X, __Y);
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _pext_u32(unsigned int __X, unsigned int __Y) {
+ return _pext_u64(__X, __Y);
+}
+#endif /* _ARCH_PWR7 */
+#endif /* __PPC64__ */
+
+#endif /* BMI2INTRIN_H_ */
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/bmiintrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/bmiintrin.h
new file mode 100644
index 000000000000..7d3315958c7b
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/bmiintrin.h
@@ -0,0 +1,165 @@
+/*===---- bmiintrin.h - Implementation of BMI intrinsics on PowerPC --------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#if !defined X86GPRINTRIN_H_
+#error "Never use <bmiintrin.h> directly; include <x86gprintrin.h> instead."
+#endif
+
+#ifndef BMIINTRIN_H_
+#define BMIINTRIN_H_
+
+extern __inline unsigned short
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __tzcnt_u16(unsigned short __X) {
+ return __builtin_ctz(__X);
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __andn_u32(unsigned int __X, unsigned int __Y) {
+ return (~__X & __Y);
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _bextr_u32(unsigned int __X, unsigned int __P, unsigned int __L) {
+ return ((__X << (32 - (__L + __P))) >> (32 - __L));
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __bextr_u32(unsigned int __X, unsigned int __Y) {
+ unsigned int __P, __L;
+ __P = __Y & 0xFF;
+ __L = (__Y >> 8) & 0xFF;
+ return (_bextr_u32(__X, __P, __L));
+}
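The register form packs the start and length operands into one word; a hand-checked sketch of the encoding (hypothetical caller; the shift-based emulation assumes 0 < L and P + L <= 32, since larger values shift out of range):

/* __bextr_u32 decodes start from bits 0-7 and length from bits 8-15
   of its second operand, matching the x86 BEXTR register form. */
#include <assert.h>
#include <x86gprintrin.h>

int main(void) {
  /* Extract 16 bits starting at bit 8 of 0x12345678 -> 0x3456. */
  assert(_bextr_u32(0x12345678u, 8, 16) == 0x3456u);
  assert(__bextr_u32(0x12345678u, 8u | (16u << 8)) == 0x3456u);
  return 0;
}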
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __blsi_u32(unsigned int __X) {
+ return (__X & -__X);
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _blsi_u32(unsigned int __X) {
+ return __blsi_u32(__X);
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __blsmsk_u32(unsigned int __X) {
+ return (__X ^ (__X - 1));
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _blsmsk_u32(unsigned int __X) {
+ return __blsmsk_u32(__X);
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __blsr_u32(unsigned int __X) {
+ return (__X & (__X - 1));
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _blsr_u32(unsigned int __X) {
+ return __blsr_u32(__X);
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __tzcnt_u32(unsigned int __X) {
+ return __builtin_ctz(__X);
+}
+
+extern __inline unsigned int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _tzcnt_u32(unsigned int __X) {
+ return __builtin_ctz(__X);
+}
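The one-line identities above are the classic BMI1 bit tricks; a hand-checked sketch on one sample value (hypothetical caller, same include assumptions as the earlier sketches):

#include <assert.h>
#include <x86gprintrin.h>

int main(void) {
  unsigned int x = 0x68u;           /* 01101000b; lowest set bit is bit 3 */
  assert(__blsi_u32(x) == 0x08u);   /* x & -x: isolate lowest set bit */
  assert(__blsmsk_u32(x) == 0x0Fu); /* x ^ (x - 1): mask up through it */
  assert(__blsr_u32(x) == 0x60u);   /* x & (x - 1): clear it           */
  assert(__tzcnt_u32(x) == 3u);     /* count trailing zeros            */
  return 0;
}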
+
+/* Use the 64-bit shift, rotate, and count-leading-zeros instructions
+ for long long. */
+#ifdef __PPC64__
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __andn_u64(unsigned long long __X, unsigned long long __Y) {
+ return (~__X & __Y);
+}
+
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _bextr_u64(unsigned long long __X, unsigned int __P, unsigned int __L) {
+ return ((__X << (64 - (__L + __P))) >> (64 - __L));
+}
+
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __bextr_u64(unsigned long long __X, unsigned long long __Y) {
+ unsigned int __P, __L;
+ __P = __Y & 0xFF;
+ __L = (__Y & 0xFF00) >> 8;
+ return (_bextr_u64(__X, __P, __L));
+}
+
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __blsi_u64(unsigned long long __X) {
+ return __X & -__X;
+}
+
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _blsi_u64(unsigned long long __X) {
+ return __blsi_u64(__X);
+}
+
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __blsmsk_u64(unsigned long long __X) {
+ return (__X ^ (__X - 1));
+}
+
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _blsmsk_u64(unsigned long long __X) {
+ return __blsmsk_u64(__X);
+}
+
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __blsr_u64(unsigned long long __X) {
+ return (__X & (__X - 1));
+}
+
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _blsr_u64(unsigned long long __X) {
+ return __blsr_u64(__X);
+}
+
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ __tzcnt_u64(unsigned long long __X) {
+ return __builtin_ctzll(__X);
+}
+
+extern __inline unsigned long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _tzcnt_u64(unsigned long long __X) {
+ return __builtin_ctzll(__X);
+}
+#endif /* __PPC64__ */
+
+#endif /* BMIINTRIN_H_ */
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/emmintrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/emmintrin.h
index 82a71788b27a..8c6aa23c1a0d 100644
--- a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/emmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/emmintrin.h
@@ -29,7 +29,8 @@
efficiently as C language float scalar operations or optimized to
use vector SIMD operations. We recommend this for new applications.
*/
-#error "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#error \
+ "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
#endif
#ifndef EMMINTRIN_H_
@@ -55,523 +56,515 @@ typedef __vector unsigned char __v16qu;
/* The Intel API is flexible enough that we must allow aliasing with other
vector types, and their scalar components. */
-typedef long long __m128i __attribute__ ((__vector_size__ (16), __may_alias__));
-typedef double __m128d __attribute__ ((__vector_size__ (16), __may_alias__));
+typedef long long __m128i __attribute__((__vector_size__(16), __may_alias__));
+typedef double __m128d __attribute__((__vector_size__(16), __may_alias__));
/* Unaligned version of the same types. */
-typedef long long __m128i_u __attribute__ ((__vector_size__ (16), __may_alias__, __aligned__ (1)));
-typedef double __m128d_u __attribute__ ((__vector_size__ (16), __may_alias__, __aligned__ (1)));
+typedef long long __m128i_u
+ __attribute__((__vector_size__(16), __may_alias__, __aligned__(1)));
+typedef double __m128d_u
+ __attribute__((__vector_size__(16), __may_alias__, __aligned__(1)));
/* Define a two-value permute mask. */
-#define _MM_SHUFFLE2(x,y) (((x) << 1) | (y))
+#define _MM_SHUFFLE2(x, y) (((x) << 1) | (y))
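A minimal sanity check of the encoding (editorial sketch, assuming C11 _Static_assert and the usual x86 reading of the two-lane shuffle control):

/* The macro packs two one-bit lane selectors: bit 0 picks the lane
   taken from the first source, bit 1 the lane from the second. */
_Static_assert(_MM_SHUFFLE2(1, 0) == 2, "bit 1 from x, bit 0 from y");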
/* Create a vector with element 0 as F and the rest zero. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_sd (double __F)
-{
- return __extension__ (__m128d){ __F, 0.0 };
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_sd(double __F) {
+ return __extension__(__m128d){__F, 0.0};
}
/* Create a vector with both elements equal to F. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_pd (double __F)
-{
- return __extension__ (__m128d){ __F, __F };
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_pd(double __F) {
+ return __extension__(__m128d){__F, __F};
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_pd1 (double __F)
-{
- return _mm_set1_pd (__F);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_pd1(double __F) {
+ return _mm_set1_pd(__F);
}
/* Create a vector with the lower value X and upper value W. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_pd (double __W, double __X)
-{
- return __extension__ (__m128d){ __X, __W };
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_pd(double __W, double __X) {
+ return __extension__(__m128d){__X, __W};
}
/* Create a vector with the lower value W and upper value X. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setr_pd (double __W, double __X)
-{
- return __extension__ (__m128d){ __W, __X };
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setr_pd(double __W, double __X) {
+ return __extension__(__m128d){__W, __X};
}
/* Create an undefined vector. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_undefined_pd (void)
-{
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_undefined_pd(void) {
__m128d __Y = __Y;
return __Y;
}
/* Create a vector of zeros. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setzero_pd (void)
-{
- return (__m128d) vec_splats (0);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setzero_pd(void) {
+ return (__m128d)vec_splats(0);
}
/* Sets the low DPFP value of A from the low value of B. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_move_sd (__m128d __A, __m128d __B)
-{
- __v2df result = (__v2df) __A;
- result [0] = ((__v2df) __B)[0];
- return (__m128d) result;
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_move_sd(__m128d __A, __m128d __B) {
+ __v2df __result = (__v2df)__A;
+ __result[0] = ((__v2df)__B)[0];
+ return (__m128d)__result;
}
/* Load two DPFP values from P. The address must be 16-byte aligned. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_pd (double const *__P)
-{
- return ((__m128d)vec_ld(0, (__v16qu*)__P));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_load_pd(double const *__P) {
+ return ((__m128d)vec_ld(0, (__v16qu *)__P));
}
/* Load two DPFP values from P. The address need not be 16-byte aligned. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadu_pd (double const *__P)
-{
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_loadu_pd(double const *__P) {
return (vec_vsx_ld(0, __P));
}
/* Create a vector with all two elements equal to *P. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load1_pd (double const *__P)
-{
- return (vec_splats (*__P));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_load1_pd(double const *__P) {
+ return (vec_splats(*__P));
}
/* Create a vector with element 0 as *P and the rest zero. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_sd (double const *__P)
-{
- return _mm_set_sd (*__P);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_load_sd(double const *__P) {
+ return _mm_set_sd(*__P);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_pd1 (double const *__P)
-{
- return _mm_load1_pd (__P);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_load_pd1(double const *__P) {
+ return _mm_load1_pd(__P);
}
/* Load two DPFP values in reverse order. The address must be aligned. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadr_pd (double const *__P)
-{
- __v2df __tmp = _mm_load_pd (__P);
- return (__m128d)vec_xxpermdi (__tmp, __tmp, 2);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_loadr_pd(double const *__P) {
+ __v2df __tmp = _mm_load_pd(__P);
+ return (__m128d)vec_xxpermdi(__tmp, __tmp, 2);
}
/* Store two DPFP values. The address must be 16-byte aligned. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_pd (double *__P, __m128d __A)
-{
- vec_st((__v16qu)__A, 0, (__v16qu*)__P);
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_store_pd(double *__P, __m128d __A) {
+ vec_st((__v16qu)__A, 0, (__v16qu *)__P);
}
/* Store two DPFP values. The address need not be 16-byte aligned. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storeu_pd (double *__P, __m128d __A)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_storeu_pd(double *__P, __m128d __A) {
*(__m128d_u *)__P = __A;
}
/* Stores the lower DPFP value. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_sd (double *__P, __m128d __A)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_store_sd(double *__P, __m128d __A) {
*__P = ((__v2df)__A)[0];
}
-extern __inline double __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsd_f64 (__m128d __A)
-{
+extern __inline double
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsd_f64(__m128d __A) {
return ((__v2df)__A)[0];
}
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storel_pd (double *__P, __m128d __A)
-{
- _mm_store_sd (__P, __A);
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_storel_pd(double *__P, __m128d __A) {
+ _mm_store_sd(__P, __A);
}
/* Stores the upper DPFP value. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storeh_pd (double *__P, __m128d __A)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_storeh_pd(double *__P, __m128d __A) {
*__P = ((__v2df)__A)[1];
}
/* Store the lower DPFP value across two words.
The address must be 16-byte aligned. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store1_pd (double *__P, __m128d __A)
-{
- _mm_store_pd (__P, vec_splat (__A, 0));
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_store1_pd(double *__P, __m128d __A) {
+ _mm_store_pd(__P, vec_splat(__A, 0));
}
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_pd1 (double *__P, __m128d __A)
-{
- _mm_store1_pd (__P, __A);
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_store_pd1(double *__P, __m128d __A) {
+ _mm_store1_pd(__P, __A);
}
/* Store two DPFP values in reverse order. The address must be aligned. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storer_pd (double *__P, __m128d __A)
-{
- _mm_store_pd (__P, vec_xxpermdi (__A, __A, 2));
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_storer_pd(double *__P, __m128d __A) {
+ _mm_store_pd(__P, vec_xxpermdi(__A, __A, 2));
}
/* Intel intrinsic. */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi128_si64 (__m128i __A)
-{
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi128_si64(__m128i __A) {
return ((__v2di)__A)[0];
}
/* Microsoft intrinsic. */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi128_si64x (__m128i __A)
-{
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi128_si64x(__m128i __A) {
return ((__v2di)__A)[0];
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_pd (__m128d __A, __m128d __B)
-{
- return (__m128d) ((__v2df)__A + (__v2df)__B);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_pd(__m128d __A, __m128d __B) {
+ return (__m128d)((__v2df)__A + (__v2df)__B);
}
/* Add the lower double-precision (64-bit) floating-point element in
a and b, store the result in the lower element of dst, and copy
the upper element from a to the upper element of dst. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_sd (__m128d __A, __m128d __B)
-{
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_sd(__m128d __A, __m128d __B) {
__A[0] = __A[0] + __B[0];
return (__A);
}
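A sketch of the scalar-add lane behavior described above (hypothetical caller; assumes this wrapper is reached as <emmintrin.h> with -DNO_WARN_X86_INTRINSICS):

#include <emmintrin.h>

void add_sd_demo(void) {
  __m128d a = _mm_set_pd(2.0, 1.0);   /* lanes {1.0, 2.0}: lo, hi */
  __m128d b = _mm_set_pd(20.0, 10.0); /* lanes {10.0, 20.0}       */
  __m128d r = _mm_add_sd(a, b);       /* lanes {11.0, 2.0}        */
  (void)r; /* only lane 0 is summed; lane 1 comes from a */
}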
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_pd (__m128d __A, __m128d __B)
-{
- return (__m128d) ((__v2df)__A - (__v2df)__B);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_pd(__m128d __A, __m128d __B) {
+ return (__m128d)((__v2df)__A - (__v2df)__B);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_sd (__m128d __A, __m128d __B)
-{
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_sd(__m128d __A, __m128d __B) {
__A[0] = __A[0] - __B[0];
return (__A);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_pd (__m128d __A, __m128d __B)
-{
- return (__m128d) ((__v2df)__A * (__v2df)__B);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mul_pd(__m128d __A, __m128d __B) {
+ return (__m128d)((__v2df)__A * (__v2df)__B);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_sd (__m128d __A, __m128d __B)
-{
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mul_sd(__m128d __A, __m128d __B) {
__A[0] = __A[0] * __B[0];
return (__A);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_div_pd (__m128d __A, __m128d __B)
-{
- return (__m128d) ((__v2df)__A / (__v2df)__B);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_div_pd(__m128d __A, __m128d __B) {
+ return (__m128d)((__v2df)__A / (__v2df)__B);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_div_sd (__m128d __A, __m128d __B)
-{
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_div_sd(__m128d __A, __m128d __B) {
__A[0] = __A[0] / __B[0];
return (__A);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sqrt_pd (__m128d __A)
-{
- return (vec_sqrt (__A));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sqrt_pd(__m128d __A) {
+ return (vec_sqrt(__A));
}
/* Return pair {sqrt (B[0]), A[1]}. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sqrt_sd (__m128d __A, __m128d __B)
-{
- __v2df c;
- c = vec_sqrt ((__v2df) _mm_set1_pd (__B[0]));
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sqrt_sd(__m128d __A, __m128d __B) {
+ __v2df __c;
+ __c = vec_sqrt((__v2df)_mm_set1_pd(__B[0]));
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_pd (__m128d __A, __m128d __B)
-{
- return (vec_min (__A, __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_min_pd(__m128d __A, __m128d __B) {
+ return (vec_min(__A, __B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_sd (__m128d __A, __m128d __B)
-{
- __v2df a, b, c;
- a = vec_splats (__A[0]);
- b = vec_splats (__B[0]);
- c = vec_min (a, b);
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_min_sd(__m128d __A, __m128d __B) {
+ __v2df __a, __b, __c;
+ __a = vec_splats(__A[0]);
+ __b = vec_splats(__B[0]);
+ __c = vec_min(__a, __b);
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_pd (__m128d __A, __m128d __B)
-{
- return (vec_max (__A, __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_max_pd(__m128d __A, __m128d __B) {
+ return (vec_max(__A, __B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_sd (__m128d __A, __m128d __B)
-{
- __v2df a, b, c;
- a = vec_splats (__A[0]);
- b = vec_splats (__B[0]);
- c = vec_max (a, b);
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_max_sd(__m128d __A, __m128d __B) {
+ __v2df __a, __b, __c;
+ __a = vec_splats(__A[0]);
+ __b = vec_splats(__B[0]);
+ __c = vec_max(__a, __b);
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_pd (__m128d __A, __m128d __B)
-{
- return ((__m128d)vec_cmpeq ((__v2df) __A, (__v2df) __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_pd(__m128d __A, __m128d __B) {
+ return ((__m128d)vec_cmpeq((__v2df)__A, (__v2df)__B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_pd (__m128d __A, __m128d __B)
-{
- return ((__m128d)vec_cmplt ((__v2df) __A, (__v2df) __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmplt_pd(__m128d __A, __m128d __B) {
+ return ((__m128d)vec_cmplt((__v2df)__A, (__v2df)__B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmple_pd (__m128d __A, __m128d __B)
-{
- return ((__m128d)vec_cmple ((__v2df) __A, (__v2df) __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmple_pd(__m128d __A, __m128d __B) {
+ return ((__m128d)vec_cmple((__v2df)__A, (__v2df)__B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_pd (__m128d __A, __m128d __B)
-{
- return ((__m128d)vec_cmpgt ((__v2df) __A, (__v2df) __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_pd(__m128d __A, __m128d __B) {
+ return ((__m128d)vec_cmpgt((__v2df)__A, (__v2df)__B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpge_pd (__m128d __A, __m128d __B)
-{
- return ((__m128d)vec_cmpge ((__v2df) __A,(__v2df) __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpge_pd(__m128d __A, __m128d __B) {
+ return ((__m128d)vec_cmpge((__v2df)__A, (__v2df)__B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpneq_pd (__m128d __A, __m128d __B)
-{
- __v2df temp = (__v2df) vec_cmpeq ((__v2df) __A, (__v2df)__B);
- return ((__m128d)vec_nor (temp, temp));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpneq_pd(__m128d __A, __m128d __B) {
+ __v2df __temp = (__v2df)vec_cmpeq((__v2df)__A, (__v2df)__B);
+ return ((__m128d)vec_nor(__temp, __temp));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnlt_pd (__m128d __A, __m128d __B)
-{
- return ((__m128d)vec_cmpge ((__v2df) __A, (__v2df) __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpnlt_pd(__m128d __A, __m128d __B) {
+ return ((__m128d)vec_cmpge((__v2df)__A, (__v2df)__B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnle_pd (__m128d __A, __m128d __B)
-{
- return ((__m128d)vec_cmpgt ((__v2df) __A, (__v2df) __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpnle_pd(__m128d __A, __m128d __B) {
+ return ((__m128d)vec_cmpgt((__v2df)__A, (__v2df)__B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpngt_pd (__m128d __A, __m128d __B)
-{
- return ((__m128d)vec_cmple ((__v2df) __A, (__v2df) __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpngt_pd(__m128d __A, __m128d __B) {
+ return ((__m128d)vec_cmple((__v2df)__A, (__v2df)__B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnge_pd (__m128d __A, __m128d __B)
-{
- return ((__m128d)vec_cmplt ((__v2df) __A, (__v2df) __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpnge_pd(__m128d __A, __m128d __B) {
+ return ((__m128d)vec_cmplt((__v2df)__A, (__v2df)__B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpord_pd (__m128d __A, __m128d __B)
-{
-#if _ARCH_PWR8
- __v2du c, d;
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpord_pd(__m128d __A, __m128d __B) {
+ __v2du __c, __d;
/* Comparing against self returns false (0's) for NAN. */
- c = (__v2du)vec_cmpeq (__A, __A);
- d = (__v2du)vec_cmpeq (__B, __B);
-#else
- __v2du a, b;
- __v2du c, d;
- const __v2du double_exp_mask = {0x7ff0000000000000, 0x7ff0000000000000};
- a = (__v2du)vec_abs ((__v2df)__A);
- b = (__v2du)vec_abs ((__v2df)__B);
- c = (__v2du)vec_cmpgt (double_exp_mask, a);
- d = (__v2du)vec_cmpgt (double_exp_mask, b);
-#endif
+ __c = (__v2du)vec_cmpeq(__A, __A);
+ __d = (__v2du)vec_cmpeq(__B, __B);
/* A != NAN and B != NAN. */
- return ((__m128d)vec_and(c, d));
+ return ((__m128d)vec_and(__c, __d));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpunord_pd (__m128d __A, __m128d __B)
-{
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpunord_pd(__m128d __A, __m128d __B) {
#if _ARCH_PWR8
- __v2du c, d;
+ __v2du __c, __d;
/* Comparing against self returns false (0's) for NAN. */
- c = (__v2du)vec_cmpeq ((__v2df)__A, (__v2df)__A);
- d = (__v2du)vec_cmpeq ((__v2df)__B, (__v2df)__B);
+ __c = (__v2du)vec_cmpeq((__v2df)__A, (__v2df)__A);
+ __d = (__v2du)vec_cmpeq((__v2df)__B, (__v2df)__B);
/* A == NAN OR B == NAN converts to:
NOT(A != NAN) OR NOT(B != NAN). */
- c = vec_nor (c, c);
- return ((__m128d)vec_orc(c, d));
+ __c = vec_nor(__c, __c);
+ return ((__m128d)vec_orc(__c, __d));
#else
- __v2du c, d;
+ __v2du __c, __d;
/* Comparing against self returns false (0's) for NAN. */
- c = (__v2du)vec_cmpeq ((__v2df)__A, (__v2df)__A);
- d = (__v2du)vec_cmpeq ((__v2df)__B, (__v2df)__B);
+ __c = (__v2du)vec_cmpeq((__v2df)__A, (__v2df)__A);
+ __d = (__v2du)vec_cmpeq((__v2df)__B, (__v2df)__B);
/* Invert, so that true ('1's) indicates NAN. */
- c = vec_nor (c, c);
- d = vec_nor (d, d);
- return ((__m128d)vec_or(c, d));
+ __c = vec_nor(__c, __c);
+ __d = vec_nor(__d, __d);
+ return ((__m128d)vec_or(__c, __d));
#endif
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_sd(__m128d __A, __m128d __B)
-{
- __v2df a, b, c;
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_sd(__m128d __A, __m128d __B) {
+ __v2df __a, __b, __c;
/* PowerISA VSX does not allow partial (for just lower double)
results. So to ensure we don't generate spurious exceptions
(from the upper double values) we splat the lower double
before we do the operation. */
- a = vec_splats (__A[0]);
- b = vec_splats (__B[0]);
- c = (__v2df) vec_cmpeq(a, b);
+ __a = vec_splats(__A[0]);
+ __b = vec_splats(__B[0]);
+ __c = (__v2df)vec_cmpeq(__a, __b);
/* Then we merge the lower double result with the original upper
double from __A. */
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_sd (__m128d __A, __m128d __B)
-{
- __v2df a, b, c;
- a = vec_splats (__A[0]);
- b = vec_splats (__B[0]);
- c = (__v2df) vec_cmplt(a, b);
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmple_sd (__m128d __A, __m128d __B)
-{
- __v2df a, b, c;
- a = vec_splats (__A[0]);
- b = vec_splats (__B[0]);
- c = (__v2df) vec_cmple(a, b);
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_sd (__m128d __A, __m128d __B)
-{
- __v2df a, b, c;
- a = vec_splats (__A[0]);
- b = vec_splats (__B[0]);
- c = (__v2df) vec_cmpgt(a, b);
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpge_sd (__m128d __A, __m128d __B)
-{
- __v2df a, b, c;
- a = vec_splats (__A[0]);
- b = vec_splats (__B[0]);
- c = (__v2df) vec_cmpge(a, b);
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpneq_sd (__m128d __A, __m128d __B)
-{
- __v2df a, b, c;
- a = vec_splats (__A[0]);
- b = vec_splats (__B[0]);
- c = (__v2df) vec_cmpeq(a, b);
- c = vec_nor (c, c);
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
-}
-
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnlt_sd (__m128d __A, __m128d __B)
-{
- __v2df a, b, c;
- a = vec_splats (__A[0]);
- b = vec_splats (__B[0]);
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
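A sketch of the resulting lane layout for these scalar compares (hypothetical caller, same include assumptions as above): lane 0 holds an all-ones/all-zeros mask and lane 1 of the first operand passes through.

#include <assert.h>
#include <emmintrin.h>
#include <string.h>

void cmpeq_sd_demo(void) {
  __m128d a = _mm_set_pd(7.0, 3.0); /* lanes {3.0, 7.0} */
  __m128d b = _mm_set_pd(9.0, 3.0); /* lanes {3.0, 9.0} */
  __m128d r = _mm_cmpeq_sd(a, b);   /* lane 0: 3.0 == 3.0 -> all ones */
  unsigned long long lane0;
  memcpy(&lane0, &r, sizeof lane0); /* element [0] is lowest in memory */
  assert(lane0 == ~0ULL);
  assert(r[1] == 7.0);              /* lane 1 copied unchanged from a */
}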
+
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmplt_sd(__m128d __A, __m128d __B) {
+ __v2df __a, __b, __c;
+ __a = vec_splats(__A[0]);
+ __b = vec_splats(__B[0]);
+ __c = (__v2df)vec_cmplt(__a, __b);
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmple_sd(__m128d __A, __m128d __B) {
+ __v2df __a, __b, __c;
+ __a = vec_splats(__A[0]);
+ __b = vec_splats(__B[0]);
+ __c = (__v2df)vec_cmple(__a, __b);
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_sd(__m128d __A, __m128d __B) {
+ __v2df __a, __b, __c;
+ __a = vec_splats(__A[0]);
+ __b = vec_splats(__B[0]);
+ __c = (__v2df)vec_cmpgt(__a, __b);
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpge_sd(__m128d __A, __m128d __B) {
+ __v2df __a, __b, __c;
+ __a = vec_splats(__A[0]);
+ __b = vec_splats(__B[0]);
+ __c = (__v2df)vec_cmpge(__a, __b);
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpneq_sd(__m128d __A, __m128d __B) {
+ __v2df __a, __b, __c;
+ __a = vec_splats(__A[0]);
+ __b = vec_splats(__B[0]);
+ __c = (__v2df)vec_cmpeq(__a, __b);
+ __c = vec_nor(__c, __c);
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
+}
+
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpnlt_sd(__m128d __A, __m128d __B) {
+ __v2df __a, __b, __c;
+ __a = vec_splats(__A[0]);
+ __b = vec_splats(__B[0]);
/* Not less than is just greater than or equal. */
- c = (__v2df) vec_cmpge(a, b);
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
+ __c = (__v2df)vec_cmpge(__a, __b);
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnle_sd (__m128d __A, __m128d __B)
-{
- __v2df a, b, c;
- a = vec_splats (__A[0]);
- b = vec_splats (__B[0]);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpnle_sd(__m128d __A, __m128d __B) {
+ __v2df __a, __b, __c;
+ __a = vec_splats(__A[0]);
+ __b = vec_splats(__B[0]);
/* Not less than or equal is just greater than. */
- c = (__v2df) vec_cmpge(a, b);
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
+ __c = (__v2df)vec_cmpgt(__a, __b);
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpngt_sd (__m128d __A, __m128d __B)
-{
- __v2df a, b, c;
- a = vec_splats (__A[0]);
- b = vec_splats (__B[0]);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpngt_sd(__m128d __A, __m128d __B) {
+ __v2df __a, __b, __c;
+ __a = vec_splats(__A[0]);
+ __b = vec_splats(__B[0]);
/* Not greater than is just less than or equal. */
- c = (__v2df) vec_cmple(a, b);
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
+ __c = (__v2df)vec_cmple(__a, __b);
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnge_sd (__m128d __A, __m128d __B)
-{
- __v2df a, b, c;
- a = vec_splats (__A[0]);
- b = vec_splats (__B[0]);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpnge_sd(__m128d __A, __m128d __B) {
+ __v2df __a, __b, __c;
+ __a = vec_splats(__A[0]);
+ __b = vec_splats(__B[0]);
/* Not greater than or equal is just less than. */
- c = (__v2df) vec_cmplt(a, b);
- return (__m128d) _mm_setr_pd (c[0], __A[1]);
+ __c = (__v2df)vec_cmplt(__a, __b);
+ return (__m128d)_mm_setr_pd(__c[0], __A[1]);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpord_sd (__m128d __A, __m128d __B)
-{
- __v2df r;
- r = (__v2df)_mm_cmpord_pd (vec_splats (__A[0]), vec_splats (__B[0]));
- return (__m128d) _mm_setr_pd (r[0], ((__v2df)__A)[1]);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpord_sd(__m128d __A, __m128d __B) {
+ __v2df __r;
+ __r = (__v2df)_mm_cmpord_pd(vec_splats(__A[0]), vec_splats(__B[0]));
+ return (__m128d)_mm_setr_pd(__r[0], ((__v2df)__A)[1]);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpunord_sd (__m128d __A, __m128d __B)
-{
- __v2df r;
- r = _mm_cmpunord_pd (vec_splats (__A[0]), vec_splats (__B[0]));
- return (__m128d) _mm_setr_pd (r[0], __A[1]);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpunord_sd(__m128d __A, __m128d __B) {
+ __v2df __r;
+ __r = _mm_cmpunord_pd(vec_splats(__A[0]), vec_splats(__B[0]));
+ return (__m128d)_mm_setr_pd(__r[0], __A[1]);
}
/* FIXME
@@ -581,1740 +574,1689 @@ _mm_cmpunord_sd (__m128d __A, __m128d __B)
Technically _mm_comieq_sd et al. should be using the ordered
compare and signal for QNaNs. The _mm_ucomieq_sd et al. should
be OK. */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comieq_sd (__m128d __A, __m128d __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_comieq_sd(__m128d __A, __m128d __B) {
return (__A[0] == __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comilt_sd (__m128d __A, __m128d __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_comilt_sd(__m128d __A, __m128d __B) {
return (__A[0] < __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comile_sd (__m128d __A, __m128d __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_comile_sd(__m128d __A, __m128d __B) {
return (__A[0] <= __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comigt_sd (__m128d __A, __m128d __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_comigt_sd(__m128d __A, __m128d __B) {
return (__A[0] > __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comige_sd (__m128d __A, __m128d __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_comige_sd(__m128d __A, __m128d __B) {
return (__A[0] >= __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comineq_sd (__m128d __A, __m128d __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_comineq_sd(__m128d __A, __m128d __B) {
return (__A[0] != __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomieq_sd (__m128d __A, __m128d __B)
-{
- return (__A[0] == __B[0]);
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_ucomieq_sd(__m128d __A, __m128d __B) {
+ return (__A[0] == __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomilt_sd (__m128d __A, __m128d __B)
-{
- return (__A[0] < __B[0]);
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_ucomilt_sd(__m128d __A, __m128d __B) {
+ return (__A[0] < __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomile_sd (__m128d __A, __m128d __B)
-{
- return (__A[0] <= __B[0]);
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_ucomile_sd(__m128d __A, __m128d __B) {
+ return (__A[0] <= __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomigt_sd (__m128d __A, __m128d __B)
-{
- return (__A[0] > __B[0]);
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_ucomigt_sd(__m128d __A, __m128d __B) {
+ return (__A[0] > __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomige_sd (__m128d __A, __m128d __B)
-{
- return (__A[0] >= __B[0]);
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_ucomige_sd(__m128d __A, __m128d __B) {
+ return (__A[0] >= __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomineq_sd (__m128d __A, __m128d __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_ucomineq_sd(__m128d __A, __m128d __B) {
return (__A[0] != __B[0]);
}
/* Create a vector of Qi, where i is the element number. */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_epi64x (long long __q1, long long __q0)
-{
- return __extension__ (__m128i)(__v2di){ __q0, __q1 };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_epi64x(long long __q1, long long __q0) {
+ return __extension__(__m128i)(__v2di){__q0, __q1};
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_epi64 (__m64 __q1, __m64 __q0)
-{
- return _mm_set_epi64x ((long long)__q1, (long long)__q0);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_epi64(__m64 __q1, __m64 __q0) {
+ return _mm_set_epi64x((long long)__q1, (long long)__q0);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_epi32 (int __q3, int __q2, int __q1, int __q0)
-{
- return __extension__ (__m128i)(__v4si){ __q0, __q1, __q2, __q3 };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_epi32(int __q3, int __q2, int __q1, int __q0) {
+ return __extension__(__m128i)(__v4si){__q0, __q1, __q2, __q3};
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_epi16 (short __q7, short __q6, short __q5, short __q4,
- short __q3, short __q2, short __q1, short __q0)
-{
- return __extension__ (__m128i)(__v8hi){
- __q0, __q1, __q2, __q3, __q4, __q5, __q6, __q7 };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_epi16(short __q7, short __q6, short __q5, short __q4, short __q3,
+ short __q2, short __q1, short __q0) {
+ return __extension__(__m128i)(__v8hi){__q0, __q1, __q2, __q3,
+ __q4, __q5, __q6, __q7};
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_epi8 (char __q15, char __q14, char __q13, char __q12,
- char __q11, char __q10, char __q09, char __q08,
- char __q07, char __q06, char __q05, char __q04,
- char __q03, char __q02, char __q01, char __q00)
-{
- return __extension__ (__m128i)(__v16qi){
- __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07,
- __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15
- };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_epi8(char __q15, char __q14, char __q13, char __q12, char __q11,
+ char __q10, char __q09, char __q08, char __q07, char __q06,
+ char __q05, char __q04, char __q03, char __q02, char __q01,
+ char __q00) {
+ return __extension__(__m128i)(__v16qi){
+ __q00, __q01, __q02, __q03, __q04, __q05, __q06, __q07,
+ __q08, __q09, __q10, __q11, __q12, __q13, __q14, __q15};
}
/* Set all of the elements of the vector to A. */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_epi64x (long long __A)
-{
- return _mm_set_epi64x (__A, __A);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_epi64x(long long __A) {
+ return _mm_set_epi64x(__A, __A);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_epi64 (__m64 __A)
-{
- return _mm_set_epi64 (__A, __A);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_epi64(__m64 __A) {
+ return _mm_set_epi64(__A, __A);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_epi32 (int __A)
-{
- return _mm_set_epi32 (__A, __A, __A, __A);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_epi32(int __A) {
+ return _mm_set_epi32(__A, __A, __A, __A);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_epi16 (short __A)
-{
- return _mm_set_epi16 (__A, __A, __A, __A, __A, __A, __A, __A);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_epi16(short __A) {
+ return _mm_set_epi16(__A, __A, __A, __A, __A, __A, __A, __A);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_epi8 (char __A)
-{
- return _mm_set_epi8 (__A, __A, __A, __A, __A, __A, __A, __A,
- __A, __A, __A, __A, __A, __A, __A, __A);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_epi8(char __A) {
+ return _mm_set_epi8(__A, __A, __A, __A, __A, __A, __A, __A, __A, __A, __A,
+ __A, __A, __A, __A, __A);
}
/* Create a vector of Qi, where i is the element number.
The parameter order is reversed from the _mm_set_epi* functions. */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setr_epi64 (__m64 __q0, __m64 __q1)
-{
- return _mm_set_epi64 (__q1, __q0);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setr_epi64(__m64 __q0, __m64 __q1) {
+ return _mm_set_epi64(__q1, __q0);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setr_epi32 (int __q0, int __q1, int __q2, int __q3)
-{
- return _mm_set_epi32 (__q3, __q2, __q1, __q0);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setr_epi32(int __q0, int __q1, int __q2, int __q3) {
+ return _mm_set_epi32(__q3, __q2, __q1, __q0);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setr_epi16 (short __q0, short __q1, short __q2, short __q3,
- short __q4, short __q5, short __q6, short __q7)
-{
- return _mm_set_epi16 (__q7, __q6, __q5, __q4, __q3, __q2, __q1, __q0);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setr_epi16(short __q0, short __q1, short __q2, short __q3, short __q4,
+ short __q5, short __q6, short __q7) {
+ return _mm_set_epi16(__q7, __q6, __q5, __q4, __q3, __q2, __q1, __q0);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setr_epi8 (char __q00, char __q01, char __q02, char __q03,
- char __q04, char __q05, char __q06, char __q07,
- char __q08, char __q09, char __q10, char __q11,
- char __q12, char __q13, char __q14, char __q15)
-{
- return _mm_set_epi8 (__q15, __q14, __q13, __q12, __q11, __q10, __q09, __q08,
- __q07, __q06, __q05, __q04, __q03, __q02, __q01, __q00);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setr_epi8(char __q00, char __q01, char __q02, char __q03, char __q04,
+ char __q05, char __q06, char __q07, char __q08, char __q09,
+ char __q10, char __q11, char __q12, char __q13, char __q14,
+ char __q15) {
+ return _mm_set_epi8(__q15, __q14, __q13, __q12, __q11, __q10, __q09, __q08,
+ __q07, __q06, __q05, __q04, __q03, __q02, __q01, __q00);
}
/* Create a vector with element 0 as *P and the rest zero. */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_si128 (__m128i const *__P)
-{
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_load_si128(__m128i const *__P) {
return *__P;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadu_si128 (__m128i_u const *__P)
-{
- return (__m128i) (vec_vsx_ld(0, (signed int const *)__P));
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_loadu_si128(__m128i_u const *__P) {
+ return (__m128i)(vec_vsx_ld(0, (signed int const *)__P));
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadl_epi64 (__m128i_u const *__P)
-{
- return _mm_set_epi64 ((__m64)0LL, *(__m64 *)__P);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_loadl_epi64(__m128i_u const *__P) {
+ return _mm_set_epi64((__m64)0LL, *(__m64 *)__P);
}
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_si128 (__m128i *__P, __m128i __B)
-{
- vec_st ((__v16qu) __B, 0, (__v16qu*)__P);
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_store_si128(__m128i *__P, __m128i __B) {
+ vec_st((__v16qu)__B, 0, (__v16qu *)__P);
}
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storeu_si128 (__m128i_u *__P, __m128i __B)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_storeu_si128(__m128i_u *__P, __m128i __B) {
*__P = __B;
}
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storel_epi64 (__m128i_u *__P, __m128i __B)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_storel_epi64(__m128i_u *__P, __m128i __B) {
*(long long *)__P = ((__v2di)__B)[0];
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movepi64_pi64 (__m128i_u __B)
-{
- return (__m64) ((__v2di)__B)[0];
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_movepi64_pi64(__m128i_u __B) {
+ return (__m64)((__v2di)__B)[0];
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movpi64_epi64 (__m64 __A)
-{
- return _mm_set_epi64 ((__m64)0LL, __A);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_movpi64_epi64(__m64 __A) {
+ return _mm_set_epi64((__m64)0LL, __A);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_move_epi64 (__m128i __A)
-{
- return _mm_set_epi64 ((__m64)0LL, (__m64)__A[0]);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_move_epi64(__m128i __A) {
+ return _mm_set_epi64((__m64)0LL, (__m64)__A[0]);
}
/* Create an undefined vector. */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_undefined_si128 (void)
-{
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_undefined_si128(void) {
__m128i __Y = __Y;
return __Y;
}
/* Create a vector of zeros. */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setzero_si128 (void)
-{
- return __extension__ (__m128i)(__v4si){ 0, 0, 0, 0 };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setzero_si128(void) {
+ return __extension__(__m128i)(__v4si){0, 0, 0, 0};
}
#ifdef _ARCH_PWR8
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtepi32_pd (__m128i __A)
-{
- __v2di val;
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepi32_pd(__m128i __A) {
+ __v2di __val;
/* For LE we need to generate Vector Unpack Low Signed Word,
which is generated from unpackh. */
- val = (__v2di)vec_unpackh ((__v4si)__A);
+ __val = (__v2di)vec_unpackh((__v4si)__A);
- return (__m128d)vec_ctf (val, 0);
+ return (__m128d)vec_ctf(__val, 0);
}
#endif
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtepi32_ps (__m128i __A)
-{
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepi32_ps(__m128i __A) {
return ((__m128)vec_ctf((__v4si)__A, 0));
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpd_epi32 (__m128d __A)
-{
- __v2df rounded = vec_rint (__A);
- __v4si result, temp;
- const __v4si vzero =
- { 0, 0, 0, 0 };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtpd_epi32(__m128d __A) {
+ __v2df __rounded = vec_rint(__A);
+ __v4si __result, __temp;
+ const __v4si __vzero = {0, 0, 0, 0};
/* VSX Vector truncate Double-Precision to integer and Convert to
Signed Integer Word format with Saturate. */
- __asm__(
- "xvcvdpsxws %x0,%x1"
- : "=wa" (temp)
- : "wa" (rounded)
- : );
+ __asm__("xvcvdpsxws %x0,%x1" : "=wa"(__temp) : "wa"(__rounded) :);
#ifdef _ARCH_PWR8
- temp = vec_mergeo (temp, temp);
- result = (__v4si) vec_vpkudum ((__vector long long) temp,
- (__vector long long) vzero);
+#ifdef __LITTLE_ENDIAN__
+ __temp = vec_mergeo(__temp, __temp);
+#else
+ __temp = vec_mergee(__temp, __temp);
+#endif
+ __result = (__v4si)vec_vpkudum((__vector long long)__temp,
+ (__vector long long)__vzero);
#else
{
- const __v16qu pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
- 0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f };
- result = (__v4si) vec_perm ((__v16qu) temp, (__v16qu) vzero, pkperm);
+ const __v16qu __pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
+ 0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f};
+ __result = (__v4si)vec_perm((__v16qu)__temp, (__v16qu)__vzero, __pkperm);
}
#endif
- return (__m128i) result;
+ return (__m128i)__result;
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpd_pi32 (__m128d __A)
-{
- __m128i result = _mm_cvtpd_epi32(__A);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtpd_pi32(__m128d __A) {
+ __m128i __result = _mm_cvtpd_epi32(__A);
- return (__m64) result[0];
+ return (__m64)__result[0];
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpd_ps (__m128d __A)
-{
- __v4sf result;
- __v4si temp;
- const __v4si vzero = { 0, 0, 0, 0 };
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtpd_ps(__m128d __A) {
+ __v4sf __result;
+ __v4si __temp;
+ const __v4si __vzero = {0, 0, 0, 0};
- __asm__(
- "xvcvdpsp %x0,%x1"
- : "=wa" (temp)
- : "wa" (__A)
- : );
+ __asm__("xvcvdpsp %x0,%x1" : "=wa"(__temp) : "wa"(__A) :);
#ifdef _ARCH_PWR8
- temp = vec_mergeo (temp, temp);
- result = (__v4sf) vec_vpkudum ((__vector long long) temp,
- (__vector long long) vzero);
+#ifdef __LITTLE_ENDIAN__
+ __temp = vec_mergeo(__temp, __temp);
+#else
+ __temp = vec_mergee(__temp, __temp);
+#endif
+ __result = (__v4sf)vec_vpkudum((__vector long long)__temp,
+ (__vector long long)__vzero);
#else
{
- const __v16qu pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
- 0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f };
- result = (__v4sf) vec_perm ((__v16qu) temp, (__v16qu) vzero, pkperm);
+ const __v16qu __pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
+ 0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f};
+ __result = (__v4sf)vec_perm((__v16qu)__temp, (__v16qu)__vzero, __pkperm);
}
#endif
- return ((__m128)result);
+ return ((__m128)__result);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttpd_epi32 (__m128d __A)
-{
- __v4si result;
- __v4si temp;
- const __v4si vzero = { 0, 0, 0, 0 };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvttpd_epi32(__m128d __A) {
+ __v4si __result;
+ __v4si __temp;
+ const __v4si __vzero = {0, 0, 0, 0};
/* VSX Vector truncate Double-Precision to integer and Convert to
Signed Integer Word format with Saturate. */
- __asm__(
- "xvcvdpsxws %x0,%x1"
- : "=wa" (temp)
- : "wa" (__A)
- : );
+ __asm__("xvcvdpsxws %x0,%x1" : "=wa"(__temp) : "wa"(__A) :);
#ifdef _ARCH_PWR8
- temp = vec_mergeo (temp, temp);
- result = (__v4si) vec_vpkudum ((__vector long long) temp,
- (__vector long long) vzero);
+#ifdef __LITTLE_ENDIAN__
+ __temp = vec_mergeo(__temp, __temp);
+#else
+ __temp = vec_mergee(__temp, __temp);
+#endif
+ __result = (__v4si)vec_vpkudum((__vector long long)__temp,
+ (__vector long long)__vzero);
#else
{
- const __v16qu pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
- 0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f };
- result = (__v4si) vec_perm ((__v16qu) temp, (__v16qu) vzero, pkperm);
+ const __v16qu __pkperm = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09, 0x0a, 0x0b,
+ 0x14, 0x15, 0x16, 0x17, 0x1c, 0x1d, 0x1e, 0x1f};
+ __result = (__v4si)vec_perm((__v16qu)__temp, (__v16qu)__vzero, __pkperm);
}
#endif
- return ((__m128i) result);
+ return ((__m128i)__result);
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttpd_pi32 (__m128d __A)
-{
- __m128i result = _mm_cvttpd_epi32 (__A);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvttpd_pi32(__m128d __A) {
+ __m128i __result = _mm_cvttpd_epi32(__A);
- return (__m64) result[0];
+ return (__m64)__result[0];
}
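The _mm_cvtpd_* and _mm_cvttpd_* pairs differ only in the rounding step: the former round to the nearest integral value first (vec_rint, round-half-to-even in the default mode), while the "tt" variants use the instruction's round-toward-zero behavior. A minimal scalar illustration, assuming the default rounding mode:

#include <math.h>
#include <stdio.h>

int main(void) {
  const double samples[] = {2.5, -2.5, 1.7, -1.7};
  for (int i = 0; i < 4; i++) {
    double d = samples[i];
    /* rint: round to nearest even, like the non-truncating conversions;
       (int) cast: truncate toward zero, like the _mm_cvtt* conversions. */
    printf("%5.1f -> rint %3d, trunc %3d\n", d, (int)rint(d), (int)d);
  }
  return 0;
}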
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi128_si32 (__m128i __A)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi128_si32(__m128i __A) {
return ((__v4si)__A)[0];
}
#ifdef _ARCH_PWR8
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpi32_pd (__m64 __A)
-{
- __v4si temp;
- __v2di tmp2;
- __v2df result;
-
- temp = (__v4si)vec_splats (__A);
- tmp2 = (__v2di)vec_unpackl (temp);
- result = vec_ctf ((__vector signed long long) tmp2, 0);
- return (__m128d)result;
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtpi32_pd(__m64 __A) {
+ __v4si __temp;
+ __v2di __tmp2;
+ __v2df __result;
+
+ __temp = (__v4si)vec_splats(__A);
+ __tmp2 = (__v2di)vec_unpackl(__temp);
+ __result = vec_ctf((__vector signed long long)__tmp2, 0);
+ return (__m128d)__result;
}
#endif
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_epi32 (__m128 __A)
-{
- __v4sf rounded;
- __v4si result;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtps_epi32(__m128 __A) {
+ __v4sf __rounded;
+ __v4si __result;
- rounded = vec_rint((__v4sf) __A);
- result = vec_cts (rounded, 0);
- return (__m128i) result;
+ __rounded = vec_rint((__v4sf)__A);
+ __result = vec_cts(__rounded, 0);
+ return (__m128i)__result;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttps_epi32 (__m128 __A)
-{
- __v4si result;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvttps_epi32(__m128 __A) {
+ __v4si __result;
- result = vec_cts ((__v4sf) __A, 0);
- return (__m128i) result;
+ __result = vec_cts((__v4sf)__A, 0);
+ return (__m128i)__result;
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_pd (__m128 __A)
-{
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtps_pd(__m128 __A) {
  /* Check if vec_doubleh is defined by <altivec.h>. If so, use that. */
#ifdef vec_doubleh
- return (__m128d) vec_doubleh ((__v4sf)__A);
+ return (__m128d)vec_doubleh((__v4sf)__A);
#else
  /* Otherwise the compiler is not current, so we need to generate the
     equivalent code.  */
- __v4sf a = (__v4sf)__A;
- __v4sf temp;
- __v2df result;
+ __v4sf __a = (__v4sf)__A;
+ __v4sf __temp;
+ __v2df __result;
#ifdef __LITTLE_ENDIAN__
  /* The input float values are in elements {[0], [1]} but the convert
     instruction needs them in elements {[1], [3]}, so we use two
     shift left double vector word immediates to get the elements
     lined up.  */
- temp = __builtin_vsx_xxsldwi (a, a, 3);
- temp = __builtin_vsx_xxsldwi (a, temp, 2);
+ __temp = __builtin_vsx_xxsldwi(__a, __a, 3);
+ __temp = __builtin_vsx_xxsldwi(__a, __temp, 2);
#else
  /* The input float values are in elements {[0], [1]} but the convert
     instruction needs them in elements {[0], [2]}, so we use a merge-high
     (vec_vmrghw) to duplicate the words and get the elements lined up.  */
- temp = vec_vmrghw (a, a);
+ __temp = vec_vmrghw(__a, __a);
#endif
- __asm__(
- " xvcvspdp %x0,%x1"
- : "=wa" (result)
- : "wa" (temp)
- : );
- return (__m128d) result;
+ __asm__(" xvcvspdp %x0,%x1" : "=wa"(__result) : "wa"(__temp) :);
+ return (__m128d)__result;
#endif
}
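The xxsldwi pair above is a word-granularity funnel shift. A rough scalar model follows, using the instruction's big-endian word numbering; element numbering under __LITTLE_ENDIAN__ differs, so take this only as a sketch of the idea, not a statement of the builtin's exact semantics:

#include <stdio.h>

/* Scalar sketch of xxsldwi(a, b, n): select four 32-bit words starting at
   word n of the 8-word concatenation {a, b}.  Illustrative only. */
static void xxsldwi_model(const unsigned *a, const unsigned *b, int n,
                          unsigned *out) {
  unsigned cat[8];
  for (int i = 0; i < 4; i++) {
    cat[i] = a[i];
    cat[i + 4] = b[i];
  }
  for (int i = 0; i < 4; i++)
    out[i] = cat[n + i];
}

int main(void) {
  unsigned a[4] = {10, 11, 12, 13}, t1[4], t2[4];
  xxsldwi_model(a, a, 3, t1);  /* {13, 10, 11, 12} */
  xxsldwi_model(a, t1, 2, t2); /* {12, 13, 13, 10} */
  printf("t2 = {%u, %u, %u, %u}\n", t2[0], t2[1], t2[2], t2[3]);
  return 0;
}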
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsd_si32 (__m128d __A)
-{
- __v2df rounded = vec_rint((__v2df) __A);
- int result = ((__v2df)rounded)[0];
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsd_si32(__m128d __A) {
+ __v2df __rounded = vec_rint((__v2df)__A);
+ int __result = ((__v2df)__rounded)[0];
- return result;
+ return __result;
}
/* Intel intrinsic. */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsd_si64 (__m128d __A)
-{
- __v2df rounded = vec_rint ((__v2df) __A );
- long long result = ((__v2df) rounded)[0];
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsd_si64(__m128d __A) {
+ __v2df __rounded = vec_rint((__v2df)__A);
+ long long __result = ((__v2df)__rounded)[0];
- return result;
+ return __result;
}
/* Microsoft intrinsic. */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsd_si64x (__m128d __A)
-{
- return _mm_cvtsd_si64 ((__v2df)__A);
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsd_si64x(__m128d __A) {
+ return _mm_cvtsd_si64((__v2df)__A);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsd_si32 (__m128d __A)
-{
- int result = ((__v2df)__A)[0];
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvttsd_si32(__m128d __A) {
+ int __result = ((__v2df)__A)[0];
- return result;
+ return __result;
}
/* Intel intrinsic. */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsd_si64 (__m128d __A)
-{
- long long result = ((__v2df)__A)[0];
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvttsd_si64(__m128d __A) {
+ long long __result = ((__v2df)__A)[0];
- return result;
+ return __result;
}
/* Microsoft intrinsic. */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttsd_si64x (__m128d __A)
-{
- return _mm_cvttsd_si64 (__A);
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvttsd_si64x(__m128d __A) {
+ return _mm_cvttsd_si64(__A);
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsd_ss (__m128 __A, __m128d __B)
-{
- __v4sf result = (__v4sf)__A;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsd_ss(__m128 __A, __m128d __B) {
+ __v4sf __result = (__v4sf)__A;
#ifdef __LITTLE_ENDIAN__
- __v4sf temp_s;
+ __v4sf __temp_s;
  /* Copy double element [0] to element [1] for conversion. */
- __v2df temp_b = vec_splat((__v2df)__B, 0);
+ __v2df __temp_b = vec_splat((__v2df)__B, 0);
/* Pre-rotate __A left 3 (logically right 1) elements. */
- result = __builtin_vsx_xxsldwi (result, result, 3);
+ __result = __builtin_vsx_xxsldwi(__result, __result, 3);
/* Convert double to single float scalar in a vector. */
- __asm__(
- "xscvdpsp %x0,%x1"
- : "=wa" (temp_s)
- : "wa" (temp_b)
- : );
+ __asm__("xscvdpsp %x0,%x1" : "=wa"(__temp_s) : "wa"(__temp_b) :);
/* Shift the resulting scalar into vector element [0]. */
- result = __builtin_vsx_xxsldwi (result, temp_s, 1);
+ __result = __builtin_vsx_xxsldwi(__result, __temp_s, 1);
#else
- result [0] = ((__v2df)__B)[0];
+ __result[0] = ((__v2df)__B)[0];
#endif
- return (__m128) result;
+ return (__m128)__result;
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi32_sd (__m128d __A, int __B)
-{
- __v2df result = (__v2df)__A;
- double db = __B;
- result [0] = db;
- return (__m128d)result;
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi32_sd(__m128d __A, int __B) {
+ __v2df __result = (__v2df)__A;
+ double __db = __B;
+ __result[0] = __db;
+ return (__m128d)__result;
}
/* Intel intrinsic. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi64_sd (__m128d __A, long long __B)
-{
- __v2df result = (__v2df)__A;
- double db = __B;
- result [0] = db;
- return (__m128d)result;
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64_sd(__m128d __A, long long __B) {
+ __v2df __result = (__v2df)__A;
+ double __db = __B;
+ __result[0] = __db;
+ return (__m128d)__result;
}
/* Microsoft intrinsic. */
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi64x_sd (__m128d __A, long long __B)
-{
- return _mm_cvtsi64_sd (__A, __B);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64x_sd(__m128d __A, long long __B) {
+ return _mm_cvtsi64_sd(__A, __B);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtss_sd (__m128d __A, __m128 __B)
-{
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtss_sd(__m128d __A, __m128 __B) {
#ifdef __LITTLE_ENDIAN__
/* Use splat to move element [0] into position for the convert. */
- __v4sf temp = vec_splat ((__v4sf)__B, 0);
- __v2df res;
+ __v4sf __temp = vec_splat((__v4sf)__B, 0);
+ __v2df __res;
/* Convert single float scalar to double in a vector. */
- __asm__(
- "xscvspdp %x0,%x1"
- : "=wa" (res)
- : "wa" (temp)
- : );
- return (__m128d) vec_mergel (res, (__v2df)__A);
+ __asm__("xscvspdp %x0,%x1" : "=wa"(__res) : "wa"(__temp) :);
+ return (__m128d)vec_mergel(__res, (__v2df)__A);
#else
- __v2df res = (__v2df)__A;
- res [0] = ((__v4sf)__B) [0];
- return (__m128d) res;
+ __v2df __res = (__v2df)__A;
+ __res[0] = ((__v4sf)__B)[0];
+ return (__m128d)__res;
#endif
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shuffle_pd(__m128d __A, __m128d __B, const int __mask)
-{
- __vector double result;
- const int litmsk = __mask & 0x3;
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_shuffle_pd(__m128d __A, __m128d __B, const int __mask) {
+ __vector double __result;
+ const int __litmsk = __mask & 0x3;
- if (litmsk == 0)
- result = vec_mergeh (__A, __B);
+ if (__litmsk == 0)
+ __result = vec_mergeh(__A, __B);
#if __GNUC__ < 6
- else if (litmsk == 1)
- result = vec_xxpermdi (__B, __A, 2);
- else if (litmsk == 2)
- result = vec_xxpermdi (__B, __A, 1);
+ else if (__litmsk == 1)
+ __result = vec_xxpermdi(__B, __A, 2);
+ else if (__litmsk == 2)
+ __result = vec_xxpermdi(__B, __A, 1);
#else
- else if (litmsk == 1)
- result = vec_xxpermdi (__A, __B, 2);
- else if (litmsk == 2)
- result = vec_xxpermdi (__A, __B, 1);
+ else if (__litmsk == 1)
+ __result = vec_xxpermdi(__A, __B, 2);
+ else if (__litmsk == 2)
+ __result = vec_xxpermdi(__A, __B, 1);
#endif
else
- result = vec_mergel (__A, __B);
+ __result = vec_mergel(__A, __B);
- return result;
+ return __result;
}
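For reference, the Intel semantics being reproduced here: bit 0 of the mask selects result element [0] from __A, bit 1 selects element [1] from __B, which is why mask 0 reduces to a merge-high and mask 3 to a merge-low. A self-contained scalar sketch:

#include <stdio.h>

/* Scalar sketch of _mm_shuffle_pd's mask decoding.  Demo only. */
static void shuffle_pd_model(const double *a, const double *b, int mask,
                             double *out) {
  out[0] = a[mask & 1];
  out[1] = b[(mask >> 1) & 1];
}

int main(void) {
  double a[2] = {1.0, 2.0}, b[2] = {3.0, 4.0}, r[2];
  for (int m = 0; m < 4; m++) {
    shuffle_pd_model(a, b, m, r);
    printf("mask=%d -> {%g, %g}\n", m, r[0], r[1]);
  }
  return 0;
}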
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpackhi_pd (__m128d __A, __m128d __B)
-{
- return (__m128d) vec_mergel ((__v2df)__A, (__v2df)__B);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpackhi_pd(__m128d __A, __m128d __B) {
+ return (__m128d)vec_mergel((__v2df)__A, (__v2df)__B);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpacklo_pd (__m128d __A, __m128d __B)
-{
- return (__m128d) vec_mergeh ((__v2df)__A, (__v2df)__B);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpacklo_pd(__m128d __A, __m128d __B) {
+ return (__m128d)vec_mergeh((__v2df)__A, (__v2df)__B);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadh_pd (__m128d __A, double const *__B)
-{
- __v2df result = (__v2df)__A;
- result [1] = *__B;
- return (__m128d)result;
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_loadh_pd(__m128d __A, double const *__B) {
+ __v2df __result = (__v2df)__A;
+ __result[1] = *__B;
+ return (__m128d)__result;
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadl_pd (__m128d __A, double const *__B)
-{
- __v2df result = (__v2df)__A;
- result [0] = *__B;
- return (__m128d)result;
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_loadl_pd(__m128d __A, double const *__B) {
+ __v2df __result = (__v2df)__A;
+ __result[0] = *__B;
+ return (__m128d)__result;
}
#ifdef _ARCH_PWR8
/* Intrinsic functions that require PowerISA 2.07 minimum. */
/* Creates a 2-bit mask from the most significant bits of the DPFP values. */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movemask_pd (__m128d __A)
-{
- __vector unsigned long long result;
- static const __vector unsigned int perm_mask =
- {
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_movemask_pd(__m128d __A) {
+#ifdef _ARCH_PWR10
+ return vec_extractm((__v2du)__A);
+#else
+ __vector unsigned long long __result;
+ static const __vector unsigned int __perm_mask = {
#ifdef __LITTLE_ENDIAN__
- 0x80800040, 0x80808080, 0x80808080, 0x80808080
+ 0x80800040, 0x80808080, 0x80808080, 0x80808080
#else
0x80808080, 0x80808080, 0x80808080, 0x80804000
#endif
- };
+ };
- result = ((__vector unsigned long long)
- vec_vbpermq ((__vector unsigned char) __A,
- (__vector unsigned char) perm_mask));
+ __result = ((__vector unsigned long long)vec_vbpermq(
+ (__vector unsigned char)__A, (__vector unsigned char)__perm_mask));
#ifdef __LITTLE_ENDIAN__
- return result[1];
+ return __result[1];
#else
- return result[0];
+ return __result[0];
#endif
+#endif /* !_ARCH_PWR10 */
}
#endif /* _ARCH_PWR8 */
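The byte indices in __perm_mask point vec_vbpermq at the two sign-bit positions of the vector, collecting them into a 2-bit integer in one instruction; the _ARCH_PWR10 path's vec_extractm does the same directly. The equivalent scalar semantics, as an illustrative sketch:

#include <stdio.h>
#include <string.h>

/* Scalar sketch of _mm_movemask_pd: gather the sign bit of each double
   into the low bits of an int.  Demo only. */
static int movemask_pd_model(const double *v) {
  int mask = 0;
  for (int i = 0; i < 2; i++) {
    unsigned long long bits;
    memcpy(&bits, &v[i], sizeof bits);
    mask |= (int)(bits >> 63) << i;
  }
  return mask;
}

int main(void) {
  double v[2] = {-1.0, 2.0};
  printf("movemask = %d\n", movemask_pd_model(v)); /* bit 0 set -> 1 */
  return 0;
}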
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_packs_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_packs ((__v8hi) __A, (__v8hi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_packs_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_packs((__v8hi)__A, (__v8hi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_packs_epi32 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_packs ((__v4si)__A, (__v4si)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_packs_epi32(__m128i __A, __m128i __B) {
+ return (__m128i)vec_packs((__v4si)__A, (__v4si)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_packus_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_packsu ((__v8hi) __A, (__v8hi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_packus_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_packsu((__v8hi)__A, (__v8hi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpackhi_epi8 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_mergel ((__v16qu)__A, (__v16qu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpackhi_epi8(__m128i __A, __m128i __B) {
+ return (__m128i)vec_mergel((__v16qu)__A, (__v16qu)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpackhi_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_mergel ((__v8hu)__A, (__v8hu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpackhi_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_mergel((__v8hu)__A, (__v8hu)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpackhi_epi32 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_mergel ((__v4su)__A, (__v4su)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpackhi_epi32(__m128i __A, __m128i __B) {
+ return (__m128i)vec_mergel((__v4su)__A, (__v4su)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpackhi_epi64 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_mergel ((__vector long long) __A,
- (__vector long long) __B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpackhi_epi64(__m128i __A, __m128i __B) {
+ return (__m128i)vec_mergel((__vector long long)__A, (__vector long long)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpacklo_epi8 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_mergeh ((__v16qu)__A, (__v16qu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpacklo_epi8(__m128i __A, __m128i __B) {
+ return (__m128i)vec_mergeh((__v16qu)__A, (__v16qu)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpacklo_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_mergeh ((__v8hi)__A, (__v8hi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpacklo_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_mergeh((__v8hi)__A, (__v8hi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpacklo_epi32 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_mergeh ((__v4si)__A, (__v4si)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpacklo_epi32(__m128i __A, __m128i __B) {
+ return (__m128i)vec_mergeh((__v4si)__A, (__v4si)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpacklo_epi64 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_mergeh ((__vector long long) __A,
- (__vector long long) __B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpacklo_epi64(__m128i __A, __m128i __B) {
+ return (__m128i)vec_mergeh((__vector long long)__A, (__vector long long)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_epi8 (__m128i __A, __m128i __B)
-{
- return (__m128i) ((__v16qu)__A + (__v16qu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_epi8(__m128i __A, __m128i __B) {
+ return (__m128i)((__v16qu)__A + (__v16qu)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) ((__v8hu)__A + (__v8hu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)((__v8hu)__A + (__v8hu)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_epi32 (__m128i __A, __m128i __B)
-{
- return (__m128i) ((__v4su)__A + (__v4su)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_epi32(__m128i __A, __m128i __B) {
+ return (__m128i)((__v4su)__A + (__v4su)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_epi64 (__m128i __A, __m128i __B)
-{
- return (__m128i) ((__v2du)__A + (__v2du)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_epi64(__m128i __A, __m128i __B) {
+ return (__m128i)((__v2du)__A + (__v2du)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_adds_epi8 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_adds ((__v16qi)__A, (__v16qi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_adds_epi8(__m128i __A, __m128i __B) {
+ return (__m128i)vec_adds((__v16qi)__A, (__v16qi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_adds_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_adds ((__v8hi)__A, (__v8hi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_adds_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_adds((__v8hi)__A, (__v8hi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_adds_epu8 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_adds ((__v16qu)__A, (__v16qu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_adds_epu8(__m128i __A, __m128i __B) {
+ return (__m128i)vec_adds((__v16qu)__A, (__v16qu)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_adds_epu16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_adds ((__v8hu)__A, (__v8hu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_adds_epu16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_adds((__v8hu)__A, (__v8hu)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_epi8 (__m128i __A, __m128i __B)
-{
- return (__m128i) ((__v16qu)__A - (__v16qu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_epi8(__m128i __A, __m128i __B) {
+ return (__m128i)((__v16qu)__A - (__v16qu)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) ((__v8hu)__A - (__v8hu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)((__v8hu)__A - (__v8hu)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_epi32 (__m128i __A, __m128i __B)
-{
- return (__m128i) ((__v4su)__A - (__v4su)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_epi32(__m128i __A, __m128i __B) {
+ return (__m128i)((__v4su)__A - (__v4su)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_epi64 (__m128i __A, __m128i __B)
-{
- return (__m128i) ((__v2du)__A - (__v2du)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_epi64(__m128i __A, __m128i __B) {
+ return (__m128i)((__v2du)__A - (__v2du)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_subs_epi8 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_subs ((__v16qi)__A, (__v16qi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_subs_epi8(__m128i __A, __m128i __B) {
+ return (__m128i)vec_subs((__v16qi)__A, (__v16qi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_subs_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_subs ((__v8hi)__A, (__v8hi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_subs_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_subs((__v8hi)__A, (__v8hi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_subs_epu8 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_subs ((__v16qu)__A, (__v16qu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_subs_epu8(__m128i __A, __m128i __B) {
+ return (__m128i)vec_subs((__v16qu)__A, (__v16qu)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_subs_epu16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_subs ((__v8hu)__A, (__v8hu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_subs_epu16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_subs((__v8hu)__A, (__v8hu)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_madd_epi16 (__m128i __A, __m128i __B)
-{
- __vector signed int zero = {0, 0, 0, 0};
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_madd_epi16(__m128i __A, __m128i __B) {
+ __vector signed int __zero = {0, 0, 0, 0};
- return (__m128i) vec_vmsumshm ((__v8hi)__A, (__v8hi)__B, zero);
+ return (__m128i)vec_vmsumshm((__v8hi)__A, (__v8hi)__B, __zero);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mulhi_epi16 (__m128i __A, __m128i __B)
-{
- __vector signed int w0, w1;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mulhi_epi16(__m128i __A, __m128i __B) {
+ __vector signed int __w0, __w1;
- __vector unsigned char xform1 = {
+ __vector unsigned char __xform1 = {
#ifdef __LITTLE_ENDIAN__
- 0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17,
- 0x0A, 0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
+ 0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17, 0x0A,
+ 0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
#else
- 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15,
- 0x08, 0x09, 0x18, 0x19, 0x0C, 0x0D, 0x1C, 0x1D
+ 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15, 0x08,
+ 0x09, 0x18, 0x19, 0x0C, 0x0D, 0x1C, 0x1D
#endif
- };
+ };
- w0 = vec_vmulesh ((__v8hi)__A, (__v8hi)__B);
- w1 = vec_vmulosh ((__v8hi)__A, (__v8hi)__B);
- return (__m128i) vec_perm (w0, w1, xform1);
+ __w0 = vec_vmulesh((__v8hi)__A, (__v8hi)__B);
+ __w1 = vec_vmulosh((__v8hi)__A, (__v8hi)__B);
+ return (__m128i)vec_perm(__w0, __w1, __xform1);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mullo_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) ((__v8hi)__A * (__v8hi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mullo_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)((__v8hi)__A * (__v8hi)__B);
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_su32 (__m64 __A, __m64 __B)
-{
- unsigned int a = __A;
- unsigned int b = __B;
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mul_su32(__m64 __A, __m64 __B) {
+ unsigned int __a = __A;
+ unsigned int __b = __B;
- return ((__m64)a * (__m64)b);
+ return ((__m64)__a * (__m64)__b);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_epu32 (__m128i __A, __m128i __B)
-{
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mul_epu32(__m128i __A, __m128i __B) {
#if __GNUC__ < 8
- __v2du result;
+ __v2du __result;
#ifdef __LITTLE_ENDIAN__
/* VMX Vector Multiply Odd Unsigned Word. */
- __asm__(
- "vmulouw %0,%1,%2"
- : "=v" (result)
- : "v" (__A), "v" (__B)
- : );
+ __asm__("vmulouw %0,%1,%2" : "=v"(__result) : "v"(__A), "v"(__B) :);
#else
/* VMX Vector Multiply Even Unsigned Word. */
- __asm__(
- "vmuleuw %0,%1,%2"
- : "=v" (result)
- : "v" (__A), "v" (__B)
- : );
+ __asm__("vmuleuw %0,%1,%2" : "=v"(__result) : "v"(__A), "v"(__B) :);
#endif
- return (__m128i) result;
+ return (__m128i)__result;
#else
- return (__m128i) vec_mule ((__v4su)__A, (__v4su)__B);
+ return (__m128i)vec_mule((__v4su)__A, (__v4su)__B);
#endif
}
+#endif
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_slli_epi16 (__m128i __A, int __B)
-{
- __v8hu lshift;
- __v8hi result = { 0, 0, 0, 0, 0, 0, 0, 0 };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_slli_epi16(__m128i __A, int __B) {
+ __v8hu __lshift;
+ __v8hi __result = {0, 0, 0, 0, 0, 0, 0, 0};
- if (__B >= 0 && __B < 16)
- {
- if (__builtin_constant_p(__B))
- lshift = (__v8hu) vec_splat_s16(__B);
- else
- lshift = vec_splats ((unsigned short) __B);
+ if (__B >= 0 && __B < 16) {
+ if (__builtin_constant_p(__B))
+ __lshift = (__v8hu)vec_splat_s16(__B);
+ else
+ __lshift = vec_splats((unsigned short)__B);
- result = vec_sl ((__v8hi) __A, lshift);
- }
+ __result = vec_sl((__v8hi)__A, __lshift);
+ }
- return (__m128i) result;
+ return (__m128i)__result;
}
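The zero-initialized __result plus the range check reproduce the SSE rule that shift counts outside 0..15 yield zero, whereas vec_sl alone shifts modulo the element width. A scalar sketch of the intended semantics:

#include <stdio.h>

/* Scalar sketch of _mm_slli_epi16's range handling.  Demo only. */
static unsigned short slli16_model(unsigned short x, int b) {
  return (b >= 0 && b < 16) ? (unsigned short)(x << b) : 0;
}

int main(void) {
  /* 6, 32768, then 0 for the out-of-range count. */
  printf("%u %u %u\n", slli16_model(3, 1), slli16_model(3, 15),
         slli16_model(3, 16));
  return 0;
}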
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_slli_epi32 (__m128i __A, int __B)
-{
- __v4su lshift;
- __v4si result = { 0, 0, 0, 0 };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_slli_epi32(__m128i __A, int __B) {
+ __v4su __lshift;
+ __v4si __result = {0, 0, 0, 0};
- if (__B >= 0 && __B < 32)
- {
- if (__builtin_constant_p(__B) && __B < 16)
- lshift = (__v4su) vec_splat_s32(__B);
- else
- lshift = vec_splats ((unsigned int) __B);
+ if (__B >= 0 && __B < 32) {
+ if (__builtin_constant_p(__B) && __B < 16)
+ __lshift = (__v4su)vec_splat_s32(__B);
+ else
+ __lshift = vec_splats((unsigned int)__B);
- result = vec_sl ((__v4si) __A, lshift);
- }
+ __result = vec_sl((__v4si)__A, __lshift);
+ }
- return (__m128i) result;
+ return (__m128i)__result;
}
#ifdef _ARCH_PWR8
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_slli_epi64 (__m128i __A, int __B)
-{
- __v2du lshift;
- __v2di result = { 0, 0 };
-
- if (__B >= 0 && __B < 64)
- {
- if (__builtin_constant_p(__B) && __B < 16)
- lshift = (__v2du) vec_splat_s32(__B);
- else
- lshift = (__v2du) vec_splats ((unsigned int) __B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_slli_epi64(__m128i __A, int __B) {
+ __v2du __lshift;
+ __v2di __result = {0, 0};
+
+ if (__B >= 0 && __B < 64) {
+ if (__builtin_constant_p(__B) && __B < 16)
+ __lshift = (__v2du)vec_splat_s32(__B);
+ else
+ __lshift = (__v2du)vec_splats((unsigned int)__B);
- result = vec_sl ((__v2di) __A, lshift);
- }
+ __result = vec_sl((__v2di)__A, __lshift);
+ }
- return (__m128i) result;
+ return (__m128i)__result;
}
#endif
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srai_epi16 (__m128i __A, int __B)
-{
- __v8hu rshift = { 15, 15, 15, 15, 15, 15, 15, 15 };
- __v8hi result;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srai_epi16(__m128i __A, int __B) {
+ __v8hu __rshift = {15, 15, 15, 15, 15, 15, 15, 15};
+ __v8hi __result;
- if (__B < 16)
- {
- if (__builtin_constant_p(__B))
- rshift = (__v8hu) vec_splat_s16(__B);
- else
- rshift = vec_splats ((unsigned short) __B);
- }
- result = vec_sra ((__v8hi) __A, rshift);
+ if (__B < 16) {
+ if (__builtin_constant_p(__B))
+ __rshift = (__v8hu)vec_splat_s16(__B);
+ else
+ __rshift = vec_splats((unsigned short)__B);
+ }
+ __result = vec_sra((__v8hi)__A, __rshift);
- return (__m128i) result;
+ return (__m128i)__result;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srai_epi32 (__m128i __A, int __B)
-{
- __v4su rshift = { 31, 31, 31, 31 };
- __v4si result;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srai_epi32(__m128i __A, int __B) {
+ __v4su __rshift = {31, 31, 31, 31};
+ __v4si __result;
- if (__B < 32)
- {
- if (__builtin_constant_p(__B))
- {
- if (__B < 16)
- rshift = (__v4su) vec_splat_s32(__B);
- else
- rshift = (__v4su) vec_splats((unsigned int)__B);
- }
+ if (__B < 32) {
+ if (__builtin_constant_p(__B)) {
+ if (__B < 16)
+ __rshift = (__v4su)vec_splat_s32(__B);
else
- rshift = vec_splats ((unsigned int) __B);
- }
- result = vec_sra ((__v4si) __A, rshift);
+ __rshift = (__v4su)vec_splats((unsigned int)__B);
+ } else
+ __rshift = vec_splats((unsigned int)__B);
+ }
+ __result = vec_sra((__v4si)__A, __rshift);
- return (__m128i) result;
+ return (__m128i)__result;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_bslli_si128 (__m128i __A, const int __N)
-{
- __v16qu result;
- const __v16qu zeros = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_bslli_si128(__m128i __A, const int __N) {
+ __v16qu __result;
+ const __v16qu __zeros = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
if (__N < 16)
- result = vec_sld ((__v16qu) __A, zeros, __N);
+ __result = vec_sld((__v16qu)__A, __zeros, __N);
else
- result = zeros;
+ __result = __zeros;
- return (__m128i) result;
+ return (__m128i)__result;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_bsrli_si128 (__m128i __A, const int __N)
-{
- __v16qu result;
- const __v16qu zeros = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_bsrli_si128(__m128i __A, const int __N) {
+ __v16qu __result;
+ const __v16qu __zeros = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
if (__N < 16)
#ifdef __LITTLE_ENDIAN__
if (__builtin_constant_p(__N))
/* Would like to use Vector Shift Left Double by Octet
- Immediate here to use the immediate form and avoid
- load of __N * 8 value into a separate VR. */
- result = vec_sld (zeros, (__v16qu) __A, (16 - __N));
+ Immediate here to use the immediate form and avoid
+ load of __N * 8 value into a separate VR. */
+ __result = vec_sld(__zeros, (__v16qu)__A, (16 - __N));
else
#endif
- {
- __v16qu shift = vec_splats((unsigned char)(__N*8));
+ {
+ __v16qu __shift = vec_splats((unsigned char)(__N * 8));
#ifdef __LITTLE_ENDIAN__
- result = vec_sro ((__v16qu)__A, shift);
+ __result = vec_sro((__v16qu)__A, __shift);
#else
- result = vec_slo ((__v16qu)__A, shift);
+ __result = vec_slo((__v16qu)__A, __shift);
#endif
- }
+ }
else
- result = zeros;
+ __result = __zeros;
- return (__m128i) result;
+ return (__m128i)__result;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srli_si128 (__m128i __A, const int __N)
-{
- return _mm_bsrli_si128 (__A, __N);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srli_si128(__m128i __A, const int __N) {
+ return _mm_bsrli_si128(__A, __N);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_slli_si128 (__m128i __A, const int _imm5)
-{
- __v16qu result;
- const __v16qu zeros = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_slli_si128(__m128i __A, const int _imm5) {
+ __v16qu __result;
+ const __v16qu __zeros = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
if (_imm5 < 16)
#ifdef __LITTLE_ENDIAN__
- result = vec_sld ((__v16qu) __A, zeros, _imm5);
+ __result = vec_sld((__v16qu)__A, __zeros, _imm5);
#else
- result = vec_sld (zeros, (__v16qu) __A, (16 - _imm5));
+ __result = vec_sld(__zeros, (__v16qu)__A, (16 - _imm5));
#endif
else
- result = zeros;
+ __result = __zeros;
- return (__m128i) result;
+ return (__m128i)__result;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srli_epi16 (__m128i __A, int __B)
-{
-  __v8hu rshift;
-  __v8hi result = { 0, 0, 0, 0, 0, 0, 0, 0 };
+extern __inline __m128i
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_srli_epi16(__m128i __A, int __B) {
+  __v8hu __rshift;
+  __v8hi __result = {0, 0, 0, 0, 0, 0, 0, 0};
- if (__B < 16)
- {
- if (__builtin_constant_p(__B))
- rshift = (__v8hu) vec_splat_s16(__B);
- else
- rshift = vec_splats ((unsigned short) __B);
+ if (__B < 16) {
+ if (__builtin_constant_p(__B))
+ __rshift = (__v8hu)vec_splat_s16(__B);
+ else
+ __rshift = vec_splats((unsigned short)__B);
- result = vec_sr ((__v8hi) __A, rshift);
- }
+ __result = vec_sr((__v8hi)__A, __rshift);
+ }
- return (__m128i) result;
+ return (__m128i)__result;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srli_epi32 (__m128i __A, int __B)
-{
- __v4su rshift;
- __v4si result = { 0, 0, 0, 0 };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srli_epi32(__m128i __A, int __B) {
+ __v4su __rshift;
+ __v4si __result = {0, 0, 0, 0};
- if (__B < 32)
- {
- if (__builtin_constant_p(__B))
- {
- if (__B < 16)
- rshift = (__v4su) vec_splat_s32(__B);
- else
- rshift = (__v4su) vec_splats((unsigned int)__B);
- }
+ if (__B < 32) {
+ if (__builtin_constant_p(__B)) {
+ if (__B < 16)
+ __rshift = (__v4su)vec_splat_s32(__B);
else
- rshift = vec_splats ((unsigned int) __B);
+ __rshift = (__v4su)vec_splats((unsigned int)__B);
+ } else
+ __rshift = vec_splats((unsigned int)__B);
- result = vec_sr ((__v4si) __A, rshift);
- }
+ __result = vec_sr((__v4si)__A, __rshift);
+ }
- return (__m128i) result;
+ return (__m128i)__result;
}
#ifdef _ARCH_PWR8
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srli_epi64 (__m128i __A, int __B)
-{
- __v2du rshift;
- __v2di result = { 0, 0 };
-
- if (__B < 64)
- {
- if (__builtin_constant_p(__B))
- {
- if (__B < 16)
- rshift = (__v2du) vec_splat_s32(__B);
- else
- rshift = (__v2du) vec_splats((unsigned long long)__B);
- }
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srli_epi64(__m128i __A, int __B) {
+ __v2du __rshift;
+ __v2di __result = {0, 0};
+
+ if (__B < 64) {
+ if (__builtin_constant_p(__B)) {
+ if (__B < 16)
+ __rshift = (__v2du)vec_splat_s32(__B);
else
- rshift = (__v2du) vec_splats ((unsigned int) __B);
+ __rshift = (__v2du)vec_splats((unsigned long long)__B);
+ } else
+ __rshift = (__v2du)vec_splats((unsigned int)__B);
- result = vec_sr ((__v2di) __A, rshift);
- }
+ __result = vec_sr((__v2di)__A, __rshift);
+ }
- return (__m128i) result;
+ return (__m128i)__result;
}
#endif
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sll_epi16 (__m128i __A, __m128i __B)
-{
- __v8hu lshift;
- __vector __bool short shmask;
- const __v8hu shmax = { 15, 15, 15, 15, 15, 15, 15, 15 };
- __v8hu result;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sll_epi16(__m128i __A, __m128i __B) {
+ __v8hu __lshift;
+ __vector __bool short __shmask;
+ const __v8hu __shmax = {15, 15, 15, 15, 15, 15, 15, 15};
+ __v8hu __result;
#ifdef __LITTLE_ENDIAN__
- lshift = vec_splat ((__v8hu) __B, 0);
+ __lshift = vec_splat((__v8hu)__B, 0);
#else
- lshift = vec_splat ((__v8hu) __B, 3);
+ __lshift = vec_splat((__v8hu)__B, 3);
#endif
- shmask = vec_cmple (lshift, shmax);
- result = vec_sl ((__v8hu) __A, lshift);
- result = vec_sel ((__v8hu) shmask, result, shmask);
+ __shmask = vec_cmple(__lshift, __shmax);
+ __result = vec_sl((__v8hu)__A, __lshift);
+ __result = vec_sel((__v8hu)__shmask, __result, __shmask);
- return (__m128i) result;
+ return (__m128i)__result;
}
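The cmple/sel idiom above handles per-element shift counts taken from a vector: __shmask is all-ones where the count is in range and zero elsewhere, so selecting between __shmask itself and the shifted value zeroes exactly the out-of-range lanes. A one-element scalar sketch with hypothetical helper names:

#include <stdio.h>

/* vec_sel(a, b, m) == (a & ~m) | (b & m), per element. */
static unsigned short sel(unsigned short a, unsigned short b,
                          unsigned short m) {
  return (unsigned short)((a & (unsigned short)~m) | (b & m));
}

static unsigned short sll_epi16_model(unsigned short x, unsigned count) {
  unsigned short shmask = (count <= 15) ? 0xFFFF : 0;          /* vec_cmple */
  unsigned short shifted = (unsigned short)(x << (count % 16)); /* vec_sl */
  /* shmask doubles as the zero source: it is zero exactly where the
     count is out of range. */
  return sel(shmask, shifted, shmask);
}

int main(void) {
  printf("%u %u\n", sll_epi16_model(3, 2), sll_epi16_model(3, 16)); /* 12 0 */
  return 0;
}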
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sll_epi32 (__m128i __A, __m128i __B)
-{
- __v4su lshift;
- __vector __bool int shmask;
- const __v4su shmax = { 32, 32, 32, 32 };
- __v4su result;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sll_epi32(__m128i __A, __m128i __B) {
+ __v4su __lshift;
+ __vector __bool int __shmask;
+ const __v4su __shmax = {32, 32, 32, 32};
+ __v4su __result;
#ifdef __LITTLE_ENDIAN__
- lshift = vec_splat ((__v4su) __B, 0);
+ __lshift = vec_splat((__v4su)__B, 0);
#else
- lshift = vec_splat ((__v4su) __B, 1);
+ __lshift = vec_splat((__v4su)__B, 1);
#endif
- shmask = vec_cmplt (lshift, shmax);
- result = vec_sl ((__v4su) __A, lshift);
- result = vec_sel ((__v4su) shmask, result, shmask);
+ __shmask = vec_cmplt(__lshift, __shmax);
+ __result = vec_sl((__v4su)__A, __lshift);
+ __result = vec_sel((__v4su)__shmask, __result, __shmask);
- return (__m128i) result;
+ return (__m128i)__result;
}
#ifdef _ARCH_PWR8
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sll_epi64 (__m128i __A, __m128i __B)
-{
- __v2du lshift;
- __vector __bool long long shmask;
- const __v2du shmax = { 64, 64 };
- __v2du result;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sll_epi64(__m128i __A, __m128i __B) {
+ __v2du __lshift;
+ __vector __bool long long __shmask;
+ const __v2du __shmax = {64, 64};
+ __v2du __result;
- lshift = vec_splat ((__v2du) __B, 0);
- shmask = vec_cmplt (lshift, shmax);
- result = vec_sl ((__v2du) __A, lshift);
- result = (__v2du)vec_sel ((__v2df) shmask, (__v2df)result, shmask);
+ __lshift = vec_splat((__v2du)__B, 0);
+ __shmask = vec_cmplt(__lshift, __shmax);
+ __result = vec_sl((__v2du)__A, __lshift);
+ __result = vec_sel((__v2du)__shmask, __result, __shmask);
- return (__m128i) result;
+ return (__m128i)__result;
}
#endif
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sra_epi16 (__m128i __A, __m128i __B)
-{
- const __v8hu rshmax = { 15, 15, 15, 15, 15, 15, 15, 15 };
- __v8hu rshift;
- __v8hi result;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sra_epi16(__m128i __A, __m128i __B) {
+ const __v8hu __rshmax = {15, 15, 15, 15, 15, 15, 15, 15};
+ __v8hu __rshift;
+ __v8hi __result;
#ifdef __LITTLE_ENDIAN__
- rshift = vec_splat ((__v8hu)__B, 0);
+ __rshift = vec_splat((__v8hu)__B, 0);
#else
- rshift = vec_splat ((__v8hu)__B, 3);
+ __rshift = vec_splat((__v8hu)__B, 3);
#endif
- rshift = vec_min (rshift, rshmax);
- result = vec_sra ((__v8hi) __A, rshift);
+ __rshift = vec_min(__rshift, __rshmax);
+ __result = vec_sra((__v8hi)__A, __rshift);
- return (__m128i) result;
+ return (__m128i)__result;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sra_epi32 (__m128i __A, __m128i __B)
-{
- const __v4su rshmax = { 31, 31, 31, 31 };
- __v4su rshift;
- __v4si result;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sra_epi32(__m128i __A, __m128i __B) {
+ const __v4su __rshmax = {31, 31, 31, 31};
+ __v4su __rshift;
+ __v4si __result;
#ifdef __LITTLE_ENDIAN__
- rshift = vec_splat ((__v4su)__B, 0);
+ __rshift = vec_splat((__v4su)__B, 0);
#else
- rshift = vec_splat ((__v4su)__B, 1);
+ __rshift = vec_splat((__v4su)__B, 1);
#endif
- rshift = vec_min (rshift, rshmax);
- result = vec_sra ((__v4si) __A, rshift);
+ __rshift = vec_min(__rshift, __rshmax);
+ __result = vec_sra((__v4si)__A, __rshift);
- return (__m128i) result;
+ return (__m128i)__result;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srl_epi16 (__m128i __A, __m128i __B)
-{
- __v8hu rshift;
- __vector __bool short shmask;
- const __v8hu shmax = { 15, 15, 15, 15, 15, 15, 15, 15 };
- __v8hu result;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srl_epi16(__m128i __A, __m128i __B) {
+ __v8hu __rshift;
+ __vector __bool short __shmask;
+ const __v8hu __shmax = {15, 15, 15, 15, 15, 15, 15, 15};
+ __v8hu __result;
#ifdef __LITTLE_ENDIAN__
- rshift = vec_splat ((__v8hu) __B, 0);
+ __rshift = vec_splat((__v8hu)__B, 0);
#else
- rshift = vec_splat ((__v8hu) __B, 3);
+ __rshift = vec_splat((__v8hu)__B, 3);
#endif
- shmask = vec_cmple (rshift, shmax);
- result = vec_sr ((__v8hu) __A, rshift);
- result = vec_sel ((__v8hu) shmask, result, shmask);
+ __shmask = vec_cmple(__rshift, __shmax);
+ __result = vec_sr((__v8hu)__A, __rshift);
+ __result = vec_sel((__v8hu)__shmask, __result, __shmask);
- return (__m128i) result;
+ return (__m128i)__result;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srl_epi32 (__m128i __A, __m128i __B)
-{
- __v4su rshift;
- __vector __bool int shmask;
- const __v4su shmax = { 32, 32, 32, 32 };
- __v4su result;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srl_epi32(__m128i __A, __m128i __B) {
+ __v4su __rshift;
+ __vector __bool int __shmask;
+ const __v4su __shmax = {32, 32, 32, 32};
+ __v4su __result;
#ifdef __LITTLE_ENDIAN__
- rshift = vec_splat ((__v4su) __B, 0);
+ __rshift = vec_splat((__v4su)__B, 0);
#else
- rshift = vec_splat ((__v4su) __B, 1);
+ __rshift = vec_splat((__v4su)__B, 1);
#endif
- shmask = vec_cmplt (rshift, shmax);
- result = vec_sr ((__v4su) __A, rshift);
- result = vec_sel ((__v4su) shmask, result, shmask);
+ __shmask = vec_cmplt(__rshift, __shmax);
+ __result = vec_sr((__v4su)__A, __rshift);
+ __result = vec_sel((__v4su)__shmask, __result, __shmask);
- return (__m128i) result;
+ return (__m128i)__result;
}
#ifdef _ARCH_PWR8
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_srl_epi64 (__m128i __A, __m128i __B)
-{
- __v2du rshift;
- __vector __bool long long shmask;
- const __v2du shmax = { 64, 64 };
- __v2du result;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_srl_epi64(__m128i __A, __m128i __B) {
+ __v2du __rshift;
+ __vector __bool long long __shmask;
+ const __v2du __shmax = {64, 64};
+ __v2du __result;
- rshift = vec_splat ((__v2du) __B, 0);
- shmask = vec_cmplt (rshift, shmax);
- result = vec_sr ((__v2du) __A, rshift);
- result = (__v2du)vec_sel ((__v2df) shmask, (__v2df)result, shmask);
+ __rshift = vec_splat((__v2du)__B, 0);
+ __shmask = vec_cmplt(__rshift, __shmax);
+ __result = vec_sr((__v2du)__A, __rshift);
+ __result = vec_sel((__v2du)__shmask, __result, __shmask);
- return (__m128i) result;
+ return (__m128i)__result;
}
#endif
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_and_pd (__m128d __A, __m128d __B)
-{
- return (vec_and ((__v2df) __A, (__v2df) __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_and_pd(__m128d __A, __m128d __B) {
+ return (vec_and((__v2df)__A, (__v2df)__B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_andnot_pd (__m128d __A, __m128d __B)
-{
- return (vec_andc ((__v2df) __B, (__v2df) __A));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_andnot_pd(__m128d __A, __m128d __B) {
+ return (vec_andc((__v2df)__B, (__v2df)__A));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_or_pd (__m128d __A, __m128d __B)
-{
- return (vec_or ((__v2df) __A, (__v2df) __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_or_pd(__m128d __A, __m128d __B) {
+ return (vec_or((__v2df)__A, (__v2df)__B));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_xor_pd (__m128d __A, __m128d __B)
-{
- return (vec_xor ((__v2df) __A, (__v2df) __B));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_xor_pd(__m128d __A, __m128d __B) {
+ return (vec_xor((__v2df)__A, (__v2df)__B));
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_and_si128 (__m128i __A, __m128i __B)
-{
- return (__m128i)vec_and ((__v2di) __A, (__v2di) __B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_and_si128(__m128i __A, __m128i __B) {
+ return (__m128i)vec_and((__v2di)__A, (__v2di)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_andnot_si128 (__m128i __A, __m128i __B)
-{
- return (__m128i)vec_andc ((__v2di) __B, (__v2di) __A);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_andnot_si128(__m128i __A, __m128i __B) {
+ return (__m128i)vec_andc((__v2di)__B, (__v2di)__A);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_or_si128 (__m128i __A, __m128i __B)
-{
- return (__m128i)vec_or ((__v2di) __A, (__v2di) __B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_or_si128(__m128i __A, __m128i __B) {
+ return (__m128i)vec_or((__v2di)__A, (__v2di)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_xor_si128 (__m128i __A, __m128i __B)
-{
- return (__m128i)vec_xor ((__v2di) __A, (__v2di) __B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_xor_si128(__m128i __A, __m128i __B) {
+ return (__m128i)vec_xor((__v2di)__A, (__v2di)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_epi8 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_cmpeq ((__v16qi) __A, (__v16qi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_epi8(__m128i __A, __m128i __B) {
+ return (__m128i)vec_cmpeq((__v16qi)__A, (__v16qi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_cmpeq ((__v8hi) __A, (__v8hi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_cmpeq((__v8hi)__A, (__v8hi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_epi32 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_cmpeq ((__v4si) __A, (__v4si)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_epi32(__m128i __A, __m128i __B) {
+ return (__m128i)vec_cmpeq((__v4si)__A, (__v4si)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_epi8 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_cmplt ((__v16qi) __A, (__v16qi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmplt_epi8(__m128i __A, __m128i __B) {
+ return (__m128i)vec_cmplt((__v16qi)__A, (__v16qi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_cmplt ((__v8hi) __A, (__v8hi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmplt_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_cmplt((__v8hi)__A, (__v8hi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_epi32 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_cmplt ((__v4si) __A, (__v4si)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmplt_epi32(__m128i __A, __m128i __B) {
+ return (__m128i)vec_cmplt((__v4si)__A, (__v4si)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_epi8 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_cmpgt ((__v16qi) __A, (__v16qi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_epi8(__m128i __A, __m128i __B) {
+ return (__m128i)vec_cmpgt((__v16qi)__A, (__v16qi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_cmpgt ((__v8hi) __A, (__v8hi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_cmpgt((__v8hi)__A, (__v8hi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_epi32 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_cmpgt ((__v4si) __A, (__v4si)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_epi32(__m128i __A, __m128i __B) {
+ return (__m128i)vec_cmpgt((__v4si)__A, (__v4si)__B);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_extract_epi16 (__m128i const __A, int const __N)
-{
- return (unsigned short) ((__v8hi)__A)[__N & 7];
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_extract_epi16(__m128i const __A, int const __N) {
+ return (unsigned short)((__v8hi)__A)[__N & 7];
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_insert_epi16 (__m128i const __A, int const __D, int const __N)
-{
- __v8hi result = (__v8hi)__A;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_insert_epi16(__m128i const __A, int const __D, int const __N) {
+ __v8hi __result = (__v8hi)__A;
- result [(__N & 7)] = __D;
+ __result[(__N & 7)] = __D;
- return (__m128i) result;
+ return (__m128i)__result;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_max ((__v8hi)__A, (__v8hi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_max_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_max((__v8hi)__A, (__v8hi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_epu8 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_max ((__v16qu) __A, (__v16qu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_max_epu8(__m128i __A, __m128i __B) {
+ return (__m128i)vec_max((__v16qu)__A, (__v16qu)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_epi16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_min ((__v8hi) __A, (__v8hi)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_min_epi16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_min((__v8hi)__A, (__v8hi)__B);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_epu8 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_min ((__v16qu) __A, (__v16qu)__B);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_min_epu8(__m128i __A, __m128i __B) {
+ return (__m128i)vec_min((__v16qu)__A, (__v16qu)__B);
}
-
#ifdef _ARCH_PWR8
/* Intrinsic functions that require PowerISA 2.07 minimum. */
-/* Creates a 4-bit mask from the most significant bits of the SPFP values. */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movemask_epi8 (__m128i __A)
-{
- __vector unsigned long long result;
- static const __vector unsigned char perm_mask =
- {
- 0x78, 0x70, 0x68, 0x60, 0x58, 0x50, 0x48, 0x40,
- 0x38, 0x30, 0x28, 0x20, 0x18, 0x10, 0x08, 0x00
- };
+/* Return a mask created from the most significant bit of each 8-bit
+ element in A. */
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_movemask_epi8(__m128i __A) {
+#ifdef _ARCH_PWR10
+ return vec_extractm((__v16qu)__A);
+#else
+ __vector unsigned long long __result;
+ static const __vector unsigned char __perm_mask = {
+ 0x78, 0x70, 0x68, 0x60, 0x58, 0x50, 0x48, 0x40,
+ 0x38, 0x30, 0x28, 0x20, 0x18, 0x10, 0x08, 0x00};
- result = ((__vector unsigned long long)
- vec_vbpermq ((__vector unsigned char) __A,
- (__vector unsigned char) perm_mask));
+ __result = ((__vector unsigned long long)vec_vbpermq(
+ (__vector unsigned char)__A, (__vector unsigned char)__perm_mask));
#ifdef __LITTLE_ENDIAN__
- return result[1];
+ return __result[1];
#else
- return result[0];
+ return __result[0];
#endif
+#endif /* !_ARCH_PWR10 */
}
#endif /* _ARCH_PWR8 */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mulhi_epu16 (__m128i __A, __m128i __B)
-{
- __v4su w0, w1;
- __v16qu xform1 = {
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mulhi_epu16(__m128i __A, __m128i __B) {
+ __v4su __w0, __w1;
+ __v16qu __xform1 = {
#ifdef __LITTLE_ENDIAN__
- 0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17,
- 0x0A, 0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
+ 0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17, 0x0A,
+ 0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
#else
- 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15,
- 0x08, 0x09, 0x18, 0x19, 0x0C, 0x0D, 0x1C, 0x1D
+ 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15, 0x08,
+ 0x09, 0x18, 0x19, 0x0C, 0x0D, 0x1C, 0x1D
#endif
- };
+ };
- w0 = vec_vmuleuh ((__v8hu)__A, (__v8hu)__B);
- w1 = vec_vmulouh ((__v8hu)__A, (__v8hu)__B);
- return (__m128i) vec_perm (w0, w1, xform1);
+ __w0 = vec_vmuleuh((__v8hu)__A, (__v8hu)__B);
+ __w1 = vec_vmulouh((__v8hu)__A, (__v8hu)__B);
+ return (__m128i)vec_perm(__w0, __w1, __xform1);
}
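
The even/odd multiplies plus the permute yield the high 16 bits of each unsigned 16x16 product; for instance 0xffff * 3 = 0x0002fffd, whose high half is 2. A sketch with illustrative values:

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128i a = _mm_set1_epi16((short)0xffff); /* 65535 in every lane */
  __m128i b = _mm_set1_epi16(3);
  unsigned short hi[8];
  _mm_storeu_si128((__m128i *)hi, _mm_mulhi_epu16(a, b));
  printf("%u\n", (unsigned)hi[0]); /* prints 2 */
  return 0;
}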
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shufflehi_epi16 (__m128i __A, const int __mask)
-{
- unsigned long element_selector_98 = __mask & 0x03;
- unsigned long element_selector_BA = (__mask >> 2) & 0x03;
- unsigned long element_selector_DC = (__mask >> 4) & 0x03;
- unsigned long element_selector_FE = (__mask >> 6) & 0x03;
- static const unsigned short permute_selectors[4] =
- {
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_shufflehi_epi16(__m128i __A, const int __mask) {
+ unsigned long __element_selector_98 = __mask & 0x03;
+ unsigned long __element_selector_BA = (__mask >> 2) & 0x03;
+ unsigned long __element_selector_DC = (__mask >> 4) & 0x03;
+ unsigned long __element_selector_FE = (__mask >> 6) & 0x03;
+ static const unsigned short __permute_selectors[4] = {
#ifdef __LITTLE_ENDIAN__
- 0x0908, 0x0B0A, 0x0D0C, 0x0F0E
+ 0x0908, 0x0B0A, 0x0D0C, 0x0F0E
#else
- 0x0809, 0x0A0B, 0x0C0D, 0x0E0F
+ 0x0809, 0x0A0B, 0x0C0D, 0x0E0F
#endif
- };
- __v2du pmask =
+ };
+ __v2du __pmask =
#ifdef __LITTLE_ENDIAN__
- { 0x1716151413121110UL, 0UL};
+ {0x1716151413121110UL, 0UL};
#else
- { 0x1011121314151617UL, 0UL};
+ {0x1011121314151617UL, 0UL};
#endif
- __m64_union t;
- __v2du a, r;
-
- t.as_short[0] = permute_selectors[element_selector_98];
- t.as_short[1] = permute_selectors[element_selector_BA];
- t.as_short[2] = permute_selectors[element_selector_DC];
- t.as_short[3] = permute_selectors[element_selector_FE];
- pmask[1] = t.as_m64;
- a = (__v2du)__A;
- r = vec_perm (a, a, (__vector unsigned char)pmask);
- return (__m128i) r;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shufflelo_epi16 (__m128i __A, const int __mask)
-{
- unsigned long element_selector_10 = __mask & 0x03;
- unsigned long element_selector_32 = (__mask >> 2) & 0x03;
- unsigned long element_selector_54 = (__mask >> 4) & 0x03;
- unsigned long element_selector_76 = (__mask >> 6) & 0x03;
- static const unsigned short permute_selectors[4] =
- {
+ __m64_union __t;
+ __v2du __a, __r;
+
+ __t.as_short[0] = __permute_selectors[__element_selector_98];
+ __t.as_short[1] = __permute_selectors[__element_selector_BA];
+ __t.as_short[2] = __permute_selectors[__element_selector_DC];
+ __t.as_short[3] = __permute_selectors[__element_selector_FE];
+ __pmask[1] = __t.as_m64;
+ __a = (__v2du)__A;
+ __r = vec_perm(__a, __a, (__vector unsigned char)__pmask);
+ return (__m128i)__r;
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_shufflelo_epi16(__m128i __A, const int __mask) {
+ unsigned long __element_selector_10 = __mask & 0x03;
+ unsigned long __element_selector_32 = (__mask >> 2) & 0x03;
+ unsigned long __element_selector_54 = (__mask >> 4) & 0x03;
+ unsigned long __element_selector_76 = (__mask >> 6) & 0x03;
+ static const unsigned short __permute_selectors[4] = {
#ifdef __LITTLE_ENDIAN__
- 0x0100, 0x0302, 0x0504, 0x0706
+ 0x0100, 0x0302, 0x0504, 0x0706
#else
- 0x0001, 0x0203, 0x0405, 0x0607
+ 0x0001, 0x0203, 0x0405, 0x0607
#endif
- };
- __v2du pmask =
+ };
+ __v2du __pmask =
#ifdef __LITTLE_ENDIAN__
- { 0UL, 0x1f1e1d1c1b1a1918UL};
+ {0UL, 0x1f1e1d1c1b1a1918UL};
#else
- { 0UL, 0x18191a1b1c1d1e1fUL};
+ {0UL, 0x18191a1b1c1d1e1fUL};
#endif
- __m64_union t;
- __v2du a, r;
- t.as_short[0] = permute_selectors[element_selector_10];
- t.as_short[1] = permute_selectors[element_selector_32];
- t.as_short[2] = permute_selectors[element_selector_54];
- t.as_short[3] = permute_selectors[element_selector_76];
- pmask[0] = t.as_m64;
- a = (__v2du)__A;
- r = vec_perm (a, a, (__vector unsigned char)pmask);
- return (__m128i) r;
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shuffle_epi32 (__m128i __A, const int __mask)
-{
- unsigned long element_selector_10 = __mask & 0x03;
- unsigned long element_selector_32 = (__mask >> 2) & 0x03;
- unsigned long element_selector_54 = (__mask >> 4) & 0x03;
- unsigned long element_selector_76 = (__mask >> 6) & 0x03;
- static const unsigned int permute_selectors[4] =
- {
+ __m64_union __t;
+ __v2du __a, __r;
+ __t.as_short[0] = __permute_selectors[__element_selector_10];
+ __t.as_short[1] = __permute_selectors[__element_selector_32];
+ __t.as_short[2] = __permute_selectors[__element_selector_54];
+ __t.as_short[3] = __permute_selectors[__element_selector_76];
+ __pmask[0] = __t.as_m64;
+ __a = (__v2du)__A;
+ __r = vec_perm(__a, __a, (__vector unsigned char)__pmask);
+ return (__m128i)__r;
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_shuffle_epi32(__m128i __A, const int __mask) {
+ unsigned long __element_selector_10 = __mask & 0x03;
+ unsigned long __element_selector_32 = (__mask >> 2) & 0x03;
+ unsigned long __element_selector_54 = (__mask >> 4) & 0x03;
+ unsigned long __element_selector_76 = (__mask >> 6) & 0x03;
+ static const unsigned int __permute_selectors[4] = {
#ifdef __LITTLE_ENDIAN__
- 0x03020100, 0x07060504, 0x0B0A0908, 0x0F0E0D0C
+ 0x03020100, 0x07060504, 0x0B0A0908, 0x0F0E0D0C
#else
0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F
#endif
- };
- __v4su t;
-
- t[0] = permute_selectors[element_selector_10];
- t[1] = permute_selectors[element_selector_32];
- t[2] = permute_selectors[element_selector_54] + 0x10101010;
- t[3] = permute_selectors[element_selector_76] + 0x10101010;
- return (__m128i)vec_perm ((__v4si) __A, (__v4si)__A, (__vector unsigned char)t);
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskmoveu_si128 (__m128i __A, __m128i __B, char *__C)
-{
- __v2du hibit = { 0x7f7f7f7f7f7f7f7fUL, 0x7f7f7f7f7f7f7f7fUL};
- __v16qu mask, tmp;
- __m128i_u *p = (__m128i_u*)__C;
-
- tmp = (__v16qu)_mm_loadu_si128(p);
- mask = (__v16qu)vec_cmpgt ((__v16qu)__B, (__v16qu)hibit);
- tmp = vec_sel (tmp, (__v16qu)__A, mask);
- _mm_storeu_si128 (p, (__m128i)tmp);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_avg_epu8 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_avg ((__v16qu)__A, (__v16qu)__B);
-}
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_avg_epu16 (__m128i __A, __m128i __B)
-{
- return (__m128i) vec_avg ((__v8hu)__A, (__v8hu)__B);
-}
-
-
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sad_epu8 (__m128i __A, __m128i __B)
-{
- __v16qu a, b;
- __v16qu vmin, vmax, vabsdiff;
- __v4si vsum;
- const __v4su zero = { 0, 0, 0, 0 };
- __v4si result;
-
- a = (__v16qu) __A;
- b = (__v16qu) __B;
- vmin = vec_min (a, b);
- vmax = vec_max (a, b);
- vabsdiff = vec_sub (vmax, vmin);
+ };
+ __v4su __t;
+
+ __t[0] = __permute_selectors[__element_selector_10];
+ __t[1] = __permute_selectors[__element_selector_32];
+ __t[2] = __permute_selectors[__element_selector_54] + 0x10101010;
+ __t[3] = __permute_selectors[__element_selector_76] + 0x10101010;
+ return (__m128i)vec_perm((__v4si)__A, (__v4si)__A,
+ (__vector unsigned char)__t);
+}
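
The two-bit selector fields read from the low bits up: destination lane 0 takes source lane (mask & 3), lane 1 takes (mask >> 2) & 3, and so on. So mask 0x1b reverses the four lanes, as in this sketch (illustrative values):

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128i v = _mm_set_epi32(3, 2, 1, 0); /* lanes 0..3 hold 0..3 */
  int out[4];
  _mm_storeu_si128((__m128i *)out, _mm_shuffle_epi32(v, 0x1b));
  printf("%d %d %d %d\n", out[0], out[1], out[2], out[3]); /* 3 2 1 0 */
  return 0;
}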
+
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_maskmoveu_si128(__m128i __A, __m128i __B, char *__C) {
+ __v2du __hibit = {0x7f7f7f7f7f7f7f7fUL, 0x7f7f7f7f7f7f7f7fUL};
+ __v16qu __mask, __tmp;
+ __m128i_u *__p = (__m128i_u *)__C;
+
+ __tmp = (__v16qu)_mm_loadu_si128(__p);
+ __mask = (__v16qu)vec_cmpgt((__v16qu)__B, (__v16qu)__hibit);
+ __tmp = vec_sel(__tmp, (__v16qu)__A, __mask);
+ _mm_storeu_si128(__p, (__m128i)__tmp);
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_avg_epu8(__m128i __A, __m128i __B) {
+ return (__m128i)vec_avg((__v16qu)__A, (__v16qu)__B);
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_avg_epu16(__m128i __A, __m128i __B) {
+ return (__m128i)vec_avg((__v8hu)__A, (__v8hu)__B);
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sad_epu8(__m128i __A, __m128i __B) {
+ __v16qu __a, __b;
+ __v16qu __vabsdiff;
+ __v4si __vsum;
+ const __v4su __zero = {0, 0, 0, 0};
+ __v4si __result;
+
+ __a = (__v16qu)__A;
+ __b = (__v16qu)__B;
+#ifndef _ARCH_PWR9
+ __v16qu __vmin = vec_min(__a, __b);
+ __v16qu __vmax = vec_max(__a, __b);
+ __vabsdiff = vec_sub(__vmax, __vmin);
+#else
+ __vabsdiff = vec_absd(__a, __b);
+#endif
/* Sum four groups of bytes into integers. */
- vsum = (__vector signed int) vec_sum4s (vabsdiff, zero);
- /* Sum across four integers with two integer results. */
- result = vec_sum2s (vsum, (__vector signed int) zero);
- /* Rotate the sums into the correct position. */
+ __vsum = (__vector signed int)vec_sum4s(__vabsdiff, __zero);
#ifdef __LITTLE_ENDIAN__
- result = vec_sld (result, result, 4);
+ /* Sum across four integers with two integer results. */
+ __asm__("vsum2sws %0,%1,%2" : "=v"(__result) : "v"(__vsum), "v"(__zero));
+  /* Note: vec_sum2s could be used here, but on little-endian it adds
+     vector shifts that are not needed for this use case.  The shift
+     that would reposition the 32-bit results (currently at [0] and
+     [2]) to [1] and [3] would immediately have to be undone, since
+     the desired results are two 64-bit integers ([1]|[0] and
+     [3]|[2]).  Thus, no shift is performed. */
#else
- result = vec_sld (result, result, 6);
-#endif
+ /* Sum across four integers with two integer results. */
+ __result = vec_sum2s(__vsum, (__vector signed int)__zero);
/* Rotate the sums into the correct position. */
- return (__m128i) result;
+ __result = vec_sld(__result, __result, 6);
+#endif
+ return (__m128i)__result;
}
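
Each 64-bit half of the result holds the sum of the eight absolute byte differences of that half in its low 16 bits; e.g. |5 - 2| = 3 per byte gives 8 * 3 = 24 per half. A sketch with illustrative values:

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128i a = _mm_set1_epi8(5), b = _mm_set1_epi8(2);
  long long out[2];
  _mm_storeu_si128((__m128i *)out, _mm_sad_epu8(a, b));
  printf("%lld %lld\n", out[0], out[1]); /* 24 24 */
  return 0;
}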
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_stream_si32 (int *__A, int __B)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_stream_si32(int *__A, int __B) {
/* Use the data cache block touch for store transient. */
- __asm__ (
- "dcbtstt 0,%0"
- :
- : "b" (__A)
- : "memory"
- );
+ __asm__("dcbtstt 0,%0" : : "b"(__A) : "memory");
*__A = __B;
}
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_stream_si64 (long long int *__A, long long int __B)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_stream_si64(long long int *__A, long long int __B) {
/* Use the data cache block touch for store transient. */
- __asm__ (
- " dcbtstt 0,%0"
- :
- : "b" (__A)
- : "memory"
- );
+ __asm__(" dcbtstt 0,%0" : : "b"(__A) : "memory");
*__A = __B;
}
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_stream_si128 (__m128i *__A, __m128i __B)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_stream_si128(__m128i *__A, __m128i __B) {
/* Use the data cache block touch for store transient. */
- __asm__ (
- "dcbtstt 0,%0"
- :
- : "b" (__A)
- : "memory"
- );
+ __asm__("dcbtstt 0,%0" : : "b"(__A) : "memory");
*__A = __B;
}
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_stream_pd (double *__A, __m128d __B)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_stream_pd(double *__A, __m128d __B) {
/* Use the data cache block touch for store transient. */
- __asm__ (
- "dcbtstt 0,%0"
- :
- : "b" (__A)
- : "memory"
- );
- *(__m128d*)__A = __B;
-}
-
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_clflush (void const *__A)
-{
+ __asm__("dcbtstt 0,%0" : : "b"(__A) : "memory");
+ *(__m128d *)__A = __B;
+}
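
Unlike the x86 non-temporal stores, these compile to a dcbtstt cache hint followed by a plain, fully coherent store. A sketch of typical use when filling a buffer that will not be re-read soon (illustrative only):

#include <emmintrin.h>

void fill(__m128i *dst, int n) {
  __m128i zero = _mm_setzero_si128();
  for (int i = 0; i < n; ++i)
    _mm_stream_si128(&dst[i], zero); /* hint transient, then store */
}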
+
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_clflush(void const *__A) {
/* Use the data cache block flush. */
- __asm__ (
- "dcbf 0,%0"
- :
- : "b" (__A)
- : "memory"
- );
+ __asm__("dcbf 0,%0" : : "b"(__A) : "memory");
}
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_lfence (void)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_lfence(void) {
  /* Use lightweight sync for load-to-load ordering. */
- __atomic_thread_fence (__ATOMIC_RELEASE);
+ __atomic_thread_fence(__ATOMIC_RELEASE);
}
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mfence (void)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mfence(void) {
  /* Use heavyweight sync for any-to-any ordering. */
- __atomic_thread_fence (__ATOMIC_SEQ_CST);
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
}
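
On this target _mm_lfence lowers to a release fence (typically lwsync) and _mm_mfence to a sequentially consistent fence (typically hwsync), so the usual publish pattern carries over. A sketch with plain volatile flags (illustrative only; assumes another thread polls ready):

#include <emmintrin.h>

static volatile int payload;
static volatile int ready;

void publish(int v) {
  payload = v;  /* plain store */
  _mm_mfence(); /* order the payload store before the flag store */
  ready = 1;
}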
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi32_si128 (int __A)
-{
- return _mm_set_epi32 (0, 0, 0, __A);
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi32_si128(int __A) {
+ return _mm_set_epi32(0, 0, 0, __A);
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi64_si128 (long long __A)
-{
- return __extension__ (__m128i)(__v2di){ __A, 0LL };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64_si128(long long __A) {
+ return __extension__(__m128i)(__v2di){__A, 0LL};
}
/* Microsoft intrinsic. */
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi64x_si128 (long long __A)
-{
- return __extension__ (__m128i)(__v2di){ __A, 0LL };
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64x_si128(long long __A) {
+ return __extension__(__m128i)(__v2di){__A, 0LL};
}
/* Casts between various SP, DP, INT vector types.  Note that these perform
   no conversion of values; they just change the type. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castpd_ps(__m128d __A)
-{
- return (__m128) __A;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_castpd_ps(__m128d __A) {
+ return (__m128)__A;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castpd_si128(__m128d __A)
-{
- return (__m128i) __A;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_castpd_si128(__m128d __A) {
+ return (__m128i)__A;
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castps_pd(__m128 __A)
-{
- return (__m128d) __A;
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_castps_pd(__m128 __A) {
+ return (__m128d)__A;
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castps_si128(__m128 __A)
-{
- return (__m128i) __A;
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_castps_si128(__m128 __A) {
+ return (__m128i)__A;
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castsi128_ps(__m128i __A)
-{
- return (__m128) __A;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_castsi128_ps(__m128i __A) {
+ return (__m128)__A;
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_castsi128_pd(__m128i __A)
-{
- return (__m128d) __A;
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_castsi128_pd(__m128i __A) {
+ return (__m128d)__A;
}
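
A quick check that the casts are pure reinterpretation: the bit pattern of 1.0f (0x3f800000) survives unchanged. A sketch with illustrative values:

#include <emmintrin.h>
#include <stdio.h>

int main(void) {
  __m128 f = _mm_set1_ps(1.0f);
  unsigned int out[4];
  _mm_storeu_si128((__m128i *)out, _mm_castps_si128(f));
  printf("0x%08x\n", out[0]); /* prints 0x3f800000 */
  return 0;
}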
#else
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/immintrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/immintrin.h
new file mode 100644
index 000000000000..c1ada9889d4a
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/immintrin.h
@@ -0,0 +1,27 @@
+/*===---- immintrin.h - Implementation of Intel intrinsics on PowerPC ------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef IMMINTRIN_H_
+#define IMMINTRIN_H_
+
+#include <x86gprintrin.h>
+
+#include <mmintrin.h>
+
+#include <xmmintrin.h>
+
+#include <emmintrin.h>
+
+#include <pmmintrin.h>
+
+#include <tmmintrin.h>
+
+#include <smmintrin.h>
+
+#endif /* IMMINTRIN_H_ */
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/mm_malloc.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/mm_malloc.h
index b6bf22f92887..29c1de4a83e1 100644
--- a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/mm_malloc.h
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/mm_malloc.h
@@ -17,31 +17,25 @@
/* We can't depend on <stdlib.h> since the prototype of posix_memalign
may not be visible. */
#ifndef __cplusplus
-extern int posix_memalign (void **, size_t, size_t);
+extern int posix_memalign(void **, size_t, size_t);
#else
-extern "C" int posix_memalign (void **, size_t, size_t);
+extern "C" int posix_memalign(void **, size_t, size_t);
#endif
-static __inline void *
-_mm_malloc (size_t size, size_t alignment)
-{
+static __inline void *_mm_malloc(size_t __size, size_t __alignment) {
/* PowerPC64 ELF V2 ABI requires quadword alignment. */
- size_t vec_align = sizeof (__vector float);
- void *ptr;
+ size_t __vec_align = sizeof(__vector float);
+ void *__ptr;
- if (alignment < vec_align)
- alignment = vec_align;
- if (posix_memalign (&ptr, alignment, size) == 0)
- return ptr;
+ if (__alignment < __vec_align)
+ __alignment = __vec_align;
+ if (posix_memalign(&__ptr, __alignment, __size) == 0)
+ return __ptr;
else
return NULL;
}
-static __inline void
-_mm_free (void * ptr)
-{
- free (ptr);
-}
+static __inline void _mm_free(void *__ptr) { free(__ptr); }
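
A typical pairing (a sketch; assumes <xmmintrin.h> pulls this header in and that the requested alignment is a power of two, as posix_memalign requires):

#include <xmmintrin.h>

int main(void) {
  /* Request 64-byte alignment; anything below 16 is raised to quadword. */
  double *buf = (double *)_mm_malloc(64 * sizeof(double), 64);
  if (!buf)
    return 1;
  buf[0] = 1.0;
  _mm_free(buf);
  return 0;
}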
#else
#include_next <mm_malloc.h>
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/mmintrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/mmintrin.h
index 54e4ee9f4468..6f9c137b6a09 100644
--- a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/mmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/mmintrin.h
@@ -149,17 +149,17 @@ extern __inline long long
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_packs_pi16(__m64 __m1, __m64 __m2) {
- __vector signed short vm1;
- __vector signed char vresult;
+ __vector signed short __vm1;
+ __vector signed char __vresult;
- vm1 = (__vector signed short)(__vector unsigned long long)
+ __vm1 = (__vector signed short)(__vector unsigned long long)
#ifdef __LITTLE_ENDIAN__
{__m1, __m2};
#else
{__m2, __m1};
#endif
- vresult = vec_packs(vm1, vm1);
- return (__m64)((__vector long long)vresult)[0];
+ __vresult = vec_packs(__vm1, __vm1);
+ return (__m64)((__vector long long)__vresult)[0];
}
extern __inline __m64
@@ -174,17 +174,17 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_packs_pi32(__m64 __m1, __m64 __m2) {
- __vector signed int vm1;
- __vector signed short vresult;
+ __vector signed int __vm1;
+ __vector signed short __vresult;
- vm1 = (__vector signed int)(__vector unsigned long long)
+ __vm1 = (__vector signed int)(__vector unsigned long long)
#ifdef __LITTLE_ENDIAN__
{__m1, __m2};
#else
{__m2, __m1};
#endif
- vresult = vec_packs(vm1, vm1);
- return (__m64)((__vector long long)vresult)[0];
+ __vresult = vec_packs(__vm1, __vm1);
+ return (__m64)((__vector long long)__vresult)[0];
}
extern __inline __m64
@@ -199,19 +199,20 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_packs_pu16(__m64 __m1, __m64 __m2) {
- __vector unsigned char r;
- __vector signed short vm1 = (__vector signed short)(__vector long long)
+ __vector unsigned char __r;
+ __vector signed short __vm1 = (__vector signed short)(__vector long long)
#ifdef __LITTLE_ENDIAN__
{__m1, __m2};
#else
{__m2, __m1};
#endif
const __vector signed short __zero = {0};
- __vector __bool short __select = vec_cmplt(vm1, __zero);
- r = vec_packs((__vector unsigned short)vm1, (__vector unsigned short)vm1);
- __vector __bool char packsel = vec_pack(__select, __select);
- r = vec_sel(r, (const __vector unsigned char)__zero, packsel);
- return (__m64)((__vector long long)r)[0];
+ __vector __bool short __select = vec_cmplt(__vm1, __zero);
+ __r =
+ vec_packs((__vector unsigned short)__vm1, (__vector unsigned short)__vm1);
+ __vector __bool char __packsel = vec_pack(__select, __select);
+ __r = vec_sel(__r, (const __vector unsigned char)__zero, __packsel);
+ return (__m64)((__vector long long)__r)[0];
}
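
The vec_cmplt/vec_sel pair implements the unsigned-saturating clamp: negative inputs become 0 and values above 255 become 255. A worked sketch with illustrative values:

#include <mmintrin.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  __m64 w = _mm_set_pi16(300, -5, 128, 7); /* w3..w0 */
  __m64 r = _mm_packs_pu16(w, w);
  unsigned char b[8];
  memcpy(b, &r, 8);
  printf("%u %u %u %u\n", b[0], b[1], b[2], b[3]); /* 7 128 0 255 */
  return 0;
}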
extern __inline __m64
@@ -227,28 +228,28 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpackhi_pi8(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
- __vector unsigned char a, b, c;
+ __vector unsigned char __a, __b, __c;
- a = (__vector unsigned char)vec_splats(__m1);
- b = (__vector unsigned char)vec_splats(__m2);
- c = vec_mergel(a, b);
- return (__m64)((__vector long long)c)[1];
+ __a = (__vector unsigned char)vec_splats(__m1);
+ __b = (__vector unsigned char)vec_splats(__m2);
+ __c = vec_mergel(__a, __b);
+ return (__m64)((__vector long long)__c)[1];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_char[0] = m1.as_char[4];
- res.as_char[1] = m2.as_char[4];
- res.as_char[2] = m1.as_char[5];
- res.as_char[3] = m2.as_char[5];
- res.as_char[4] = m1.as_char[6];
- res.as_char[5] = m2.as_char[6];
- res.as_char[6] = m1.as_char[7];
- res.as_char[7] = m2.as_char[7];
+ __res.as_char[0] = __mu1.as_char[4];
+ __res.as_char[1] = __mu2.as_char[4];
+ __res.as_char[2] = __mu1.as_char[5];
+ __res.as_char[3] = __mu2.as_char[5];
+ __res.as_char[4] = __mu1.as_char[6];
+ __res.as_char[5] = __mu2.as_char[6];
+ __res.as_char[6] = __mu1.as_char[7];
+ __res.as_char[7] = __mu2.as_char[7];
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -263,17 +264,17 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpackhi_pi16(__m64 __m1, __m64 __m2) {
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_short[0] = m1.as_short[2];
- res.as_short[1] = m2.as_short[2];
- res.as_short[2] = m1.as_short[3];
- res.as_short[3] = m2.as_short[3];
+ __res.as_short[0] = __mu1.as_short[2];
+ __res.as_short[1] = __mu2.as_short[2];
+ __res.as_short[2] = __mu1.as_short[3];
+ __res.as_short[3] = __mu2.as_short[3];
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
}
extern __inline __m64
@@ -286,15 +287,15 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpackhi_pi32(__m64 __m1, __m64 __m2) {
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_int[0] = m1.as_int[1];
- res.as_int[1] = m2.as_int[1];
+ __res.as_int[0] = __mu1.as_int[1];
+ __res.as_int[1] = __mu2.as_int[1];
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
}
extern __inline __m64
@@ -308,28 +309,28 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpacklo_pi8(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
- __vector unsigned char a, b, c;
+ __vector unsigned char __a, __b, __c;
- a = (__vector unsigned char)vec_splats(__m1);
- b = (__vector unsigned char)vec_splats(__m2);
- c = vec_mergel(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector unsigned char)vec_splats(__m1);
+ __b = (__vector unsigned char)vec_splats(__m2);
+ __c = vec_mergel(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_char[0] = m1.as_char[0];
- res.as_char[1] = m2.as_char[0];
- res.as_char[2] = m1.as_char[1];
- res.as_char[3] = m2.as_char[1];
- res.as_char[4] = m1.as_char[2];
- res.as_char[5] = m2.as_char[2];
- res.as_char[6] = m1.as_char[3];
- res.as_char[7] = m2.as_char[3];
+ __res.as_char[0] = __mu1.as_char[0];
+ __res.as_char[1] = __mu2.as_char[0];
+ __res.as_char[2] = __mu1.as_char[1];
+ __res.as_char[3] = __mu2.as_char[1];
+ __res.as_char[4] = __mu1.as_char[2];
+ __res.as_char[5] = __mu2.as_char[2];
+ __res.as_char[6] = __mu1.as_char[3];
+ __res.as_char[7] = __mu2.as_char[3];
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -343,17 +344,17 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpacklo_pi16(__m64 __m1, __m64 __m2) {
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_short[0] = m1.as_short[0];
- res.as_short[1] = m2.as_short[0];
- res.as_short[2] = m1.as_short[1];
- res.as_short[3] = m2.as_short[1];
+ __res.as_short[0] = __mu1.as_short[0];
+ __res.as_short[1] = __mu2.as_short[0];
+ __res.as_short[2] = __mu1.as_short[1];
+ __res.as_short[3] = __mu2.as_short[1];
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
}
extern __inline __m64
@@ -367,15 +368,15 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_unpacklo_pi32(__m64 __m1, __m64 __m2) {
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_int[0] = m1.as_int[0];
- res.as_int[1] = m2.as_int[0];
+ __res.as_int[0] = __mu1.as_int[0];
+ __res.as_int[1] = __mu2.as_int[0];
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
}
extern __inline __m64
@@ -389,28 +390,28 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_pi8(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
- __vector signed char a, b, c;
+ __vector signed char __a, __b, __c;
- a = (__vector signed char)vec_splats(__m1);
- b = (__vector signed char)vec_splats(__m2);
- c = vec_add(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed char)vec_splats(__m1);
+ __b = (__vector signed char)vec_splats(__m2);
+ __c = vec_add(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_char[0] = m1.as_char[0] + m2.as_char[0];
- res.as_char[1] = m1.as_char[1] + m2.as_char[1];
- res.as_char[2] = m1.as_char[2] + m2.as_char[2];
- res.as_char[3] = m1.as_char[3] + m2.as_char[3];
- res.as_char[4] = m1.as_char[4] + m2.as_char[4];
- res.as_char[5] = m1.as_char[5] + m2.as_char[5];
- res.as_char[6] = m1.as_char[6] + m2.as_char[6];
- res.as_char[7] = m1.as_char[7] + m2.as_char[7];
+ __res.as_char[0] = __mu1.as_char[0] + __mu2.as_char[0];
+ __res.as_char[1] = __mu1.as_char[1] + __mu2.as_char[1];
+ __res.as_char[2] = __mu1.as_char[2] + __mu2.as_char[2];
+ __res.as_char[3] = __mu1.as_char[3] + __mu2.as_char[3];
+ __res.as_char[4] = __mu1.as_char[4] + __mu2.as_char[4];
+ __res.as_char[5] = __mu1.as_char[5] + __mu2.as_char[5];
+ __res.as_char[6] = __mu1.as_char[6] + __mu2.as_char[6];
+ __res.as_char[7] = __mu1.as_char[7] + __mu2.as_char[7];
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -425,24 +426,24 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_pi16(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
- __vector signed short a, b, c;
+ __vector signed short __a, __b, __c;
- a = (__vector signed short)vec_splats(__m1);
- b = (__vector signed short)vec_splats(__m2);
- c = vec_add(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed short)vec_splats(__m1);
+ __b = (__vector signed short)vec_splats(__m2);
+ __c = vec_add(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_short[0] = m1.as_short[0] + m2.as_short[0];
- res.as_short[1] = m1.as_short[1] + m2.as_short[1];
- res.as_short[2] = m1.as_short[2] + m2.as_short[2];
- res.as_short[3] = m1.as_short[3] + m2.as_short[3];
+ __res.as_short[0] = __mu1.as_short[0] + __mu2.as_short[0];
+ __res.as_short[1] = __mu1.as_short[1] + __mu2.as_short[1];
+ __res.as_short[2] = __mu1.as_short[2] + __mu2.as_short[2];
+ __res.as_short[3] = __mu1.as_short[3] + __mu2.as_short[3];
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -457,22 +458,22 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_add_pi32(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR9
- __vector signed int a, b, c;
+ __vector signed int __a, __b, __c;
- a = (__vector signed int)vec_splats(__m1);
- b = (__vector signed int)vec_splats(__m2);
- c = vec_add(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed int)vec_splats(__m1);
+ __b = (__vector signed int)vec_splats(__m2);
+ __c = vec_add(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_int[0] = m1.as_int[0] + m2.as_int[0];
- res.as_int[1] = m1.as_int[1] + m2.as_int[1];
+ __res.as_int[0] = __mu1.as_int[0] + __mu2.as_int[0];
+ __res.as_int[1] = __mu1.as_int[1] + __mu2.as_int[1];
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -487,28 +488,28 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_pi8(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
- __vector signed char a, b, c;
+ __vector signed char __a, __b, __c;
- a = (__vector signed char)vec_splats(__m1);
- b = (__vector signed char)vec_splats(__m2);
- c = vec_sub(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed char)vec_splats(__m1);
+ __b = (__vector signed char)vec_splats(__m2);
+ __c = vec_sub(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_char[0] = m1.as_char[0] - m2.as_char[0];
- res.as_char[1] = m1.as_char[1] - m2.as_char[1];
- res.as_char[2] = m1.as_char[2] - m2.as_char[2];
- res.as_char[3] = m1.as_char[3] - m2.as_char[3];
- res.as_char[4] = m1.as_char[4] - m2.as_char[4];
- res.as_char[5] = m1.as_char[5] - m2.as_char[5];
- res.as_char[6] = m1.as_char[6] - m2.as_char[6];
- res.as_char[7] = m1.as_char[7] - m2.as_char[7];
+ __res.as_char[0] = __mu1.as_char[0] - __mu2.as_char[0];
+ __res.as_char[1] = __mu1.as_char[1] - __mu2.as_char[1];
+ __res.as_char[2] = __mu1.as_char[2] - __mu2.as_char[2];
+ __res.as_char[3] = __mu1.as_char[3] - __mu2.as_char[3];
+ __res.as_char[4] = __mu1.as_char[4] - __mu2.as_char[4];
+ __res.as_char[5] = __mu1.as_char[5] - __mu2.as_char[5];
+ __res.as_char[6] = __mu1.as_char[6] - __mu2.as_char[6];
+ __res.as_char[7] = __mu1.as_char[7] - __mu2.as_char[7];
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -523,24 +524,24 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_pi16(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
- __vector signed short a, b, c;
+ __vector signed short __a, __b, __c;
- a = (__vector signed short)vec_splats(__m1);
- b = (__vector signed short)vec_splats(__m2);
- c = vec_sub(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed short)vec_splats(__m1);
+ __b = (__vector signed short)vec_splats(__m2);
+ __c = vec_sub(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_short[0] = m1.as_short[0] - m2.as_short[0];
- res.as_short[1] = m1.as_short[1] - m2.as_short[1];
- res.as_short[2] = m1.as_short[2] - m2.as_short[2];
- res.as_short[3] = m1.as_short[3] - m2.as_short[3];
+ __res.as_short[0] = __mu1.as_short[0] - __mu2.as_short[0];
+ __res.as_short[1] = __mu1.as_short[1] - __mu2.as_short[1];
+ __res.as_short[2] = __mu1.as_short[2] - __mu2.as_short[2];
+ __res.as_short[3] = __mu1.as_short[3] - __mu2.as_short[3];
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -555,22 +556,22 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sub_pi32(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR9
- __vector signed int a, b, c;
+ __vector signed int __a, __b, __c;
- a = (__vector signed int)vec_splats(__m1);
- b = (__vector signed int)vec_splats(__m2);
- c = vec_sub(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed int)vec_splats(__m1);
+ __b = (__vector signed int)vec_splats(__m2);
+ __c = vec_sub(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_int[0] = m1.as_int[0] - m2.as_int[0];
- res.as_int[1] = m1.as_int[1] - m2.as_int[1];
+ __res.as_int[0] = __mu1.as_int[0] - __mu2.as_int[0];
+ __res.as_int[1] = __mu1.as_int[1] - __mu2.as_int[1];
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -708,25 +709,25 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_pi8(__m64 __m1, __m64 __m2) {
#if defined(_ARCH_PWR6) && defined(__powerpc64__)
- __m64 res;
- __asm__("cmpb %0,%1,%2;\n" : "=r"(res) : "r"(__m1), "r"(__m2) :);
- return (res);
+ __m64 __res;
+ __asm__("cmpb %0,%1,%2;\n" : "=r"(__res) : "r"(__m1), "r"(__m2) :);
+ return (__res);
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_char[0] = (m1.as_char[0] == m2.as_char[0]) ? -1 : 0;
- res.as_char[1] = (m1.as_char[1] == m2.as_char[1]) ? -1 : 0;
- res.as_char[2] = (m1.as_char[2] == m2.as_char[2]) ? -1 : 0;
- res.as_char[3] = (m1.as_char[3] == m2.as_char[3]) ? -1 : 0;
- res.as_char[4] = (m1.as_char[4] == m2.as_char[4]) ? -1 : 0;
- res.as_char[5] = (m1.as_char[5] == m2.as_char[5]) ? -1 : 0;
- res.as_char[6] = (m1.as_char[6] == m2.as_char[6]) ? -1 : 0;
- res.as_char[7] = (m1.as_char[7] == m2.as_char[7]) ? -1 : 0;
+ __res.as_char[0] = (__mu1.as_char[0] == __mu2.as_char[0]) ? -1 : 0;
+ __res.as_char[1] = (__mu1.as_char[1] == __mu2.as_char[1]) ? -1 : 0;
+ __res.as_char[2] = (__mu1.as_char[2] == __mu2.as_char[2]) ? -1 : 0;
+ __res.as_char[3] = (__mu1.as_char[3] == __mu2.as_char[3]) ? -1 : 0;
+ __res.as_char[4] = (__mu1.as_char[4] == __mu2.as_char[4]) ? -1 : 0;
+ __res.as_char[5] = (__mu1.as_char[5] == __mu2.as_char[5]) ? -1 : 0;
+ __res.as_char[6] = (__mu1.as_char[6] == __mu2.as_char[6]) ? -1 : 0;
+ __res.as_char[7] = (__mu1.as_char[7] == __mu2.as_char[7]) ? -1 : 0;
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -740,28 +741,28 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_pi8(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
- __vector signed char a, b, c;
+ __vector signed char __a, __b, __c;
- a = (__vector signed char)vec_splats(__m1);
- b = (__vector signed char)vec_splats(__m2);
- c = (__vector signed char)vec_cmpgt(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed char)vec_splats(__m1);
+ __b = (__vector signed char)vec_splats(__m2);
+ __c = (__vector signed char)vec_cmpgt(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_char[0] = (m1.as_char[0] > m2.as_char[0]) ? -1 : 0;
- res.as_char[1] = (m1.as_char[1] > m2.as_char[1]) ? -1 : 0;
- res.as_char[2] = (m1.as_char[2] > m2.as_char[2]) ? -1 : 0;
- res.as_char[3] = (m1.as_char[3] > m2.as_char[3]) ? -1 : 0;
- res.as_char[4] = (m1.as_char[4] > m2.as_char[4]) ? -1 : 0;
- res.as_char[5] = (m1.as_char[5] > m2.as_char[5]) ? -1 : 0;
- res.as_char[6] = (m1.as_char[6] > m2.as_char[6]) ? -1 : 0;
- res.as_char[7] = (m1.as_char[7] > m2.as_char[7]) ? -1 : 0;
+ __res.as_char[0] = (__mu1.as_char[0] > __mu2.as_char[0]) ? -1 : 0;
+ __res.as_char[1] = (__mu1.as_char[1] > __mu2.as_char[1]) ? -1 : 0;
+ __res.as_char[2] = (__mu1.as_char[2] > __mu2.as_char[2]) ? -1 : 0;
+ __res.as_char[3] = (__mu1.as_char[3] > __mu2.as_char[3]) ? -1 : 0;
+ __res.as_char[4] = (__mu1.as_char[4] > __mu2.as_char[4]) ? -1 : 0;
+ __res.as_char[5] = (__mu1.as_char[5] > __mu2.as_char[5]) ? -1 : 0;
+ __res.as_char[6] = (__mu1.as_char[6] > __mu2.as_char[6]) ? -1 : 0;
+ __res.as_char[7] = (__mu1.as_char[7] > __mu2.as_char[7]) ? -1 : 0;
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -777,24 +778,24 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_pi16(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
- __vector signed short a, b, c;
+ __vector signed short __a, __b, __c;
- a = (__vector signed short)vec_splats(__m1);
- b = (__vector signed short)vec_splats(__m2);
- c = (__vector signed short)vec_cmpeq(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed short)vec_splats(__m1);
+ __b = (__vector signed short)vec_splats(__m2);
+ __c = (__vector signed short)vec_cmpeq(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_short[0] = (m1.as_short[0] == m2.as_short[0]) ? -1 : 0;
- res.as_short[1] = (m1.as_short[1] == m2.as_short[1]) ? -1 : 0;
- res.as_short[2] = (m1.as_short[2] == m2.as_short[2]) ? -1 : 0;
- res.as_short[3] = (m1.as_short[3] == m2.as_short[3]) ? -1 : 0;
+ __res.as_short[0] = (__mu1.as_short[0] == __mu2.as_short[0]) ? -1 : 0;
+ __res.as_short[1] = (__mu1.as_short[1] == __mu2.as_short[1]) ? -1 : 0;
+ __res.as_short[2] = (__mu1.as_short[2] == __mu2.as_short[2]) ? -1 : 0;
+ __res.as_short[3] = (__mu1.as_short[3] == __mu2.as_short[3]) ? -1 : 0;
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -808,24 +809,24 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_pi16(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR8
- __vector signed short a, b, c;
+ __vector signed short __a, __b, __c;
- a = (__vector signed short)vec_splats(__m1);
- b = (__vector signed short)vec_splats(__m2);
- c = (__vector signed short)vec_cmpgt(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed short)vec_splats(__m1);
+ __b = (__vector signed short)vec_splats(__m2);
+ __c = (__vector signed short)vec_cmpgt(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_short[0] = (m1.as_short[0] > m2.as_short[0]) ? -1 : 0;
- res.as_short[1] = (m1.as_short[1] > m2.as_short[1]) ? -1 : 0;
- res.as_short[2] = (m1.as_short[2] > m2.as_short[2]) ? -1 : 0;
- res.as_short[3] = (m1.as_short[3] > m2.as_short[3]) ? -1 : 0;
+ __res.as_short[0] = (__mu1.as_short[0] > __mu2.as_short[0]) ? -1 : 0;
+ __res.as_short[1] = (__mu1.as_short[1] > __mu2.as_short[1]) ? -1 : 0;
+ __res.as_short[2] = (__mu1.as_short[2] > __mu2.as_short[2]) ? -1 : 0;
+ __res.as_short[3] = (__mu1.as_short[3] > __mu2.as_short[3]) ? -1 : 0;
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -841,22 +842,22 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpeq_pi32(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR9
- __vector signed int a, b, c;
+ __vector signed int __a, __b, __c;
- a = (__vector signed int)vec_splats(__m1);
- b = (__vector signed int)vec_splats(__m2);
- c = (__vector signed int)vec_cmpeq(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed int)vec_splats(__m1);
+ __b = (__vector signed int)vec_splats(__m2);
+ __c = (__vector signed int)vec_cmpeq(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_int[0] = (m1.as_int[0] == m2.as_int[0]) ? -1 : 0;
- res.as_int[1] = (m1.as_int[1] == m2.as_int[1]) ? -1 : 0;
+ __res.as_int[0] = (__mu1.as_int[0] == __mu2.as_int[0]) ? -1 : 0;
+ __res.as_int[1] = (__mu1.as_int[1] == __mu2.as_int[1]) ? -1 : 0;
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
@@ -870,22 +871,22 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_cmpgt_pi32(__m64 __m1, __m64 __m2) {
#if _ARCH_PWR9
- __vector signed int a, b, c;
+ __vector signed int __a, __b, __c;
- a = (__vector signed int)vec_splats(__m1);
- b = (__vector signed int)vec_splats(__m2);
- c = (__vector signed int)vec_cmpgt(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed int)vec_splats(__m1);
+ __b = (__vector signed int)vec_splats(__m2);
+ __c = (__vector signed int)vec_cmpgt(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __mu1, __mu2, __res;
- m1.as_m64 = __m1;
- m2.as_m64 = __m2;
+ __mu1.as_m64 = __m1;
+ __mu2.as_m64 = __m2;
- res.as_int[0] = (m1.as_int[0] > m2.as_int[0]) ? -1 : 0;
- res.as_int[1] = (m1.as_int[1] > m2.as_int[1]) ? -1 : 0;
+ __res.as_int[0] = (__mu1.as_int[0] > __mu2.as_int[0]) ? -1 : 0;
+ __res.as_int[1] = (__mu1.as_int[1] > __mu2.as_int[1]) ? -1 : 0;
- return (__m64)res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
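
All of these comparisons return per-element masks of all-ones (-1) or all-zeros, which is what makes them directly usable with bitwise selects. A worked sketch (illustrative values):

#include <mmintrin.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  __m64 m1 = _mm_set_pi32(5, -1); /* i1 = 5, i0 = -1 */
  __m64 m2 = _mm_set_pi32(3, 0);
  __m64 r = _mm_cmpgt_pi32(m1, m2);
  int out[2];
  memcpy(out, &r, 8);
  printf("%d %d\n", out[0], out[1]); /* 0 -1: only 5 > 3 holds */
  return 0;
}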
@@ -901,12 +902,12 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_adds_pi8(__m64 __m1, __m64 __m2) {
- __vector signed char a, b, c;
+ __vector signed char __a, __b, __c;
- a = (__vector signed char)vec_splats(__m1);
- b = (__vector signed char)vec_splats(__m2);
- c = vec_adds(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed char)vec_splats(__m1);
+ __b = (__vector signed char)vec_splats(__m2);
+ __c = vec_adds(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
}
extern __inline __m64
@@ -919,12 +920,12 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_adds_pi16(__m64 __m1, __m64 __m2) {
- __vector signed short a, b, c;
+ __vector signed short __a, __b, __c;
- a = (__vector signed short)vec_splats(__m1);
- b = (__vector signed short)vec_splats(__m2);
- c = vec_adds(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed short)vec_splats(__m1);
+ __b = (__vector signed short)vec_splats(__m2);
+ __c = vec_adds(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
}
extern __inline __m64
@@ -937,12 +938,12 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_adds_pu8(__m64 __m1, __m64 __m2) {
- __vector unsigned char a, b, c;
+ __vector unsigned char __a, __b, __c;
- a = (__vector unsigned char)vec_splats(__m1);
- b = (__vector unsigned char)vec_splats(__m2);
- c = vec_adds(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector unsigned char)vec_splats(__m1);
+ __b = (__vector unsigned char)vec_splats(__m2);
+ __c = vec_adds(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
}
extern __inline __m64
@@ -956,12 +957,12 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_adds_pu16(__m64 __m1, __m64 __m2) {
- __vector unsigned short a, b, c;
+ __vector unsigned short __a, __b, __c;
- a = (__vector unsigned short)vec_splats(__m1);
- b = (__vector unsigned short)vec_splats(__m2);
- c = vec_adds(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector unsigned short)vec_splats(__m1);
+ __b = (__vector unsigned short)vec_splats(__m2);
+ __c = vec_adds(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
}
extern __inline __m64
@@ -975,12 +976,12 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_subs_pi8(__m64 __m1, __m64 __m2) {
- __vector signed char a, b, c;
+ __vector signed char __a, __b, __c;
- a = (__vector signed char)vec_splats(__m1);
- b = (__vector signed char)vec_splats(__m2);
- c = vec_subs(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed char)vec_splats(__m1);
+ __b = (__vector signed char)vec_splats(__m2);
+ __c = vec_subs(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
}
extern __inline __m64
@@ -994,12 +995,12 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_subs_pi16(__m64 __m1, __m64 __m2) {
- __vector signed short a, b, c;
+ __vector signed short __a, __b, __c;
- a = (__vector signed short)vec_splats(__m1);
- b = (__vector signed short)vec_splats(__m2);
- c = vec_subs(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed short)vec_splats(__m1);
+ __b = (__vector signed short)vec_splats(__m2);
+ __c = vec_subs(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
}
extern __inline __m64
@@ -1013,12 +1014,12 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_subs_pu8(__m64 __m1, __m64 __m2) {
- __vector unsigned char a, b, c;
+ __vector unsigned char __a, __b, __c;
- a = (__vector unsigned char)vec_splats(__m1);
- b = (__vector unsigned char)vec_splats(__m2);
- c = vec_subs(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector unsigned char)vec_splats(__m1);
+ __b = (__vector unsigned char)vec_splats(__m2);
+ __c = vec_subs(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
}
extern __inline __m64
@@ -1032,12 +1033,12 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_subs_pu16(__m64 __m1, __m64 __m2) {
- __vector unsigned short a, b, c;
+ __vector unsigned short __a, __b, __c;
- a = (__vector unsigned short)vec_splats(__m1);
- b = (__vector unsigned short)vec_splats(__m2);
- c = vec_subs(a, b);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector unsigned short)vec_splats(__m1);
+ __b = (__vector unsigned short)vec_splats(__m2);
+ __c = vec_subs(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
}
extern __inline __m64
@@ -1052,14 +1053,14 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_madd_pi16(__m64 __m1, __m64 __m2) {
- __vector signed short a, b;
- __vector signed int c;
- __vector signed int zero = {0, 0, 0, 0};
+ __vector signed short __a, __b;
+ __vector signed int __c;
+ __vector signed int __zero = {0, 0, 0, 0};
- a = (__vector signed short)vec_splats(__m1);
- b = (__vector signed short)vec_splats(__m2);
- c = vec_vmsumshm(a, b, zero);
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed short)vec_splats(__m1);
+ __b = (__vector signed short)vec_splats(__m2);
+ __c = vec_vmsumshm(__a, __b, __zero);
+ return (__m64)((__vector long long)__c)[0];
}
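
vec_vmsumshm is a multiply-sum: each 32-bit result is the sum of two adjacent 16x16 products, i.e. r0 = a0*b0 + a1*b1 and r1 = a2*b2 + a3*b3. A worked sketch with illustrative values:

#include <mmintrin.h>
#include <stdio.h>
#include <string.h>

int main(void) {
  __m64 a = _mm_set_pi16(4, 3, 2, 1);     /* a3..a0 */
  __m64 b = _mm_set_pi16(40, 30, 20, 10); /* b3..b0 */
  __m64 r = _mm_madd_pi16(a, b);
  int out[2];
  memcpy(out, &r, 8);
  printf("%d %d\n", out[0], out[1]); /* 50 250 */
  return 0;
}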
extern __inline __m64
@@ -1072,10 +1073,10 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mulhi_pi16(__m64 __m1, __m64 __m2) {
- __vector signed short a, b;
- __vector signed short c;
- __vector signed int w0, w1;
- __vector unsigned char xform1 = {
+ __vector signed short __a, __b;
+ __vector signed short __c;
+ __vector signed int __w0, __w1;
+ __vector unsigned char __xform1 = {
#ifdef __LITTLE_ENDIAN__
0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17, 0x0A,
0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
@@ -1085,14 +1086,14 @@ extern __inline __m64
#endif
};
- a = (__vector signed short)vec_splats(__m1);
- b = (__vector signed short)vec_splats(__m2);
+ __a = (__vector signed short)vec_splats(__m1);
+ __b = (__vector signed short)vec_splats(__m2);
- w0 = vec_vmulesh(a, b);
- w1 = vec_vmulosh(a, b);
- c = (__vector signed short)vec_perm(w0, w1, xform1);
+ __w0 = vec_vmulesh(__a, __b);
+ __w1 = vec_vmulosh(__a, __b);
+ __c = (__vector signed short)vec_perm(__w0, __w1, __xform1);
- return (__m64)((__vector long long)c)[0];
+ return (__m64)((__vector long long)__c)[0];
}
extern __inline __m64
@@ -1106,12 +1107,12 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_mullo_pi16(__m64 __m1, __m64 __m2) {
- __vector signed short a, b, c;
+ __vector signed short __a, __b, __c;
- a = (__vector signed short)vec_splats(__m1);
- b = (__vector signed short)vec_splats(__m2);
- c = a * b;
- return (__m64)((__vector long long)c)[0];
+ __a = (__vector signed short)vec_splats(__m1);
+ __b = (__vector signed short)vec_splats(__m2);
+ __c = __a * __b;
+ return (__m64)((__vector long long)__c)[0];
}
extern __inline __m64
@@ -1124,14 +1125,14 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sll_pi16(__m64 __m, __m64 __count) {
- __vector signed short m, r;
- __vector unsigned short c;
+ __vector signed short __r;
+ __vector unsigned short __c;
if (__count <= 15) {
- m = (__vector signed short)vec_splats(__m);
- c = (__vector unsigned short)vec_splats((unsigned short)__count);
- r = vec_sl(m, (__vector unsigned short)c);
- return (__m64)((__vector long long)r)[0];
+ __r = (__vector signed short)vec_splats(__m);
+ __c = (__vector unsigned short)vec_splats((unsigned short)__count);
+ __r = vec_sl(__r, (__vector unsigned short)__c);
+ return (__m64)((__vector long long)__r)[0];
} else
return (0);
}
@@ -1159,13 +1160,13 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sll_pi32(__m64 __m, __m64 __count) {
- __m64_union m, res;
+ __m64_union __res;
- m.as_m64 = __m;
+ __res.as_m64 = __m;
- res.as_int[0] = m.as_int[0] << __count;
- res.as_int[1] = m.as_int[1] << __count;
- return (res.as_m64);
+ __res.as_int[0] = __res.as_int[0] << __count;
+ __res.as_int[1] = __res.as_int[1] << __count;
+ return (__res.as_m64);
}
extern __inline __m64
@@ -1191,14 +1192,14 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sra_pi16(__m64 __m, __m64 __count) {
- __vector signed short m, r;
- __vector unsigned short c;
+ __vector signed short __r;
+ __vector unsigned short __c;
if (__count <= 15) {
- m = (__vector signed short)vec_splats(__m);
- c = (__vector unsigned short)vec_splats((unsigned short)__count);
- r = vec_sra(m, (__vector unsigned short)c);
- return (__m64)((__vector long long)r)[0];
+ __r = (__vector signed short)vec_splats(__m);
+ __c = (__vector unsigned short)vec_splats((unsigned short)__count);
+ __r = vec_sra(__r, (__vector unsigned short)__c);
+ return (__m64)((__vector long long)__r)[0];
} else
return (0);
}
@@ -1226,13 +1227,13 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_sra_pi32(__m64 __m, __m64 __count) {
- __m64_union m, res;
+ __m64_union __res;
- m.as_m64 = __m;
+ __res.as_m64 = __m;
- res.as_int[0] = m.as_int[0] >> __count;
- res.as_int[1] = m.as_int[1] >> __count;
- return (res.as_m64);
+ __res.as_int[0] = __res.as_int[0] >> __count;
+ __res.as_int[1] = __res.as_int[1] >> __count;
+ return (__res.as_m64);
}
extern __inline __m64
@@ -1258,14 +1259,14 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_srl_pi16(__m64 __m, __m64 __count) {
- __vector unsigned short m, r;
- __vector unsigned short c;
+ __vector unsigned short __r;
+ __vector unsigned short __c;
if (__count <= 15) {
- m = (__vector unsigned short)vec_splats(__m);
- c = (__vector unsigned short)vec_splats((unsigned short)__count);
- r = vec_sr(m, (__vector unsigned short)c);
- return (__m64)((__vector long long)r)[0];
+ __r = (__vector unsigned short)vec_splats(__m);
+ __c = (__vector unsigned short)vec_splats((unsigned short)__count);
+ __r = vec_sr(__r, (__vector unsigned short)__c);
+ return (__m64)((__vector long long)__r)[0];
} else
return (0);
}
@@ -1293,13 +1294,13 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_srl_pi32(__m64 __m, __m64 __count) {
- __m64_union m, res;
+ __m64_union __res;
- m.as_m64 = __m;
+ __res.as_m64 = __m;
- res.as_int[0] = (unsigned int)m.as_int[0] >> __count;
- res.as_int[1] = (unsigned int)m.as_int[1] >> __count;
- return (res.as_m64);
+ __res.as_int[0] = (unsigned int)__res.as_int[0] >> __count;
+ __res.as_int[1] = (unsigned int)__res.as_int[1] >> __count;
+ return (__res.as_m64);
}
extern __inline __m64
@@ -1326,24 +1327,24 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_pi32(int __i1, int __i0) {
- __m64_union res;
+ __m64_union __res;
- res.as_int[0] = __i0;
- res.as_int[1] = __i1;
- return (res.as_m64);
+ __res.as_int[0] = __i0;
+ __res.as_int[1] = __i1;
+ return (__res.as_m64);
}
/* Creates a vector of four 16-bit values; W0 is least significant. */
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_pi16(short __w3, short __w2, short __w1, short __w0) {
- __m64_union res;
+ __m64_union __res;
- res.as_short[0] = __w0;
- res.as_short[1] = __w1;
- res.as_short[2] = __w2;
- res.as_short[3] = __w3;
- return (res.as_m64);
+ __res.as_short[0] = __w0;
+ __res.as_short[1] = __w1;
+ __res.as_short[2] = __w2;
+ __res.as_short[3] = __w3;
+ return (__res.as_m64);
}
/* Creates a vector of eight 8-bit values; B0 is least significant. */
@@ -1351,28 +1352,28 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set_pi8(char __b7, char __b6, char __b5, char __b4, char __b3,
char __b2, char __b1, char __b0) {
- __m64_union res;
+ __m64_union __res;
- res.as_char[0] = __b0;
- res.as_char[1] = __b1;
- res.as_char[2] = __b2;
- res.as_char[3] = __b3;
- res.as_char[4] = __b4;
- res.as_char[5] = __b5;
- res.as_char[6] = __b6;
- res.as_char[7] = __b7;
- return (res.as_m64);
+ __res.as_char[0] = __b0;
+ __res.as_char[1] = __b1;
+ __res.as_char[2] = __b2;
+ __res.as_char[3] = __b3;
+ __res.as_char[4] = __b4;
+ __res.as_char[5] = __b5;
+ __res.as_char[6] = __b6;
+ __res.as_char[7] = __b7;
+ return (__res.as_m64);
}
/* Similar, but with the arguments in reverse order. */
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_setr_pi32(int __i0, int __i1) {
- __m64_union res;
+ __m64_union __res;
- res.as_int[0] = __i0;
- res.as_int[1] = __i1;
- return (res.as_m64);
+ __res.as_int[0] = __i0;
+ __res.as_int[1] = __i1;
+ return (__res.as_m64);
}
extern __inline __m64
@@ -1392,11 +1393,11 @@ extern __inline __m64
extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set1_pi32(int __i) {
- __m64_union res;
+ __m64_union __res;
- res.as_int[0] = __i;
- res.as_int[1] = __i;
- return (res.as_m64);
+ __res.as_int[0] = __i;
+ __res.as_int[1] = __i;
+ return (__res.as_m64);
}
/* Creates a vector of four 16-bit values, all elements containing W. */
@@ -1409,13 +1410,13 @@ extern __inline __m64
w = (__vector signed short)vec_splats(__w);
return (__m64)((__vector long long)w)[0];
#else
- __m64_union res;
+ __m64_union __res;
- res.as_short[0] = __w;
- res.as_short[1] = __w;
- res.as_short[2] = __w;
- res.as_short[3] = __w;
- return (res.as_m64);
+ __res.as_short[0] = __w;
+ __res.as_short[1] = __w;
+ __res.as_short[2] = __w;
+ __res.as_short[3] = __w;
+ return (__res.as_m64);
#endif
}
@@ -1424,22 +1425,22 @@ extern __inline __m64
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_set1_pi8(signed char __b) {
#if _ARCH_PWR8
- __vector signed char b;
+ __vector signed char __res;
- b = (__vector signed char)vec_splats(__b);
- return (__m64)((__vector long long)b)[0];
+ __res = (__vector signed char)vec_splats(__b);
+ return (__m64)((__vector long long)__res)[0];
#else
- __m64_union res;
-
- res.as_char[0] = __b;
- res.as_char[1] = __b;
- res.as_char[2] = __b;
- res.as_char[3] = __b;
- res.as_char[4] = __b;
- res.as_char[5] = __b;
- res.as_char[6] = __b;
- res.as_char[7] = __b;
- return (res.as_m64);
+ __m64_union __res;
+
+ __res.as_char[0] = __b;
+ __res.as_char[1] = __b;
+ __res.as_char[2] = __b;
+ __res.as_char[3] = __b;
+ __res.as_char[4] = __b;
+ __res.as_char[5] = __b;
+ __res.as_char[6] = __b;
+ __res.as_char[7] = __b;
+ return (__res.as_m64);
#endif
}
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/nmmintrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/nmmintrin.h
new file mode 100644
index 000000000000..789bba6bc0d3
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/nmmintrin.h
@@ -0,0 +1,26 @@
+/*===---- nmmintrin.h - Implementation of SSE4 intrinsics on PowerPC -------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header is distributed to simplify porting x86_64 code that
+ makes explicit use of Intel intrinsics to powerpc64le.
+ It is the user's responsibility to determine if the results are
+ acceptable and make additional changes as necessary.
+ Note that much code that uses Intel intrinsics can be rewritten in
+ standard C or GNU C extensions, which are more portable and better
+ optimized across multiple targets. */
+#endif
+
+#ifndef NMMINTRIN_H_
+#define NMMINTRIN_H_
+
+/* We just include the SSE4.1 header file. */
+#include <smmintrin.h>
+
+#endif /* NMMINTRIN_H_ */
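A minimal usage sketch (editorial illustration, not part of the diff): since this wrapper only forwards to <smmintrin.h>, SSE4 code ported from x86_64 builds unchanged once the porting warning is acknowledged, e.g. with -DNO_WARN_X86_INTRINSICS on powerpc64le.

#include <nmmintrin.h>

/* Truncate both doubles toward zero via the SSE4.1 rounding intrinsic
   provided by the smmintrin.h that this header pulls in. */
static __m128d truncate2(__m128d __v) {
  return _mm_round_pd(__v, _MM_FROUND_TO_ZERO);
}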
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/pmmintrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/pmmintrin.h
index 8d4046bd43f1..889f57ae89d8 100644
--- a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/pmmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/pmmintrin.h
@@ -32,7 +32,8 @@
In the specific case of the monitor and mwait instructions there are
no direct equivalent in the PowerISA at this time. So those
intrinsics are not implemented. */
-#error "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this warning."
+#error \
+ "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this warning."
#endif
#ifndef PMMINTRIN_H_
@@ -43,102 +44,94 @@
/* We need definitions from the SSE2 and SSE header files. */
#include <emmintrin.h>
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_addsub_ps (__m128 __X, __m128 __Y)
-{
- const __v4sf even_n0 = {-0.0, 0.0, -0.0, 0.0};
- __v4sf even_neg_Y = vec_xor(__Y, even_n0);
- return (__m128) vec_add (__X, even_neg_Y);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_addsub_ps(__m128 __X, __m128 __Y) {
+ const __v4sf __even_n0 = {-0.0, 0.0, -0.0, 0.0};
+ __v4sf __even_neg_Y = vec_xor(__Y, __even_n0);
+ return (__m128)vec_add(__X, __even_neg_Y);
}
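/* Worked illustration (editorial, not in the header): XOR with the mask
   {-0.0, 0.0, -0.0, 0.0} flips the sign bit of lanes 0 and 2 of __Y, so a
   single vec_add yields the subtract/add-per-lane pattern of SSE3 ADDSUBPS:
     __X = {1, 2, 3, 4}, __Y = {10, 20, 30, 40}
     __even_neg_Y = {-10, 20, -30, 40}
     result       = {1-10, 2+20, 3-30, 4+40} = {-9, 22, -27, 44} */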
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_addsub_pd (__m128d __X, __m128d __Y)
-{
- const __v2df even_n0 = {-0.0, 0.0};
- __v2df even_neg_Y = vec_xor(__Y, even_n0);
- return (__m128d) vec_add (__X, even_neg_Y);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_addsub_pd(__m128d __X, __m128d __Y) {
+ const __v2df __even_n0 = {-0.0, 0.0};
+ __v2df __even_neg_Y = vec_xor(__Y, __even_n0);
+ return (__m128d)vec_add(__X, __even_neg_Y);
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadd_ps (__m128 __X, __m128 __Y)
-{
- __vector unsigned char xform2 = {
- 0x00, 0x01, 0x02, 0x03,
- 0x08, 0x09, 0x0A, 0x0B,
- 0x10, 0x11, 0x12, 0x13,
- 0x18, 0x19, 0x1A, 0x1B
- };
- __vector unsigned char xform1 = {
- 0x04, 0x05, 0x06, 0x07,
- 0x0C, 0x0D, 0x0E, 0x0F,
- 0x14, 0x15, 0x16, 0x17,
- 0x1C, 0x1D, 0x1E, 0x1F
- };
- return (__m128) vec_add (vec_perm ((__v4sf) __X, (__v4sf) __Y, xform2),
- vec_perm ((__v4sf) __X, (__v4sf) __Y, xform1));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hadd_ps(__m128 __X, __m128 __Y) {
+ __vector unsigned char __xform2 = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09,
+ 0x0A, 0x0B, 0x10, 0x11, 0x12, 0x13,
+ 0x18, 0x19, 0x1A, 0x1B};
+ __vector unsigned char __xform1 = {0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x14, 0x15, 0x16, 0x17,
+ 0x1C, 0x1D, 0x1E, 0x1F};
+ return (__m128)vec_add(vec_perm((__v4sf)__X, (__v4sf)__Y, __xform2),
+ vec_perm((__v4sf)__X, (__v4sf)__Y, __xform1));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsub_ps (__m128 __X, __m128 __Y)
-{
- __vector unsigned char xform2 = {
- 0x00, 0x01, 0x02, 0x03,
- 0x08, 0x09, 0x0A, 0x0B,
- 0x10, 0x11, 0x12, 0x13,
- 0x18, 0x19, 0x1A, 0x1B
- };
- __vector unsigned char xform1 = {
- 0x04, 0x05, 0x06, 0x07,
- 0x0C, 0x0D, 0x0E, 0x0F,
- 0x14, 0x15, 0x16, 0x17,
- 0x1C, 0x1D, 0x1E, 0x1F
- };
- return (__m128) vec_sub (vec_perm ((__v4sf) __X, (__v4sf) __Y, xform2),
- vec_perm ((__v4sf) __X, (__v4sf) __Y, xform1));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hsub_ps(__m128 __X, __m128 __Y) {
+ __vector unsigned char __xform2 = {0x00, 0x01, 0x02, 0x03, 0x08, 0x09,
+ 0x0A, 0x0B, 0x10, 0x11, 0x12, 0x13,
+ 0x18, 0x19, 0x1A, 0x1B};
+ __vector unsigned char __xform1 = {0x04, 0x05, 0x06, 0x07, 0x0C, 0x0D,
+ 0x0E, 0x0F, 0x14, 0x15, 0x16, 0x17,
+ 0x1C, 0x1D, 0x1E, 0x1F};
+ return (__m128)vec_sub(vec_perm((__v4sf)__X, (__v4sf)__Y, __xform2),
+ vec_perm((__v4sf)__X, (__v4sf)__Y, __xform1));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadd_pd (__m128d __X, __m128d __Y)
-{
- return (__m128d) vec_add (vec_mergeh ((__v2df) __X, (__v2df)__Y),
- vec_mergel ((__v2df) __X, (__v2df)__Y));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hadd_pd(__m128d __X, __m128d __Y) {
+ return (__m128d)vec_add(vec_mergeh((__v2df)__X, (__v2df)__Y),
+ vec_mergel((__v2df)__X, (__v2df)__Y));
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsub_pd (__m128d __X, __m128d __Y)
-{
- return (__m128d) vec_sub (vec_mergeh ((__v2df) __X, (__v2df)__Y),
- vec_mergel ((__v2df) __X, (__v2df)__Y));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hsub_pd(__m128d __X, __m128d __Y) {
+ return (__m128d)vec_sub(vec_mergeh((__v2df)__X, (__v2df)__Y),
+ vec_mergel((__v2df)__X, (__v2df)__Y));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movehdup_ps (__m128 __X)
-{
- return (__m128)vec_mergeo ((__v4su)__X, (__v4su)__X);
+#ifdef _ARCH_PWR8
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_movehdup_ps(__m128 __X) {
+ return (__m128)vec_mergeo((__v4su)__X, (__v4su)__X);
}
+#endif
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_moveldup_ps (__m128 __X)
-{
- return (__m128)vec_mergee ((__v4su)__X, (__v4su)__X);
+#ifdef _ARCH_PWR8
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_moveldup_ps(__m128 __X) {
+ return (__m128)vec_mergee((__v4su)__X, (__v4su)__X);
}
+#endif
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loaddup_pd (double const *__P)
-{
- return (__m128d) vec_splats (*__P);
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_loaddup_pd(double const *__P) {
+ return (__m128d)vec_splats(*__P);
}
-extern __inline __m128d __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movedup_pd (__m128d __X)
-{
- return _mm_shuffle_pd (__X, __X, _MM_SHUFFLE2 (0,0));
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_movedup_pd(__m128d __X) {
+ return _mm_shuffle_pd(__X, __X, _MM_SHUFFLE2(0, 0));
}
-extern __inline __m128i __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_lddqu_si128 (__m128i const *__P)
-{
- return (__m128i) (vec_vsx_ld(0, (signed int const *)__P));
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_lddqu_si128(__m128i const *__P) {
+ return (__m128i)(vec_vsx_ld(0, (signed int const *)__P));
}
/* POWER8 / POWER9 have no equivalent for _mm_monitor or _mm_wait. */
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/smmintrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/smmintrin.h
index 674703245a69..694d5aa06940 100644
--- a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/smmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/smmintrin.h
@@ -34,6 +34,248 @@
#include <altivec.h>
#include <tmmintrin.h>
+/* Rounding mode macros. */
+#define _MM_FROUND_TO_NEAREST_INT 0x00
+#define _MM_FROUND_TO_ZERO 0x01
+#define _MM_FROUND_TO_POS_INF 0x02
+#define _MM_FROUND_TO_NEG_INF 0x03
+#define _MM_FROUND_CUR_DIRECTION 0x04
+
+#define _MM_FROUND_NINT (_MM_FROUND_TO_NEAREST_INT | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_FLOOR (_MM_FROUND_TO_NEG_INF | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_CEIL (_MM_FROUND_TO_POS_INF | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_TRUNC (_MM_FROUND_TO_ZERO | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_RINT (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_RAISE_EXC)
+#define _MM_FROUND_NEARBYINT (_MM_FROUND_CUR_DIRECTION | _MM_FROUND_NO_EXC)
+
+#define _MM_FROUND_RAISE_EXC 0x00
+#define _MM_FROUND_NO_EXC 0x08
+
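/* Worked values (editorial illustration): with _MM_FROUND_RAISE_EXC == 0x00
   and _MM_FROUND_NO_EXC == 0x08, the composite macros expand to
     _MM_FROUND_FLOOR     = 0x03 | 0x00 = 0x03
     _MM_FROUND_NEARBYINT = 0x04 | 0x08 = 0x0C
   so the (__rounding & _MM_FROUND_NO_EXC) test below distinguishes the
   exception-suppressing variants from the default ones. */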
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_round_pd(__m128d __A, int __rounding) {
+ __v2df __r;
+ union {
+ double __fr;
+ long long __fpscr;
+ } __enables_save, __fpscr_save;
+
+ if (__rounding & _MM_FROUND_NO_EXC) {
+ /* Save enabled exceptions, disable all exceptions,
+ and preserve the rounding mode. */
+#ifdef _ARCH_PWR9
+ __asm__("mffsce %0" : "=f"(__fpscr_save.__fr));
+ __enables_save.__fpscr = __fpscr_save.__fpscr & 0xf8;
+#else
+ __fpscr_save.__fr = __builtin_mffs();
+ __enables_save.__fpscr = __fpscr_save.__fpscr & 0xf8;
+ __fpscr_save.__fpscr &= ~0xf8;
+ __builtin_mtfsf(0b00000011, __fpscr_save.__fr);
+#endif
+ /* Insert an artificial "read/write" reference to the variable
+ read below, to ensure the compiler does not schedule
+ a read/use of the variable before the FPSCR is modified, above.
+ This can be removed if and when GCC PR102783 is fixed.
+ */
+ __asm__("" : "+wa"(__A));
+ }
+
+ switch (__rounding) {
+ case _MM_FROUND_TO_NEAREST_INT:
+ __fpscr_save.__fr = __builtin_mffsl();
+ __attribute__((fallthrough));
+ case _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC:
+ __builtin_set_fpscr_rn(0b00);
+ /* Insert an artificial "read/write" reference to the variable
+ read below, to ensure the compiler does not schedule
+ a read/use of the variable before the FPSCR is modified, above.
+ This can be removed if and when GCC PR102783 is fixed.
+ */
+ __asm__("" : "+wa"(__A));
+
+ __r = vec_rint((__v2df)__A);
+
+ /* Insert an artificial "read" reference to the variable written
+ above, to ensure the compiler does not schedule the computation
+ of the value after the manipulation of the FPSCR, below.
+ This can be removed if and when GCC PR102783 is fixed.
+ */
+ __asm__("" : : "wa"(__r));
+ __builtin_set_fpscr_rn(__fpscr_save.__fpscr);
+ break;
+ case _MM_FROUND_TO_NEG_INF:
+ case _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC:
+ __r = vec_floor((__v2df)__A);
+ break;
+ case _MM_FROUND_TO_POS_INF:
+ case _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC:
+ __r = vec_ceil((__v2df)__A);
+ break;
+ case _MM_FROUND_TO_ZERO:
+ case _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC:
+ __r = vec_trunc((__v2df)__A);
+ break;
+ case _MM_FROUND_CUR_DIRECTION:
+ __r = vec_rint((__v2df)__A);
+ break;
+ }
+ if (__rounding & _MM_FROUND_NO_EXC) {
+ /* Insert an artificial "read" reference to the variable written
+ above, to ensure the compiler does not schedule the computation
+ of the value after the manipulation of the FPSCR, below.
+ This can be removed if and when GCC PR102783 is fixed.
+ */
+ __asm__("" : : "wa"(__r));
+ /* Restore enabled exceptions. */
+ __fpscr_save.__fr = __builtin_mffsl();
+ __fpscr_save.__fpscr |= __enables_save.__fpscr;
+ __builtin_mtfsf(0b00000011, __fpscr_save.__fr);
+ }
+ return (__m128d)__r;
+}
+
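/* Usage sketch (editorial; values chosen purely for illustration):
     __m128d __v = _mm_set_pd(-2.5, 1.5);                 // {1.5, -2.5}
     __m128d __t = _mm_round_pd(__v, _MM_FROUND_TO_ZERO); // {1.0, -2.0}
     __m128d __f = _mm_round_pd(__v, _MM_FROUND_FLOOR);   // {1.0, -3.0}
   _MM_FROUND_FLOOR leaves exceptions enabled; OR in _MM_FROUND_NO_EXC to
   take the FPSCR save/restore path above. */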
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_round_sd(__m128d __A, __m128d __B, int __rounding) {
+ __B = _mm_round_pd(__B, __rounding);
+ __v2df __r = {((__v2df)__B)[0], ((__v2df)__A)[1]};
+ return (__m128d)__r;
+}
+
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_round_ps(__m128 __A, int __rounding) {
+ __v4sf __r;
+ union {
+ double __fr;
+ long long __fpscr;
+ } __enables_save, __fpscr_save;
+
+ if (__rounding & _MM_FROUND_NO_EXC) {
+ /* Save enabled exceptions, disable all exceptions,
+ and preserve the rounding mode. */
+#ifdef _ARCH_PWR9
+ __asm__("mffsce %0" : "=f"(__fpscr_save.__fr));
+ __enables_save.__fpscr = __fpscr_save.__fpscr & 0xf8;
+#else
+ __fpscr_save.__fr = __builtin_mffs();
+ __enables_save.__fpscr = __fpscr_save.__fpscr & 0xf8;
+ __fpscr_save.__fpscr &= ~0xf8;
+ __builtin_mtfsf(0b00000011, __fpscr_save.__fr);
+#endif
+ /* Insert an artificial "read/write" reference to the variable
+ read below, to ensure the compiler does not schedule
+ a read/use of the variable before the FPSCR is modified, above.
+ This can be removed if and when GCC PR102783 is fixed.
+ */
+ __asm__("" : "+wa"(__A));
+ }
+
+ switch (__rounding) {
+ case _MM_FROUND_TO_NEAREST_INT:
+ __fpscr_save.__fr = __builtin_mffsl();
+ __attribute__((fallthrough));
+ case _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC:
+ __builtin_set_fpscr_rn(0b00);
+ /* Insert an artificial "read/write" reference to the variable
+ read below, to ensure the compiler does not schedule
+ a read/use of the variable before the FPSCR is modified, above.
+ This can be removed if and when GCC PR102783 is fixed.
+ */
+ __asm__("" : "+wa"(__A));
+
+ __r = vec_rint((__v4sf)__A);
+
+ /* Insert an artificial "read" reference to the variable written
+ above, to ensure the compiler does not schedule the computation
+ of the value after the manipulation of the FPSCR, below.
+ This can be removed if and when GCC PR102783 is fixed.
+ */
+ __asm__("" : : "wa"(__r));
+ __builtin_set_fpscr_rn(__fpscr_save.__fpscr);
+ break;
+ case _MM_FROUND_TO_NEG_INF:
+ case _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC:
+ __r = vec_floor((__v4sf)__A);
+ break;
+ case _MM_FROUND_TO_POS_INF:
+ case _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC:
+ __r = vec_ceil((__v4sf)__A);
+ break;
+ case _MM_FROUND_TO_ZERO:
+ case _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC:
+ __r = vec_trunc((__v4sf)__A);
+ break;
+ case _MM_FROUND_CUR_DIRECTION:
+ __r = vec_rint((__v4sf)__A);
+ break;
+ }
+ if (__rounding & _MM_FROUND_NO_EXC) {
+ /* Insert an artificial "read" reference to the variable written
+ above, to ensure the compiler does not schedule the computation
+ of the value after the manipulation of the FPSCR, below.
+ This can be removed if and when GCC PR102783 is fixed.
+ */
+ __asm__("" : : "wa"(__r));
+ /* Restore enabled exceptions. */
+ __fpscr_save.__fr = __builtin_mffsl();
+ __fpscr_save.__fpscr |= __enables_save.__fpscr;
+ __builtin_mtfsf(0b00000011, __fpscr_save.__fr);
+ }
+ return (__m128)__r;
+}
+
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_round_ss(__m128 __A, __m128 __B, int __rounding) {
+ __B = _mm_round_ps(__B, __rounding);
+ __v4sf __r = (__v4sf)__A;
+ __r[0] = ((__v4sf)__B)[0];
+ return (__m128)__r;
+}
+
+#define _mm_ceil_pd(V) _mm_round_pd((V), _MM_FROUND_CEIL)
+#define _mm_ceil_sd(D, V) _mm_round_sd((D), (V), _MM_FROUND_CEIL)
+
+#define _mm_floor_pd(V) _mm_round_pd((V), _MM_FROUND_FLOOR)
+#define _mm_floor_sd(D, V) _mm_round_sd((D), (V), _MM_FROUND_FLOOR)
+
+#define _mm_ceil_ps(V) _mm_round_ps((V), _MM_FROUND_CEIL)
+#define _mm_ceil_ss(D, V) _mm_round_ss((D), (V), _MM_FROUND_CEIL)
+
+#define _mm_floor_ps(V) _mm_round_ps((V), _MM_FROUND_FLOOR)
+#define _mm_floor_ss(D, V) _mm_round_ss((D), (V), _MM_FROUND_FLOOR)
+
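/* Illustrative equivalences (editorial): each helper is a thin macro over
   the round primitives above, e.g.
     _mm_floor_ps(__v)     => _mm_round_ps((__v), 0x03)
     _mm_ceil_sd(__d, __v) => _mm_round_sd((__d), (__v), 0x02)
   with 0x03 = _MM_FROUND_FLOOR and 0x02 = _MM_FROUND_CEIL. */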
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_insert_epi8(__m128i const __A, int const __D, int const __N) {
+ __v16qi __result = (__v16qi)__A;
+
+ __result[__N & 0xf] = __D;
+
+ return (__m128i)__result;
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_insert_epi32(__m128i const __A, int const __D, int const __N) {
+ __v4si __result = (__v4si)__A;
+
+ __result[__N & 3] = __D;
+
+ return (__m128i)__result;
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_insert_epi64(__m128i const __A, long long const __D, int const __N) {
+ __v2di __result = (__v2di)__A;
+
+ __result[__N & 1] = __D;
+
+ return (__m128i)__result;
+}
+
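/* Worked example (editorial): __N is masked, so out-of-range selectors
   wrap rather than trap:
     __m128i __a = _mm_set_epi32(3, 2, 1, 0);   // lanes {0, 1, 2, 3}
     _mm_insert_epi32(__a, 99, 1);              // lanes {0, 99, 2, 3}
     _mm_insert_epi32(__a, 99, 5);              // 5 & 3 == 1, same result */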
extern __inline int
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_extract_epi8(__m128i __X, const int __N) {
@@ -58,6 +300,7 @@ extern __inline int
return ((__v4si)__X)[__N & 3];
}
+#ifdef _ARCH_PWR8
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blend_epi16(__m128i __A, __m128i __B, const int __imm8) {
@@ -69,42 +312,351 @@ extern __inline __m128i
#endif
return (__m128i)vec_sel((__v8hu)__A, (__v8hu)__B, __shortmask);
}
+#endif
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
_mm_blendv_epi8(__m128i __A, __m128i __B, __m128i __mask) {
+#ifdef _ARCH_PWR10
+ return (__m128i)vec_blendv((__v16qi)__A, (__v16qi)__B, (__v16qu)__mask);
+#else
const __v16qu __seven = vec_splats((unsigned char)0x07);
__v16qu __lmask = vec_sra((__v16qu)__mask, __seven);
- return (__m128i)vec_sel((__v16qu)__A, (__v16qu)__B, __lmask);
+ return (__m128i)vec_sel((__v16qi)__A, (__v16qi)__B, __lmask);
+#endif
+}
+
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_blend_ps(__m128 __A, __m128 __B, const int __imm8) {
+ __v16qu __pcv[] = {
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+ {16, 17, 18, 19, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+ {0, 1, 2, 3, 20, 21, 22, 23, 8, 9, 10, 11, 12, 13, 14, 15},
+ {16, 17, 18, 19, 20, 21, 22, 23, 8, 9, 10, 11, 12, 13, 14, 15},
+ {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 12, 13, 14, 15},
+ {16, 17, 18, 19, 4, 5, 6, 7, 24, 25, 26, 27, 12, 13, 14, 15},
+ {0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 12, 13, 14, 15},
+ {16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 12, 13, 14, 15},
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 28, 29, 30, 31},
+ {16, 17, 18, 19, 4, 5, 6, 7, 8, 9, 10, 11, 28, 29, 30, 31},
+ {0, 1, 2, 3, 20, 21, 22, 23, 8, 9, 10, 11, 28, 29, 30, 31},
+ {16, 17, 18, 19, 20, 21, 22, 23, 8, 9, 10, 11, 28, 29, 30, 31},
+ {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31},
+ {16, 17, 18, 19, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31},
+ {0, 1, 2, 3, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
+ {16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
+ };
+ __v16qu __r = vec_perm((__v16qu)__A, (__v16qu)__B, __pcv[__imm8]);
+ return (__m128)__r;
+}
+
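/* Illustration (editorial): __imm8 indexes a 16-entry permute-control
   table; bit i of __imm8 set selects lane i from __B. E.g. __imm8 = 0x5
   (0b0101) picks lanes {B0, A1, B2, A3}, i.e. __pcv[5] =
   {16,17,18,19, 4,5,6,7, 24,25,26,27, 12,13,14,15} as byte selectors. */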
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_blendv_ps(__m128 __A, __m128 __B, __m128 __mask) {
+#ifdef _ARCH_PWR10
+ return (__m128)vec_blendv((__v4sf)__A, (__v4sf)__B, (__v4su)__mask);
+#else
+ const __v4si __zero = {0};
+ const __vector __bool int __boolmask = vec_cmplt((__v4si)__mask, __zero);
+ return (__m128)vec_sel((__v4su)__A, (__v4su)__B, (__v4su)__boolmask);
+#endif
+}
+
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_blend_pd(__m128d __A, __m128d __B, const int __imm8) {
+ __v16qu __pcv[] = {
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+ {16, 17, 18, 19, 20, 21, 22, 23, 8, 9, 10, 11, 12, 13, 14, 15},
+ {0, 1, 2, 3, 4, 5, 6, 7, 24, 25, 26, 27, 28, 29, 30, 31},
+ {16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}};
+ __v16qu __r = vec_perm((__v16qu)__A, (__v16qu)__B, __pcv[__imm8]);
+ return (__m128d)__r;
}
+#ifdef _ARCH_PWR8
+extern __inline __m128d
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_blendv_pd(__m128d __A, __m128d __B, __m128d __mask) {
+#ifdef _ARCH_PWR10
+ return (__m128d)vec_blendv((__v2df)__A, (__v2df)__B, (__v2du)__mask);
+#else
+ const __v2di __zero = {0};
+ const __vector __bool long long __boolmask =
+ vec_cmplt((__v2di)__mask, __zero);
+ return (__m128d)vec_sel((__v2du)__A, (__v2du)__B, (__v2du)__boolmask);
+#endif
+}
+#endif
+
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_testz_si128(__m128i __A, __m128i __B) {
+ /* Note: This implementation does NOT set "zero" or "carry" flags. */
+ const __v16qu __zero = {0};
+ return vec_all_eq(vec_and((__v16qu)__A, (__v16qu)__B), __zero);
+}
+
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_testc_si128(__m128i __A, __m128i __B) {
+ /* Note: This implementation does NOT set "zero" or "carry" flags. */
+ const __v16qu __zero = {0};
+ const __v16qu __notA = vec_nor((__v16qu)__A, (__v16qu)__A);
+ return vec_all_eq(vec_and((__v16qu)__notA, (__v16qu)__B), __zero);
+}
+
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_testnzc_si128(__m128i __A, __m128i __B) {
+ /* Note: This implementation does NOT set "zero" or "carry" flags. */
+ return _mm_testz_si128(__A, __B) == 0 && _mm_testc_si128(__A, __B) == 0;
+}
+
+#define _mm_test_all_zeros(M, V) _mm_testz_si128((M), (V))
+
+#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_cmpeq_epi32((V), (V)))
+
+#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128((M), (V))
+
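/* Semantics sketch (editorial, since no flags register exists here):
     A = 0x00FF..., B = 0xFF00...  =>   A & B == 0, _mm_testz_si128 -> 1
     A == B != 0                   =>  ~A & B == 0, _mm_testc_si128 -> 1
   _mm_testnzc_si128 returns 1 only when both of the above return 0. */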
+#ifdef _ARCH_PWR8
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
- _mm_insert_epi8(__m128i const __A, int const __D, int const __N) {
- __v16qi result = (__v16qi)__A;
- result[__N & 0xf] = __D;
- return (__m128i)result;
+ _mm_cmpeq_epi64(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_cmpeq((__v2di)__X, (__v2di)__Y);
}
+#endif
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
- _mm_insert_epi32(__m128i const __A, int const __D, int const __N) {
- __v4si result = (__v4si)__A;
- result[__N & 3] = __D;
- return (__m128i)result;
+ _mm_min_epi8(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_min((__v16qi)__X, (__v16qi)__Y);
}
extern __inline __m128i
__attribute__((__gnu_inline__, __always_inline__, __artificial__))
- _mm_insert_epi64(__m128i const __A, long long const __D, int const __N) {
- __v2di result = (__v2di)__A;
- result[__N & 1] = __D;
- return (__m128i)result;
+ _mm_min_epu16(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_min((__v8hu)__X, (__v8hu)__Y);
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_min_epi32(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_min((__v4si)__X, (__v4si)__Y);
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_min_epu32(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_min((__v4su)__X, (__v4su)__Y);
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_max_epi8(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_max((__v16qi)__X, (__v16qi)__Y);
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_max_epu16(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_max((__v8hu)__X, (__v8hu)__Y);
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_max_epi32(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_max((__v4si)__X, (__v4si)__Y);
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_max_epu32(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_max((__v4su)__X, (__v4su)__Y);
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mullo_epi32(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_mul((__v4su)__X, (__v4su)__Y);
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mul_epi32(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_mule((__v4si)__X, (__v4si)__Y);
+}
+#endif
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepi8_epi16(__m128i __A) {
+ return (__m128i)vec_unpackh((__v16qi)__A);
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepi8_epi32(__m128i __A) {
+ __A = (__m128i)vec_unpackh((__v16qi)__A);
+ return (__m128i)vec_unpackh((__v8hi)__A);
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepi8_epi64(__m128i __A) {
+ __A = (__m128i)vec_unpackh((__v16qi)__A);
+ __A = (__m128i)vec_unpackh((__v8hi)__A);
+ return (__m128i)vec_unpackh((__v4si)__A);
+}
+#endif
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepi16_epi32(__m128i __A) {
+ return (__m128i)vec_unpackh((__v8hi)__A);
}
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepi16_epi64(__m128i __A) {
+ __A = (__m128i)vec_unpackh((__v8hi)__A);
+ return (__m128i)vec_unpackh((__v4si)__A);
+}
+#endif
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepi32_epi64(__m128i __A) {
+ return (__m128i)vec_unpackh((__v4si)__A);
+}
+#endif
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepu8_epi16(__m128i __A) {
+ const __v16qu __zero = {0};
+#ifdef __LITTLE_ENDIAN__
+ __A = (__m128i)vec_mergeh((__v16qu)__A, __zero);
+#else /* __BIG_ENDIAN__. */
+ __A = (__m128i)vec_mergeh(__zero, (__v16qu)__A);
+#endif /* __BIG_ENDIAN__. */
+ return __A;
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepu8_epi32(__m128i __A) {
+ const __v16qu __zero = {0};
+#ifdef __LITTLE_ENDIAN__
+ __A = (__m128i)vec_mergeh((__v16qu)__A, __zero);
+ __A = (__m128i)vec_mergeh((__v8hu)__A, (__v8hu)__zero);
+#else /* __BIG_ENDIAN__. */
+ __A = (__m128i)vec_mergeh(__zero, (__v16qu)__A);
+ __A = (__m128i)vec_mergeh((__v8hu)__zero, (__v8hu)__A);
+#endif /* __BIG_ENDIAN__. */
+ return __A;
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepu8_epi64(__m128i __A) {
+ const __v16qu __zero = {0};
+#ifdef __LITTLE_ENDIAN__
+ __A = (__m128i)vec_mergeh((__v16qu)__A, __zero);
+ __A = (__m128i)vec_mergeh((__v8hu)__A, (__v8hu)__zero);
+ __A = (__m128i)vec_mergeh((__v4su)__A, (__v4su)__zero);
+#else /* __BIG_ENDIAN__. */
+ __A = (__m128i)vec_mergeh(__zero, (__v16qu)__A);
+ __A = (__m128i)vec_mergeh((__v8hu)__zero, (__v8hu)__A);
+ __A = (__m128i)vec_mergeh((__v4su)__zero, (__v4su)__A);
+#endif /* __BIG_ENDIAN__. */
+ return __A;
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepu16_epi32(__m128i __A) {
+ const __v8hu __zero = {0};
+#ifdef __LITTLE_ENDIAN__
+ __A = (__m128i)vec_mergeh((__v8hu)__A, __zero);
+#else /* __BIG_ENDIAN__. */
+ __A = (__m128i)vec_mergeh(__zero, (__v8hu)__A);
+#endif /* __BIG_ENDIAN__. */
+ return __A;
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepu16_epi64(__m128i __A) {
+ const __v8hu __zero = {0};
+#ifdef __LITTLE_ENDIAN__
+ __A = (__m128i)vec_mergeh((__v8hu)__A, __zero);
+ __A = (__m128i)vec_mergeh((__v4su)__A, (__v4su)__zero);
+#else /* __BIG_ENDIAN__. */
+ __A = (__m128i)vec_mergeh(__zero, (__v8hu)__A);
+ __A = (__m128i)vec_mergeh((__v4su)__zero, (__v4su)__A);
+#endif /* __BIG_ENDIAN__. */
+ return __A;
+}
+
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtepu32_epi64(__m128i __A) {
+ const __v4su __zero = {0};
+#ifdef __LITTLE_ENDIAN__
+ __A = (__m128i)vec_mergeh((__v4su)__A, __zero);
+#else /* __BIG_ENDIAN__. */
+ __A = (__m128i)vec_mergeh(__zero, (__v4su)__A);
+#endif /* __BIG_ENDIAN__. */
+ return __A;
+}
+
+/* Return horizontal packed word minimum and its index in bits [15:0]
+ and bits [18:16] respectively. */
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_minpos_epu16(__m128i __A) {
+ union __u {
+ __m128i __m;
+ __v8hu __uh;
+ };
+ union __u __u = {.__m = __A}, __r = {.__m = {0}};
+ unsigned short __ridx = 0;
+ unsigned short __rmin = __u.__uh[__ridx];
+ unsigned long __i;
+ for (__i = 1; __i < 8; __i++) {
+ if (__u.__uh[__i] < __rmin) {
+ __rmin = __u.__uh[__i];
+ __ridx = __i;
+ }
+ }
+ __r.__uh[0] = __rmin;
+ __r.__uh[1] = __ridx;
+ return __r.__m;
+}
+
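/* Worked example (editorial): for __A holding halfwords
   {7, 3, 9, 3, 8, 6, 5, 4}, the strict < in the scalar loop keeps the
   first (lowest-index) minimum, so __rmin = 3 at index 1; the result
   packs 3 into __uh[0] and 1 into __uh[1], with all other lanes zero. */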
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_packus_epi32(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_packsu((__v4si)__X, (__v4si)__Y);
+}
+
+#ifdef _ARCH_PWR8
+extern __inline __m128i
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_epi64(__m128i __X, __m128i __Y) {
+ return (__m128i)vec_cmpgt((__v2di)__X, (__v2di)__Y);
+}
+#endif
+
#else
#include_next <smmintrin.h>
#endif /* defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__)) \
*/
-#endif /* _SMMINTRIN_H_ */
+#endif /* SMMINTRIN_H_ */
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/tmmintrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/tmmintrin.h
index ebef7b8192d7..1725eb9b8f64 100644
--- a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/tmmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/tmmintrin.h
@@ -33,459 +33,415 @@
#include <pmmintrin.h>
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_abs_epi16 (__m128i __A)
-{
- return (__m128i) vec_abs ((__v8hi) __A);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_abs_epi16(__m128i __A) {
+ return (__m128i)vec_abs((__v8hi)__A);
}
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_abs_epi32 (__m128i __A)
-{
- return (__m128i) vec_abs ((__v4si) __A);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_abs_epi32(__m128i __A) {
+ return (__m128i)vec_abs((__v4si)__A);
}
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_abs_epi8 (__m128i __A)
-{
- return (__m128i) vec_abs ((__v16qi) __A);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_abs_epi8(__m128i __A) {
+ return (__m128i)vec_abs((__v16qi)__A);
}
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_abs_pi16 (__m64 __A)
-{
- __v8hi __B = (__v8hi) (__v2du) { __A, __A };
- return (__m64) ((__v2du) vec_abs (__B))[0];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_abs_pi16(__m64 __A) {
+ __v8hi __B = (__v8hi)(__v2du){__A, __A};
+ return (__m64)((__v2du)vec_abs(__B))[0];
}
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_abs_pi32 (__m64 __A)
-{
- __v4si __B = (__v4si) (__v2du) { __A, __A };
- return (__m64) ((__v2du) vec_abs (__B))[0];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_abs_pi32(__m64 __A) {
+ __v4si __B = (__v4si)(__v2du){__A, __A};
+ return (__m64)((__v2du)vec_abs(__B))[0];
}
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_abs_pi8 (__m64 __A)
-{
- __v16qi __B = (__v16qi) (__v2du) { __A, __A };
- return (__m64) ((__v2du) vec_abs (__B))[0];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_abs_pi8(__m64 __A) {
+ __v16qi __B = (__v16qi)(__v2du){__A, __A};
+ return (__m64)((__v2du)vec_abs(__B))[0];
}
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_alignr_epi8 (__m128i __A, __m128i __B, const unsigned int __count)
-{
- if (__builtin_constant_p (__count) && __count < 16)
- {
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_alignr_epi8(__m128i __A, __m128i __B, const unsigned int __count) {
+ if (__builtin_constant_p(__count) && __count < 16) {
#ifdef __LITTLE_ENDIAN__
- __A = (__m128i) vec_reve ((__v16qu) __A);
- __B = (__m128i) vec_reve ((__v16qu) __B);
+ __A = (__m128i)vec_reve((__v16qu)__A);
+ __B = (__m128i)vec_reve((__v16qu)__B);
#endif
- __A = (__m128i) vec_sld ((__v16qu) __B, (__v16qu) __A, __count);
+ __A = (__m128i)vec_sld((__v16qu)__B, (__v16qu)__A, __count);
#ifdef __LITTLE_ENDIAN__
- __A = (__m128i) vec_reve ((__v16qu) __A);
+ __A = (__m128i)vec_reve((__v16qu)__A);
#endif
- return __A;
- }
+ return __A;
+ }
if (__count == 0)
return __B;
- if (__count >= 16)
- {
- if (__count >= 32)
- {
- const __v16qu zero = { 0 };
- return (__m128i) zero;
- }
- else
- {
- const __v16qu __shift =
- vec_splats ((unsigned char) ((__count - 16) * 8));
+ if (__count >= 16) {
+ if (__count >= 32) {
+ const __v16qu __zero = {0};
+ return (__m128i)__zero;
+ } else {
+ const __v16qu __shift = vec_splats((unsigned char)((__count - 16) * 8));
#ifdef __LITTLE_ENDIAN__
- return (__m128i) vec_sro ((__v16qu) __A, __shift);
+ return (__m128i)vec_sro((__v16qu)__A, __shift);
#else
- return (__m128i) vec_slo ((__v16qu) __A, __shift);
+ return (__m128i)vec_slo((__v16qu)__A, __shift);
#endif
- }
}
- else
- {
- const __v16qu __shiftA =
- vec_splats ((unsigned char) ((16 - __count) * 8));
- const __v16qu __shiftB = vec_splats ((unsigned char) (__count * 8));
+ } else {
+ const __v16qu __shiftA = vec_splats((unsigned char)((16 - __count) * 8));
+ const __v16qu __shiftB = vec_splats((unsigned char)(__count * 8));
#ifdef __LITTLE_ENDIAN__
- __A = (__m128i) vec_slo ((__v16qu) __A, __shiftA);
- __B = (__m128i) vec_sro ((__v16qu) __B, __shiftB);
+ __A = (__m128i)vec_slo((__v16qu)__A, __shiftA);
+ __B = (__m128i)vec_sro((__v16qu)__B, __shiftB);
#else
- __A = (__m128i) vec_sro ((__v16qu) __A, __shiftA);
- __B = (__m128i) vec_slo ((__v16qu) __B, __shiftB);
+ __A = (__m128i)vec_sro((__v16qu)__A, __shiftA);
+ __B = (__m128i)vec_slo((__v16qu)__B, __shiftB);
#endif
- return (__m128i) vec_or ((__v16qu) __A, (__v16qu) __B);
- }
+ return (__m128i)vec_or((__v16qu)__A, (__v16qu)__B);
+ }
}
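/* Worked example (editorial): _mm_alignr_epi8 concatenates __A:__B and
   shifts right by __count bytes. For __count == 4 the result is bytes
   4..15 of __B in the low positions followed by bytes 0..3 of __A --
   the same answer both the constant-count vec_sld path and the
   variable-count vec_slo/vec_sro/vec_or path compute. */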
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_alignr_pi8 (__m64 __A, __m64 __B, unsigned int __count)
-{
- if (__count < 16)
- {
- __v2du __C = { __B, __A };
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_alignr_pi8(__m64 __A, __m64 __B, unsigned int __count) {
+ if (__count < 16) {
+ __v2du __C = {__B, __A};
#ifdef __LITTLE_ENDIAN__
- const __v4su __shift = { __count << 3, 0, 0, 0 };
- __C = (__v2du) vec_sro ((__v16qu) __C, (__v16qu) __shift);
+ const __v4su __shift = {__count << 3, 0, 0, 0};
+ __C = (__v2du)vec_sro((__v16qu)__C, (__v16qu)__shift);
#else
- const __v4su __shift = { 0, 0, 0, __count << 3 };
- __C = (__v2du) vec_slo ((__v16qu) __C, (__v16qu) __shift);
+ const __v4su __shift = {0, 0, 0, __count << 3};
+ __C = (__v2du)vec_slo((__v16qu)__C, (__v16qu)__shift);
#endif
- return (__m64) __C[0];
- }
- else
- {
- const __m64 __zero = { 0 };
- return __zero;
- }
+ return (__m64)__C[0];
+ } else {
+ const __m64 __zero = {0};
+ return __zero;
+ }
}
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadd_epi16 (__m128i __A, __m128i __B)
-{
- const __v16qu __P =
- { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
- const __v16qu __Q =
- { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
- __v8hi __C = vec_perm ((__v8hi) __A, (__v8hi) __B, __P);
- __v8hi __D = vec_perm ((__v8hi) __A, (__v8hi) __B, __Q);
- return (__m128i) vec_add (__C, __D);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hadd_epi16(__m128i __A, __m128i __B) {
+ const __v16qu __P = {0, 1, 4, 5, 8, 9, 12, 13,
+ 16, 17, 20, 21, 24, 25, 28, 29};
+ const __v16qu __Q = {2, 3, 6, 7, 10, 11, 14, 15,
+ 18, 19, 22, 23, 26, 27, 30, 31};
+ __v8hi __C = vec_perm((__v8hi)__A, (__v8hi)__B, __P);
+ __v8hi __D = vec_perm((__v8hi)__A, (__v8hi)__B, __Q);
+ return (__m128i)vec_add(__C, __D);
}
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadd_epi32 (__m128i __A, __m128i __B)
-{
- const __v16qu __P =
- { 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27 };
- const __v16qu __Q =
- { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 };
- __v4si __C = vec_perm ((__v4si) __A, (__v4si) __B, __P);
- __v4si __D = vec_perm ((__v4si) __A, (__v4si) __B, __Q);
- return (__m128i) vec_add (__C, __D);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hadd_epi32(__m128i __A, __m128i __B) {
+ const __v16qu __P = {0, 1, 2, 3, 8, 9, 10, 11,
+ 16, 17, 18, 19, 24, 25, 26, 27};
+ const __v16qu __Q = {4, 5, 6, 7, 12, 13, 14, 15,
+ 20, 21, 22, 23, 28, 29, 30, 31};
+ __v4si __C = vec_perm((__v4si)__A, (__v4si)__B, __P);
+ __v4si __D = vec_perm((__v4si)__A, (__v4si)__B, __Q);
+ return (__m128i)vec_add(__C, __D);
}
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadd_pi16 (__m64 __A, __m64 __B)
-{
- __v8hi __C = (__v8hi) (__v2du) { __A, __B };
- const __v16qu __P =
- { 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13 };
- const __v16qu __Q =
- { 2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15 };
- __v8hi __D = vec_perm (__C, __C, __Q);
- __C = vec_perm (__C, __C, __P);
- __C = vec_add (__C, __D);
- return (__m64) ((__v2du) __C)[1];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hadd_pi16(__m64 __A, __m64 __B) {
+ __v8hi __C = (__v8hi)(__v2du){__A, __B};
+ const __v16qu __P = {0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13};
+ const __v16qu __Q = {2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15};
+ __v8hi __D = vec_perm(__C, __C, __Q);
+ __C = vec_perm(__C, __C, __P);
+ __C = vec_add(__C, __D);
+ return (__m64)((__v2du)__C)[1];
}
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadd_pi32 (__m64 __A, __m64 __B)
-{
- __v4si __C = (__v4si) (__v2du) { __A, __B };
- const __v16qu __P =
- { 0, 1, 2, 3, 8, 9, 10, 11, 0, 1, 2, 3, 8, 9, 10, 11 };
- const __v16qu __Q =
- { 4, 5, 6, 7, 12, 13, 14, 15, 4, 5, 6, 7, 12, 13, 14, 15 };
- __v4si __D = vec_perm (__C, __C, __Q);
- __C = vec_perm (__C, __C, __P);
- __C = vec_add (__C, __D);
- return (__m64) ((__v2du) __C)[1];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hadd_pi32(__m64 __A, __m64 __B) {
+ __v4si __C = (__v4si)(__v2du){__A, __B};
+ const __v16qu __P = {0, 1, 2, 3, 8, 9, 10, 11, 0, 1, 2, 3, 8, 9, 10, 11};
+ const __v16qu __Q = {4, 5, 6, 7, 12, 13, 14, 15, 4, 5, 6, 7, 12, 13, 14, 15};
+ __v4si __D = vec_perm(__C, __C, __Q);
+ __C = vec_perm(__C, __C, __P);
+ __C = vec_add(__C, __D);
+ return (__m64)((__v2du)__C)[1];
}
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadds_epi16 (__m128i __A, __m128i __B)
-{
- __v4si __C = { 0 }, __D = { 0 };
- __C = vec_sum4s ((__v8hi) __A, __C);
- __D = vec_sum4s ((__v8hi) __B, __D);
- __C = (__v4si) vec_packs (__C, __D);
- return (__m128i) __C;
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hadds_epi16(__m128i __A, __m128i __B) {
+ __v4si __C = {0}, __D = {0};
+ __C = vec_sum4s((__v8hi)__A, __C);
+ __D = vec_sum4s((__v8hi)__B, __D);
+ __C = (__v4si)vec_packs(__C, __D);
+ return (__m128i)__C;
}
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hadds_pi16 (__m64 __A, __m64 __B)
-{
- const __v4si __zero = { 0 };
- __v8hi __C = (__v8hi) (__v2du) { __A, __B };
- __v4si __D = vec_sum4s (__C, __zero);
- __C = vec_packs (__D, __D);
- return (__m64) ((__v2du) __C)[1];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hadds_pi16(__m64 __A, __m64 __B) {
+ const __v4si __zero = {0};
+ __v8hi __C = (__v8hi)(__v2du){__A, __B};
+ __v4si __D = vec_sum4s(__C, __zero);
+ __C = vec_packs(__D, __D);
+ return (__m64)((__v2du)__C)[1];
}
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsub_epi16 (__m128i __A, __m128i __B)
-{
- const __v16qu __P =
- { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
- const __v16qu __Q =
- { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
- __v8hi __C = vec_perm ((__v8hi) __A, (__v8hi) __B, __P);
- __v8hi __D = vec_perm ((__v8hi) __A, (__v8hi) __B, __Q);
- return (__m128i) vec_sub (__C, __D);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hsub_epi16(__m128i __A, __m128i __B) {
+ const __v16qu __P = {0, 1, 4, 5, 8, 9, 12, 13,
+ 16, 17, 20, 21, 24, 25, 28, 29};
+ const __v16qu __Q = {2, 3, 6, 7, 10, 11, 14, 15,
+ 18, 19, 22, 23, 26, 27, 30, 31};
+ __v8hi __C = vec_perm((__v8hi)__A, (__v8hi)__B, __P);
+ __v8hi __D = vec_perm((__v8hi)__A, (__v8hi)__B, __Q);
+ return (__m128i)vec_sub(__C, __D);
}
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsub_epi32 (__m128i __A, __m128i __B)
-{
- const __v16qu __P =
- { 0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27 };
- const __v16qu __Q =
- { 4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31 };
- __v4si __C = vec_perm ((__v4si) __A, (__v4si) __B, __P);
- __v4si __D = vec_perm ((__v4si) __A, (__v4si) __B, __Q);
- return (__m128i) vec_sub (__C, __D);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hsub_epi32(__m128i __A, __m128i __B) {
+ const __v16qu __P = {0, 1, 2, 3, 8, 9, 10, 11,
+ 16, 17, 18, 19, 24, 25, 26, 27};
+ const __v16qu __Q = {4, 5, 6, 7, 12, 13, 14, 15,
+ 20, 21, 22, 23, 28, 29, 30, 31};
+ __v4si __C = vec_perm((__v4si)__A, (__v4si)__B, __P);
+ __v4si __D = vec_perm((__v4si)__A, (__v4si)__B, __Q);
+ return (__m128i)vec_sub(__C, __D);
}
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsub_pi16 (__m64 __A, __m64 __B)
-{
- const __v16qu __P =
- { 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13 };
- const __v16qu __Q =
- { 2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15 };
- __v8hi __C = (__v8hi) (__v2du) { __A, __B };
- __v8hi __D = vec_perm (__C, __C, __Q);
- __C = vec_perm (__C, __C, __P);
- __C = vec_sub (__C, __D);
- return (__m64) ((__v2du) __C)[1];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hsub_pi16(__m64 __A, __m64 __B) {
+ const __v16qu __P = {0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13};
+ const __v16qu __Q = {2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15};
+ __v8hi __C = (__v8hi)(__v2du){__A, __B};
+ __v8hi __D = vec_perm(__C, __C, __Q);
+ __C = vec_perm(__C, __C, __P);
+ __C = vec_sub(__C, __D);
+ return (__m64)((__v2du)__C)[1];
}
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsub_pi32 (__m64 __A, __m64 __B)
-{
- const __v16qu __P =
- { 0, 1, 2, 3, 8, 9, 10, 11, 0, 1, 2, 3, 8, 9, 10, 11 };
- const __v16qu __Q =
- { 4, 5, 6, 7, 12, 13, 14, 15, 4, 5, 6, 7, 12, 13, 14, 15 };
- __v4si __C = (__v4si) (__v2du) { __A, __B };
- __v4si __D = vec_perm (__C, __C, __Q);
- __C = vec_perm (__C, __C, __P);
- __C = vec_sub (__C, __D);
- return (__m64) ((__v2du) __C)[1];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hsub_pi32(__m64 __A, __m64 __B) {
+ const __v16qu __P = {0, 1, 2, 3, 8, 9, 10, 11, 0, 1, 2, 3, 8, 9, 10, 11};
+ const __v16qu __Q = {4, 5, 6, 7, 12, 13, 14, 15, 4, 5, 6, 7, 12, 13, 14, 15};
+ __v4si __C = (__v4si)(__v2du){__A, __B};
+ __v4si __D = vec_perm(__C, __C, __Q);
+ __C = vec_perm(__C, __C, __P);
+ __C = vec_sub(__C, __D);
+ return (__m64)((__v2du)__C)[1];
}
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsubs_epi16 (__m128i __A, __m128i __B)
-{
- const __v16qu __P =
- { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
- const __v16qu __Q =
- { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
- __v8hi __C = vec_perm ((__v8hi) __A, (__v8hi) __B, __P);
- __v8hi __D = vec_perm ((__v8hi) __A, (__v8hi) __B, __Q);
- return (__m128i) vec_subs (__C, __D);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hsubs_epi16(__m128i __A, __m128i __B) {
+ const __v16qu __P = {0, 1, 4, 5, 8, 9, 12, 13,
+ 16, 17, 20, 21, 24, 25, 28, 29};
+ const __v16qu __Q = {2, 3, 6, 7, 10, 11, 14, 15,
+ 18, 19, 22, 23, 26, 27, 30, 31};
+ __v8hi __C = vec_perm((__v8hi)__A, (__v8hi)__B, __P);
+ __v8hi __D = vec_perm((__v8hi)__A, (__v8hi)__B, __Q);
+ return (__m128i)vec_subs(__C, __D);
}
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_hsubs_pi16 (__m64 __A, __m64 __B)
-{
- const __v16qu __P =
- { 0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13 };
- const __v16qu __Q =
- { 2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15 };
- __v8hi __C = (__v8hi) (__v2du) { __A, __B };
- __v8hi __D = vec_perm (__C, __C, __P);
- __v8hi __E = vec_perm (__C, __C, __Q);
- __C = vec_subs (__D, __E);
- return (__m64) ((__v2du) __C)[1];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_hsubs_pi16(__m64 __A, __m64 __B) {
+ const __v16qu __P = {0, 1, 4, 5, 8, 9, 12, 13, 0, 1, 4, 5, 8, 9, 12, 13};
+ const __v16qu __Q = {2, 3, 6, 7, 10, 11, 14, 15, 2, 3, 6, 7, 10, 11, 14, 15};
+ __v8hi __C = (__v8hi)(__v2du){__A, __B};
+ __v8hi __D = vec_perm(__C, __C, __P);
+ __v8hi __E = vec_perm(__C, __C, __Q);
+ __C = vec_subs(__D, __E);
+ return (__m64)((__v2du)__C)[1];
}
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shuffle_epi8 (__m128i __A, __m128i __B)
-{
- const __v16qi __zero = { 0 };
- __vector __bool char __select = vec_cmplt ((__v16qi) __B, __zero);
- __v16qi __C = vec_perm ((__v16qi) __A, (__v16qi) __A, (__v16qu) __B);
- return (__m128i) vec_sel (__C, __zero, __select);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_shuffle_epi8(__m128i __A, __m128i __B) {
+ const __v16qi __zero = {0};
+ __vector __bool char __select = vec_cmplt((__v16qi)__B, __zero);
+ __v16qi __C = vec_perm((__v16qi)__A, (__v16qi)__A, (__v16qu)__B);
+ return (__m128i)vec_sel(__C, __zero, __select);
}
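/* Semantics sketch (editorial): vec_perm indexes with the low bits of
   each __B byte, then vec_sel zeroes any lane whose __B byte is
   negative, reproducing PSHUFB: a control byte of 0x83 (sign bit set)
   yields 0x00 in that lane, while 0x03 copies byte 3 of __A. */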
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shuffle_pi8 (__m64 __A, __m64 __B)
-{
- const __v16qi __zero = { 0 };
- __v16qi __C = (__v16qi) (__v2du) { __A, __A };
- __v16qi __D = (__v16qi) (__v2du) { __B, __B };
- __vector __bool char __select = vec_cmplt ((__v16qi) __D, __zero);
- __C = vec_perm ((__v16qi) __C, (__v16qi) __C, (__v16qu) __D);
- __C = vec_sel (__C, __zero, __select);
- return (__m64) ((__v2du) (__C))[0];
-}
-
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_shuffle_pi8(__m64 __A, __m64 __B) {
+ const __v16qi __zero = {0};
+ __v16qi __C = (__v16qi)(__v2du){__A, __A};
+ __v16qi __D = (__v16qi)(__v2du){__B, __B};
+ __vector __bool char __select = vec_cmplt((__v16qi)__D, __zero);
+ __C = vec_perm((__v16qi)__C, (__v16qi)__C, (__v16qu)__D);
+ __C = vec_sel(__C, __zero, __select);
+ return (__m64)((__v2du)(__C))[0];
+}
+
+#ifdef _ARCH_PWR8
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sign_epi8 (__m128i __A, __m128i __B)
-{
- const __v16qi __zero = { 0 };
- __v16qi __selectneg = (__v16qi) vec_cmplt ((__v16qi) __B, __zero);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sign_epi8(__m128i __A, __m128i __B) {
+ const __v16qi __zero = {0};
+ __v16qi __selectneg = (__v16qi)vec_cmplt((__v16qi)__B, __zero);
__v16qi __selectpos =
- (__v16qi) vec_neg ((__v16qi) vec_cmpgt ((__v16qi) __B, __zero));
- __v16qi __conv = vec_add (__selectneg, __selectpos);
- return (__m128i) vec_mul ((__v16qi) __A, (__v16qi) __conv);
+ (__v16qi)vec_neg((__v16qi)vec_cmpgt((__v16qi)__B, __zero));
+ __v16qi __conv = vec_add(__selectneg, __selectpos);
+ return (__m128i)vec_mul((__v16qi)__A, (__v16qi)__conv);
}
+#endif
+#ifdef _ARCH_PWR8
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sign_epi16 (__m128i __A, __m128i __B)
-{
- const __v8hi __zero = { 0 };
- __v8hi __selectneg = (__v8hi) vec_cmplt ((__v8hi) __B, __zero);
- __v8hi __selectpos =
- (__v8hi) vec_neg ((__v8hi) vec_cmpgt ((__v8hi) __B, __zero));
- __v8hi __conv = vec_add (__selectneg, __selectpos);
- return (__m128i) vec_mul ((__v8hi) __A, (__v8hi) __conv);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sign_epi16(__m128i __A, __m128i __B) {
+ const __v8hi __zero = {0};
+ __v8hi __selectneg = (__v8hi)vec_cmplt((__v8hi)__B, __zero);
+ __v8hi __selectpos = (__v8hi)vec_neg((__v8hi)vec_cmpgt((__v8hi)__B, __zero));
+ __v8hi __conv = vec_add(__selectneg, __selectpos);
+ return (__m128i)vec_mul((__v8hi)__A, (__v8hi)__conv);
}
+#endif
+#ifdef _ARCH_PWR8
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sign_epi32 (__m128i __A, __m128i __B)
-{
- const __v4si __zero = { 0 };
- __v4si __selectneg = (__v4si) vec_cmplt ((__v4si) __B, __zero);
- __v4si __selectpos =
- (__v4si) vec_neg ((__v4si) vec_cmpgt ((__v4si) __B, __zero));
- __v4si __conv = vec_add (__selectneg, __selectpos);
- return (__m128i) vec_mul ((__v4si) __A, (__v4si) __conv);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sign_epi32(__m128i __A, __m128i __B) {
+ const __v4si __zero = {0};
+ __v4si __selectneg = (__v4si)vec_cmplt((__v4si)__B, __zero);
+ __v4si __selectpos = (__v4si)vec_neg((__v4si)vec_cmpgt((__v4si)__B, __zero));
+ __v4si __conv = vec_add(__selectneg, __selectpos);
+ return (__m128i)vec_mul((__v4si)__A, (__v4si)__conv);
}
+#endif
+#ifdef _ARCH_PWR8
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sign_pi8 (__m64 __A, __m64 __B)
-{
- const __v16qi __zero = { 0 };
- __v16qi __C = (__v16qi) (__v2du) { __A, __A };
- __v16qi __D = (__v16qi) (__v2du) { __B, __B };
- __C = (__v16qi) _mm_sign_epi8 ((__m128i) __C, (__m128i) __D);
- return (__m64) ((__v2du) (__C))[0];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sign_pi8(__m64 __A, __m64 __B) {
+ const __v16qi __zero = {0};
+ __v16qi __C = (__v16qi)(__v2du){__A, __A};
+ __v16qi __D = (__v16qi)(__v2du){__B, __B};
+ __C = (__v16qi)_mm_sign_epi8((__m128i)__C, (__m128i)__D);
+ return (__m64)((__v2du)(__C))[0];
}
+#endif
+#ifdef _ARCH_PWR8
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sign_pi16 (__m64 __A, __m64 __B)
-{
- const __v8hi __zero = { 0 };
- __v8hi __C = (__v8hi) (__v2du) { __A, __A };
- __v8hi __D = (__v8hi) (__v2du) { __B, __B };
- __C = (__v8hi) _mm_sign_epi16 ((__m128i) __C, (__m128i) __D);
- return (__m64) ((__v2du) (__C))[0];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sign_pi16(__m64 __A, __m64 __B) {
+ const __v8hi __zero = {0};
+ __v8hi __C = (__v8hi)(__v2du){__A, __A};
+ __v8hi __D = (__v8hi)(__v2du){__B, __B};
+ __C = (__v8hi)_mm_sign_epi16((__m128i)__C, (__m128i)__D);
+ return (__m64)((__v2du)(__C))[0];
}
+#endif
+#ifdef _ARCH_PWR8
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sign_pi32 (__m64 __A, __m64 __B)
-{
- const __v4si __zero = { 0 };
- __v4si __C = (__v4si) (__v2du) { __A, __A };
- __v4si __D = (__v4si) (__v2du) { __B, __B };
- __C = (__v4si) _mm_sign_epi32 ((__m128i) __C, (__m128i) __D);
- return (__m64) ((__v2du) (__C))[0];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sign_pi32(__m64 __A, __m64 __B) {
+ const __v4si __zero = {0};
+ __v4si __C = (__v4si)(__v2du){__A, __A};
+ __v4si __D = (__v4si)(__v2du){__B, __B};
+ __C = (__v4si)_mm_sign_epi32((__m128i)__C, (__m128i)__D);
+ return (__m64)((__v2du)(__C))[0];
}
+#endif
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maddubs_epi16 (__m128i __A, __m128i __B)
-{
- __v8hi __unsigned = vec_splats ((signed short) 0x00ff);
- __v8hi __C = vec_and (vec_unpackh ((__v16qi) __A), __unsigned);
- __v8hi __D = vec_and (vec_unpackl ((__v16qi) __A), __unsigned);
- __v8hi __E = vec_unpackh ((__v16qi) __B);
- __v8hi __F = vec_unpackl ((__v16qi) __B);
- __C = vec_mul (__C, __E);
- __D = vec_mul (__D, __F);
- const __v16qu __odds =
- { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
- const __v16qu __evens =
- { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
- __E = vec_perm (__C, __D, __odds);
- __F = vec_perm (__C, __D, __evens);
- return (__m128i) vec_adds (__E, __F);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_maddubs_epi16(__m128i __A, __m128i __B) {
+ __v8hi __unsigned = vec_splats((signed short)0x00ff);
+ __v8hi __C = vec_and(vec_unpackh((__v16qi)__A), __unsigned);
+ __v8hi __D = vec_and(vec_unpackl((__v16qi)__A), __unsigned);
+ __v8hi __E = vec_unpackh((__v16qi)__B);
+ __v8hi __F = vec_unpackl((__v16qi)__B);
+ __C = vec_mul(__C, __E);
+ __D = vec_mul(__D, __F);
+ const __v16qu __odds = {0, 1, 4, 5, 8, 9, 12, 13,
+ 16, 17, 20, 21, 24, 25, 28, 29};
+ const __v16qu __evens = {2, 3, 6, 7, 10, 11, 14, 15,
+ 18, 19, 22, 23, 26, 27, 30, 31};
+ __E = vec_perm(__C, __D, __odds);
+ __F = vec_perm(__C, __D, __evens);
+ return (__m128i)vec_adds(__E, __F);
}
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maddubs_pi16 (__m64 __A, __m64 __B)
-{
- __v8hi __C = (__v8hi) (__v2du) { __A, __A };
- __C = vec_unpackl ((__v16qi) __C);
- const __v8hi __unsigned = vec_splats ((signed short) 0x00ff);
- __C = vec_and (__C, __unsigned);
- __v8hi __D = (__v8hi) (__v2du) { __B, __B };
- __D = vec_unpackl ((__v16qi) __D);
- __D = vec_mul (__C, __D);
- const __v16qu __odds =
- { 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29 };
- const __v16qu __evens =
- { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 };
- __C = vec_perm (__D, __D, __odds);
- __D = vec_perm (__D, __D, __evens);
- __C = vec_adds (__C, __D);
- return (__m64) ((__v2du) (__C))[0];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_maddubs_pi16(__m64 __A, __m64 __B) {
+ __v8hi __C = (__v8hi)(__v2du){__A, __A};
+ __C = vec_unpackl((__v16qi)__C);
+ const __v8hi __unsigned = vec_splats((signed short)0x00ff);
+ __C = vec_and(__C, __unsigned);
+ __v8hi __D = (__v8hi)(__v2du){__B, __B};
+ __D = vec_unpackl((__v16qi)__D);
+ __D = vec_mul(__C, __D);
+ const __v16qu __odds = {0, 1, 4, 5, 8, 9, 12, 13,
+ 16, 17, 20, 21, 24, 25, 28, 29};
+ const __v16qu __evens = {2, 3, 6, 7, 10, 11, 14, 15,
+ 18, 19, 22, 23, 26, 27, 30, 31};
+ __C = vec_perm(__D, __D, __odds);
+ __D = vec_perm(__D, __D, __evens);
+ __C = vec_adds(__C, __D);
+ return (__m64)((__v2du)(__C))[0];
}
extern __inline __m128i
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mulhrs_epi16 (__m128i __A, __m128i __B)
-{
- __v4si __C = vec_unpackh ((__v8hi) __A);
- __v4si __D = vec_unpackh ((__v8hi) __B);
- __C = vec_mul (__C, __D);
- __D = vec_unpackl ((__v8hi) __A);
- __v4si __E = vec_unpackl ((__v8hi) __B);
- __D = vec_mul (__D, __E);
- const __v4su __shift = vec_splats ((unsigned int) 14);
- __C = vec_sr (__C, __shift);
- __D = vec_sr (__D, __shift);
- const __v4si __ones = vec_splats ((signed int) 1);
- __C = vec_add (__C, __ones);
- __C = vec_sr (__C, (__v4su) __ones);
- __D = vec_add (__D, __ones);
- __D = vec_sr (__D, (__v4su) __ones);
- return (__m128i) vec_pack (__C, __D);
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mulhrs_epi16(__m128i __A, __m128i __B) {
+ __v4si __C = vec_unpackh((__v8hi)__A);
+ __v4si __D = vec_unpackh((__v8hi)__B);
+ __C = vec_mul(__C, __D);
+ __D = vec_unpackl((__v8hi)__A);
+ __v4si __E = vec_unpackl((__v8hi)__B);
+ __D = vec_mul(__D, __E);
+ const __v4su __shift = vec_splats((unsigned int)14);
+ __C = vec_sr(__C, __shift);
+ __D = vec_sr(__D, __shift);
+ const __v4si __ones = vec_splats((signed int)1);
+ __C = vec_add(__C, __ones);
+ __C = vec_sr(__C, (__v4su)__ones);
+ __D = vec_add(__D, __ones);
+ __D = vec_sr(__D, (__v4su)__ones);
+ return (__m128i)vec_pack(__C, __D);
}
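The shift-add-shift tail above is the PMULHRSW rounding step; per lane it is equivalent to this scalar sketch (illustrative, assuming arithmetic right shifts):

  short mulhrs16(short a, short b) {
    int p = (int)a * (int)b;              /* exact 32-bit product */
    return (short)(((p >> 14) + 1) >> 1); /* == (p + 0x4000) >> 15 */
  }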
extern __inline __m64
-__attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mulhrs_pi16 (__m64 __A, __m64 __B)
-{
- __v4si __C = (__v4si) (__v2du) { __A, __A };
- __C = vec_unpackh ((__v8hi) __C);
- __v4si __D = (__v4si) (__v2du) { __B, __B };
- __D = vec_unpackh ((__v8hi) __D);
- __C = vec_mul (__C, __D);
- const __v4su __shift = vec_splats ((unsigned int) 14);
- __C = vec_sr (__C, __shift);
- const __v4si __ones = vec_splats ((signed int) 1);
- __C = vec_add (__C, __ones);
- __C = vec_sr (__C, (__v4su) __ones);
- __v8hi __E = vec_pack (__C, __D);
- return (__m64) ((__v2du) (__E))[0];
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mulhrs_pi16(__m64 __A, __m64 __B) {
+ __v4si __C = (__v4si)(__v2du){__A, __A};
+ __C = vec_unpackh((__v8hi)__C);
+ __v4si __D = (__v4si)(__v2du){__B, __B};
+ __D = vec_unpackh((__v8hi)__D);
+ __C = vec_mul(__C, __D);
+ const __v4su __shift = vec_splats((unsigned int)14);
+ __C = vec_sr(__C, __shift);
+ const __v4si __ones = vec_splats((signed int)1);
+ __C = vec_add(__C, __ones);
+ __C = vec_sr(__C, (__v4su)__ones);
+ __v8hi __E = vec_pack(__C, __D);
+ return (__m64)((__v2du)(__E))[0];
}
#else
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/x86gprintrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/x86gprintrin.h
new file mode 100644
index 000000000000..cbfac262395c
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/x86gprintrin.h
@@ -0,0 +1,17 @@
+/*===--- x86gprintrin.h - Implementation of X86 GPR intrinsics on PowerPC --===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef X86GPRINTRIN_H_
+#define X86GPRINTRIN_H_
+
+#include <bmiintrin.h>
+
+#include <bmi2intrin.h>
+
+#endif /* X86GPRINTRIN_H_ */
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/x86intrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/x86intrin.h
new file mode 100644
index 000000000000..f5c201262e69
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/x86intrin.h
@@ -0,0 +1,28 @@
+/*===---- x86intrin.h - Implementation of X86 intrinsics on PowerPC --------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+
+#ifndef NO_WARN_X86_INTRINSICS
+/* This header is distributed to simplify porting x86_64 code that
+ makes explicit use of Intel intrinsics to powerpc64le.
+ It is the user's responsibility to determine if the results are
+ acceptable and make additional changes as necessary.
+ Note that much code that uses Intel intrinsics can be rewritten in
+ standard C or GNU C extensions, which are more portable and better
+ optimized across multiple targets. */
+#error "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#endif
+
+#ifndef X86INTRIN_H_
+#define X86INTRIN_H_
+
+#ifdef __ALTIVEC__
+#include <immintrin.h>
+#endif /* __ALTIVEC__ */
+
+#endif /* X86INTRIN_H_ */
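Under the porting model this header describes, a translation unit that opts in might be built with something along these lines (target triple and file name are placeholders):

  clang --target=powerpc64le-unknown-linux-gnu -maltivec -mvsx \
        -DNO_WARN_X86_INTRINSICS -c ported.c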
diff --git a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/xmmintrin.h b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/xmmintrin.h
index 956603d36408..8bf29777b79c 100644
--- a/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/xmmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/ppc_wrappers/xmmintrin.h
@@ -28,25 +28,26 @@
Most SSE scalar float intrinsic operations can be performed more
efficiently as C language float scalar operations or optimized to
use vector SIMD operations. We recommend this for new applications. */
-#error "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
+#error \
+ "Please read comment above. Use -DNO_WARN_X86_INTRINSICS to disable this error."
#endif
-#ifndef _XMMINTRIN_H_INCLUDED
-#define _XMMINTRIN_H_INCLUDED
+#ifndef XMMINTRIN_H_
+#define XMMINTRIN_H_
#if defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__))
/* Define four value permute mask */
-#define _MM_SHUFFLE(w,x,y,z) (((w) << 6) | ((x) << 4) | ((y) << 2) | (z))
+#define _MM_SHUFFLE(w, x, y, z) (((w) << 6) | ((x) << 4) | ((y) << 2) | (z))
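For reference, the macro just packs four 2-bit element selectors, most significant first; e.g.:

  _MM_SHUFFLE(3, 2, 1, 0) == 0xE4  /* identity order */
  _MM_SHUFFLE(0, 1, 2, 3) == 0x1B  /* reversed order */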
#include <altivec.h>
/* Avoid collisions between altivec.h and strict adherence to C++ and
C11 standards. This should eventually be done inside altivec.h itself,
but only after testing a full distro build. */
-#if defined(__STRICT_ANSI__) && (defined(__cplusplus) || \
- (defined(__STDC_VERSION__) && \
- __STDC_VERSION__ >= 201112L))
+#if defined(__STRICT_ANSI__) && \
+ (defined(__cplusplus) || \
+ (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L))
#undef vector
#undef pixel
#undef bool
@@ -71,145 +72,145 @@ typedef vector float __m128_u __attribute__((__may_alias__, __aligned__(1)));
typedef vector float __v4sf;
/* Create an undefined vector. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_undefined_ps (void)
-{
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_undefined_ps(void) {
__m128 __Y = __Y;
return __Y;
}
/* Create a vector of zeros. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setzero_ps (void)
-{
- return __extension__ (__m128){ 0.0f, 0.0f, 0.0f, 0.0f };
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setzero_ps(void) {
+ return __extension__(__m128){0.0f, 0.0f, 0.0f, 0.0f};
}
/* Load four SPFP values from P. The address must be 16-byte aligned. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_ps (float const *__P)
-{
- return ((__m128)vec_ld(0, (__v4sf*)__P));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_load_ps(float const *__P) {
+ return ((__m128)vec_ld(0, (__v4sf *)__P));
}
/* Load four SPFP values from P. The address need not be 16-byte aligned. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadu_ps (float const *__P)
-{
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_loadu_ps(float const *__P) {
return (vec_vsx_ld(0, __P));
}
/* Load four SPFP values in reverse order. The address must be aligned. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadr_ps (float const *__P)
-{
- __v4sf __tmp;
- __m128 result;
- static const __vector unsigned char permute_vector =
- { 0x1C, 0x1D, 0x1E, 0x1F, 0x18, 0x19, 0x1A, 0x1B, 0x14, 0x15, 0x16,
- 0x17, 0x10, 0x11, 0x12, 0x13 };
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_loadr_ps(float const *__P) {
+ __v4sf __tmp;
+ __m128 __result;
+ static const __vector unsigned char __permute_vector = {
+ 0x1C, 0x1D, 0x1E, 0x1F, 0x18, 0x19, 0x1A, 0x1B,
+ 0x14, 0x15, 0x16, 0x17, 0x10, 0x11, 0x12, 0x13};
- __tmp = vec_ld (0, (__v4sf *) __P);
- result = (__m128) vec_perm (__tmp, __tmp, permute_vector);
- return result;
+ __tmp = vec_ld(0, (__v4sf *)__P);
+ __result = (__m128)vec_perm(__tmp, __tmp, __permute_vector);
+ return __result;
}
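The permute constant selects the 16 bytes of {__tmp, __tmp} in word-reversed order, so the observable effect is simply this (illustrative, assuming 16-byte-aligned buffers):

  float in[4]  __attribute__((__aligned__(16))) = {1.0f, 2.0f, 3.0f, 4.0f};
  float out[4] __attribute__((__aligned__(16)));
  _mm_store_ps(out, _mm_loadr_ps(in)); /* out == {4, 3, 2, 1} */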
/* Create a vector with all four elements equal to F. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set1_ps (float __F)
-{
- return __extension__ (__m128)(__v4sf){ __F, __F, __F, __F };
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set1_ps(float __F) {
+ return __extension__(__m128)(__v4sf){__F, __F, __F, __F};
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_ps1 (float __F)
-{
- return _mm_set1_ps (__F);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_ps1(float __F) {
+ return _mm_set1_ps(__F);
}
/* Create the vector [Z Y X W]. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_ps (const float __Z, const float __Y, const float __X, const float __W)
-{
- return __extension__ (__m128)(__v4sf){ __W, __X, __Y, __Z };
+extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__,
+ __artificial__))
+_mm_set_ps(const float __Z, const float __Y, const float __X, const float __W) {
+ return __extension__(__m128)(__v4sf){__W, __X, __Y, __Z};
}
/* Create the vector [W X Y Z]. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_setr_ps (float __Z, float __Y, float __X, float __W)
-{
- return __extension__ (__m128)(__v4sf){ __Z, __Y, __X, __W };
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_setr_ps(float __Z, float __Y, float __X, float __W) {
+ return __extension__(__m128)(__v4sf){__Z, __Y, __X, __W};
}
/* Store four SPFP values. The address must be 16-byte aligned. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_ps (float *__P, __m128 __A)
-{
- vec_st((__v4sf)__A, 0, (__v4sf*)__P);
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_store_ps(float *__P, __m128 __A) {
+ vec_st((__v4sf)__A, 0, (__v4sf *)__P);
}
/* Store four SPFP values. The address need not be 16-byte aligned. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storeu_ps (float *__P, __m128 __A)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_storeu_ps(float *__P, __m128 __A) {
*(__m128_u *)__P = __A;
}
/* Store four SPFP values in reverse order. The address must be aligned. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storer_ps (float *__P, __m128 __A)
-{
- __v4sf __tmp;
- static const __vector unsigned char permute_vector =
- { 0x1C, 0x1D, 0x1E, 0x1F, 0x18, 0x19, 0x1A, 0x1B, 0x14, 0x15, 0x16,
- 0x17, 0x10, 0x11, 0x12, 0x13 };
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_storer_ps(float *__P, __m128 __A) {
+ __v4sf __tmp;
+ static const __vector unsigned char __permute_vector = {
+ 0x1C, 0x1D, 0x1E, 0x1F, 0x18, 0x19, 0x1A, 0x1B,
+ 0x14, 0x15, 0x16, 0x17, 0x10, 0x11, 0x12, 0x13};
- __tmp = (__m128) vec_perm (__A, __A, permute_vector);
+ __tmp = (__m128)vec_perm(__A, __A, __permute_vector);
- _mm_store_ps (__P, __tmp);
+ _mm_store_ps(__P, __tmp);
}
/* Store the lower SPFP value across four words. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store1_ps (float *__P, __m128 __A)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_store1_ps(float *__P, __m128 __A) {
__v4sf __va = vec_splat((__v4sf)__A, 0);
- _mm_store_ps (__P, __va);
+ _mm_store_ps(__P, __va);
}
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_ps1 (float *__P, __m128 __A)
-{
- _mm_store1_ps (__P, __A);
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_store_ps1(float *__P, __m128 __A) {
+ _mm_store1_ps(__P, __A);
}
/* Create a vector with element 0 as F and the rest zero. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_set_ss (float __F)
-{
- return __extension__ (__m128)(__v4sf){ __F, 0.0f, 0.0f, 0.0f };
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_set_ss(float __F) {
+ return __extension__(__m128)(__v4sf){__F, 0.0f, 0.0f, 0.0f};
}
/* Sets the low SPFP value of A from the low value of B. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_move_ss (__m128 __A, __m128 __B)
-{
- static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_move_ss(__m128 __A, __m128 __B) {
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
- return (vec_sel ((__v4sf)__A, (__v4sf)__B, mask));
+ return (vec_sel((__v4sf)__A, (__v4sf)__B, __mask));
}
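Since vec_sel takes bits from its second operand where the mask is set, only element 0 comes from __B; e.g.:

  /* _mm_move_ss({1,2,3,4}, {5,6,7,8}) == {5, 2, 3, 4} */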
/* Create a vector with element 0 as *P and the rest zero. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_ss (float const *__P)
-{
- return _mm_set_ss (*__P);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_load_ss(float const *__P) {
+ return _mm_set_ss(*__P);
}
/* Stores the lower SPFP value. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_store_ss (float *__P, __m128 __A)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_store_ss(float *__P, __m128 __A) {
*__P = ((__v4sf)__A)[0];
}
@@ -217,612 +218,600 @@ _mm_store_ss (float *__P, __m128 __A)
floating-point) values of A and B; the upper three SPFP values are
passed through from A. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_ss (__m128 __A, __m128 __B)
-{
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_ss(__m128 __A, __m128 __B) {
#ifdef _ARCH_PWR7
- __m128 a, b, c;
- static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+ __m128 __a, __b, __c;
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
 /* PowerISA VSX does not allow partial (for just the lower float)
    results. So to ensure we don't generate spurious exceptions
    (from the upper float values) we splat the lower float
    before we do the operation. */
- a = vec_splat (__A, 0);
- b = vec_splat (__B, 0);
- c = a + b;
+ __a = vec_splat(__A, 0);
+ __b = vec_splat(__B, 0);
+ __c = __a + __b;
/* Then we merge the lower float result with the original upper
float elements from __A. */
- return (vec_sel (__A, c, mask));
+ return (vec_sel(__A, __c, __mask));
#else
__A[0] = __A[0] + __B[0];
return (__A);
#endif
}
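The splat-then-select idiom keeps lanes 1..3 of __A bit-exact and keeps their values out of the arithmetic, so no spurious FP exceptions can arise from them; e.g.:

  /* _mm_add_ss({1,2,3,4}, {10,20,30,40}) == {11, 2, 3, 4} */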
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_ss (__m128 __A, __m128 __B)
-{
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_ss(__m128 __A, __m128 __B) {
#ifdef _ARCH_PWR7
- __m128 a, b, c;
- static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+ __m128 __a, __b, __c;
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
 /* PowerISA VSX does not allow partial (for just the lower float)
    results. So to ensure we don't generate spurious exceptions
    (from the upper float values) we splat the lower float
    before we do the operation. */
- a = vec_splat (__A, 0);
- b = vec_splat (__B, 0);
- c = a - b;
+ __a = vec_splat(__A, 0);
+ __b = vec_splat(__B, 0);
+ __c = __a - __b;
/* Then we merge the lower float result with the original upper
float elements from __A. */
- return (vec_sel (__A, c, mask));
+ return (vec_sel(__A, __c, __mask));
#else
__A[0] = __A[0] - __B[0];
return (__A);
#endif
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_ss (__m128 __A, __m128 __B)
-{
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mul_ss(__m128 __A, __m128 __B) {
#ifdef _ARCH_PWR7
- __m128 a, b, c;
- static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+ __m128 __a, __b, __c;
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
 /* PowerISA VSX does not allow partial (for just the lower float)
    results. So to ensure we don't generate spurious exceptions
    (from the upper float values) we splat the lower float
    before we do the operation. */
- a = vec_splat (__A, 0);
- b = vec_splat (__B, 0);
- c = a * b;
+ __a = vec_splat(__A, 0);
+ __b = vec_splat(__B, 0);
+ __c = __a * __b;
/* Then we merge the lower float result with the original upper
float elements from __A. */
- return (vec_sel (__A, c, mask));
+ return (vec_sel(__A, __c, __mask));
#else
__A[0] = __A[0] * __B[0];
return (__A);
#endif
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_div_ss (__m128 __A, __m128 __B)
-{
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_div_ss(__m128 __A, __m128 __B) {
#ifdef _ARCH_PWR7
- __m128 a, b, c;
- static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+ __m128 __a, __b, __c;
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
 /* PowerISA VSX does not allow partial (for just the lower float)
    results. So to ensure we don't generate spurious exceptions
    (from the upper float values) we splat the lower float
    before we do the operation. */
- a = vec_splat (__A, 0);
- b = vec_splat (__B, 0);
- c = a / b;
+ __a = vec_splat(__A, 0);
+ __b = vec_splat(__B, 0);
+ __c = __a / __b;
/* Then we merge the lower float result with the original upper
float elements from __A. */
- return (vec_sel (__A, c, mask));
+ return (vec_sel(__A, __c, __mask));
#else
__A[0] = __A[0] / __B[0];
return (__A);
#endif
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sqrt_ss (__m128 __A)
-{
- __m128 a, c;
- static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sqrt_ss(__m128 __A) {
+ __m128 __a, __c;
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
 /* PowerISA VSX does not allow partial (for just the lower float)
  * results. So to ensure we don't generate spurious exceptions
  * (from the upper float values) we splat the lower float
  * before we do the operation. */
- a = vec_splat (__A, 0);
- c = vec_sqrt (a);
+ __a = vec_splat(__A, 0);
+ __c = vec_sqrt(__a);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return (vec_sel (__A, c, mask));
+ return (vec_sel(__A, __c, __mask));
}
/* Perform the respective operation on the four SPFP values in A and B. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_add_ps (__m128 __A, __m128 __B)
-{
- return (__m128) ((__v4sf)__A + (__v4sf)__B);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_add_ps(__m128 __A, __m128 __B) {
+ return (__m128)((__v4sf)__A + (__v4sf)__B);
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sub_ps (__m128 __A, __m128 __B)
-{
- return (__m128) ((__v4sf)__A - (__v4sf)__B);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sub_ps(__m128 __A, __m128 __B) {
+ return (__m128)((__v4sf)__A - (__v4sf)__B);
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mul_ps (__m128 __A, __m128 __B)
-{
- return (__m128) ((__v4sf)__A * (__v4sf)__B);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mul_ps(__m128 __A, __m128 __B) {
+ return (__m128)((__v4sf)__A * (__v4sf)__B);
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_div_ps (__m128 __A, __m128 __B)
-{
- return (__m128) ((__v4sf)__A / (__v4sf)__B);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_div_ps(__m128 __A, __m128 __B) {
+ return (__m128)((__v4sf)__A / (__v4sf)__B);
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sqrt_ps (__m128 __A)
-{
- return (vec_sqrt ((__v4sf)__A));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sqrt_ps(__m128 __A) {
+ return (vec_sqrt((__v4sf)__A));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_rcp_ps (__m128 __A)
-{
- return (vec_re ((__v4sf)__A));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_rcp_ps(__m128 __A) {
+ return (vec_re((__v4sf)__A));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_rsqrt_ps (__m128 __A)
-{
- return (vec_rsqrte (__A));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_rsqrt_ps(__m128 __A) {
+ return (vec_rsqrte(__A));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_rcp_ss (__m128 __A)
-{
- __m128 a, c;
- static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_rcp_ss(__m128 __A) {
+ __m128 __a, __c;
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
 /* PowerISA VSX does not allow partial (for just the lower float)
  * results. So to ensure we don't generate spurious exceptions
  * (from the upper float values) we splat the lower float
  * before we do the operation. */
- a = vec_splat (__A, 0);
- c = _mm_rcp_ps (a);
+ __a = vec_splat(__A, 0);
+ __c = _mm_rcp_ps(__a);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return (vec_sel (__A, c, mask));
+ return (vec_sel(__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_rsqrt_ss (__m128 __A)
-{
- __m128 a, c;
- static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_rsqrt_ss(__m128 __A) {
+ __m128 __a, __c;
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
 /* PowerISA VSX does not allow partial (for just the lower float)
  * results. So to ensure we don't generate spurious exceptions
  * (from the upper float values) we splat the lower float
  * before we do the operation. */
- a = vec_splat (__A, 0);
- c = vec_rsqrte (a);
+ __a = vec_splat(__A, 0);
+ __c = vec_rsqrte(__a);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return (vec_sel (__A, c, mask));
+ return (vec_sel(__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_ss (__m128 __A, __m128 __B)
-{
- __v4sf a, b, c;
- static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_min_ss(__m128 __A, __m128 __B) {
+ __v4sf __a, __b, __c;
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
/* PowerISA VSX does not allow partial (for just lower float)
  * results. So to ensure we don't generate spurious exceptions
  * (from the upper float values) we splat the lower float
  * before we do the operation. */
- a = vec_splat ((__v4sf)__A, 0);
- b = vec_splat ((__v4sf)__B, 0);
- c = vec_min (a, b);
+ __a = vec_splat((__v4sf)__A, 0);
+ __b = vec_splat((__v4sf)__B, 0);
+ __c = vec_min(__a, __b);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return (vec_sel ((__v4sf)__A, c, mask));
+ return (vec_sel((__v4sf)__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_ss (__m128 __A, __m128 __B)
-{
- __v4sf a, b, c;
- static const __vector unsigned int mask = {0xffffffff, 0, 0, 0};
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_max_ss(__m128 __A, __m128 __B) {
+ __v4sf __a, __b, __c;
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
/* PowerISA VSX does not allow partial (for just lower float)
  * results. So to ensure we don't generate spurious exceptions
  * (from the upper float values) we splat the lower float
  * before we do the operation. */
- a = vec_splat (__A, 0);
- b = vec_splat (__B, 0);
- c = vec_max (a, b);
+ __a = vec_splat(__A, 0);
+ __b = vec_splat(__B, 0);
+ __c = vec_max(__a, __b);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return (vec_sel ((__v4sf)__A, c, mask));
+ return (vec_sel((__v4sf)__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_ps (__m128 __A, __m128 __B)
-{
- __vector __bool int m = vec_cmpgt ((__v4sf) __B, (__v4sf) __A);
- return vec_sel (__B, __A, m);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_min_ps(__m128 __A, __m128 __B) {
+ __vector __bool int __m = vec_cmpgt((__v4sf)__B, (__v4sf)__A);
+ return vec_sel(__B, __A, __m);
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_ps (__m128 __A, __m128 __B)
-{
- __vector __bool int m = vec_cmpgt ((__v4sf) __A, (__v4sf) __B);
- return vec_sel (__B, __A, m);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_max_ps(__m128 __A, __m128 __B) {
+ __vector __bool int __m = vec_cmpgt((__v4sf)__A, (__v4sf)__B);
+ return vec_sel(__B, __A, __m);
}
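Formulating min/max as compare-and-select rather than vec_min/vec_max preserves the x86 operand asymmetry:

  /* min(A,B) == (B > A) ? A : B; an unordered (NaN) lane makes
     vec_cmpgt false, so the lane from __B wins, as MINPS/MAXPS require. */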
/* Perform logical bit-wise operations on 128-bit values. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_and_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_and ((__v4sf)__A, (__v4sf)__B));
-// return __builtin_ia32_andps (__A, __B);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_and_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_and((__v4sf)__A, (__v4sf)__B));
+ // return __builtin_ia32_andps (__A, __B);
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_andnot_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_andc ((__v4sf)__B, (__v4sf)__A));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_andnot_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_andc((__v4sf)__B, (__v4sf)__A));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_or_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_or ((__v4sf)__A, (__v4sf)__B));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_or_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_or((__v4sf)__A, (__v4sf)__B));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_xor_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_xor ((__v4sf)__A, (__v4sf)__B));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_xor_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_xor((__v4sf)__A, (__v4sf)__B));
}
/* Perform a comparison on the four SPFP values of A and B. For each
element, if the comparison is true, place a mask of all ones in the
result, otherwise a mask of zeros. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_cmpeq ((__v4sf)__A,(__v4sf) __B));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_cmpeq((__v4sf)__A, (__v4sf)__B));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_cmplt ((__v4sf)__A, (__v4sf)__B));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmplt_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_cmplt((__v4sf)__A, (__v4sf)__B));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmple_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_cmple ((__v4sf)__A, (__v4sf)__B));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmple_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_cmple((__v4sf)__A, (__v4sf)__B));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_cmpgt ((__v4sf)__A, (__v4sf)__B));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_cmpgt((__v4sf)__A, (__v4sf)__B));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpge_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_cmpge ((__v4sf)__A, (__v4sf)__B));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpge_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_cmpge((__v4sf)__A, (__v4sf)__B));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpneq_ps (__m128 __A, __m128 __B)
-{
- __v4sf temp = (__v4sf ) vec_cmpeq ((__v4sf) __A, (__v4sf)__B);
- return ((__m128)vec_nor (temp, temp));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpneq_ps(__m128 __A, __m128 __B) {
+ __v4sf __temp = (__v4sf)vec_cmpeq((__v4sf)__A, (__v4sf)__B);
+ return ((__m128)vec_nor(__temp, __temp));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnlt_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_cmpge ((__v4sf)__A, (__v4sf)__B));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpnlt_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_cmpge((__v4sf)__A, (__v4sf)__B));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnle_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_cmpgt ((__v4sf)__A, (__v4sf)__B));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpnle_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_cmpgt((__v4sf)__A, (__v4sf)__B));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpngt_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_cmple ((__v4sf)__A, (__v4sf)__B));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpngt_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_cmple((__v4sf)__A, (__v4sf)__B));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnge_ps (__m128 __A, __m128 __B)
-{
- return ((__m128)vec_cmplt ((__v4sf)__A, (__v4sf)__B));
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpnge_ps(__m128 __A, __m128 __B) {
+ return ((__m128)vec_cmplt((__v4sf)__A, (__v4sf)__B));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpord_ps (__m128 __A, __m128 __B)
-{
- __vector unsigned int a, b;
- __vector unsigned int c, d;
- static const __vector unsigned int float_exp_mask =
- { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 };
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpord_ps(__m128 __A, __m128 __B) {
+ __vector unsigned int __a, __b;
+ __vector unsigned int __c, __d;
+ static const __vector unsigned int __float_exp_mask = {
+ 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000};
- a = (__vector unsigned int) vec_abs ((__v4sf)__A);
- b = (__vector unsigned int) vec_abs ((__v4sf)__B);
- c = (__vector unsigned int) vec_cmpgt (float_exp_mask, a);
- d = (__vector unsigned int) vec_cmpgt (float_exp_mask, b);
- return ((__m128 ) vec_and (c, d));
+ __a = (__vector unsigned int)vec_abs((__v4sf)__A);
+ __b = (__vector unsigned int)vec_abs((__v4sf)__B);
+ __c = (__vector unsigned int)vec_cmpgt(__float_exp_mask, __a);
+ __d = (__vector unsigned int)vec_cmpgt(__float_exp_mask, __b);
+ return ((__m128)vec_and(__c, __d));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpunord_ps (__m128 __A, __m128 __B)
-{
- __vector unsigned int a, b;
- __vector unsigned int c, d;
- static const __vector unsigned int float_exp_mask =
- { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 };
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpunord_ps(__m128 __A, __m128 __B) {
+ __vector unsigned int __a, __b;
+ __vector unsigned int __c, __d;
+ static const __vector unsigned int __float_exp_mask = {
+ 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000};
- a = (__vector unsigned int) vec_abs ((__v4sf)__A);
- b = (__vector unsigned int) vec_abs ((__v4sf)__B);
- c = (__vector unsigned int) vec_cmpgt (a, float_exp_mask);
- d = (__vector unsigned int) vec_cmpgt (b, float_exp_mask);
- return ((__m128 ) vec_or (c, d));
+ __a = (__vector unsigned int)vec_abs((__v4sf)__A);
+ __b = (__vector unsigned int)vec_abs((__v4sf)__B);
+ __c = (__vector unsigned int)vec_cmpgt(__a, __float_exp_mask);
+ __d = (__vector unsigned int)vec_cmpgt(__b, __float_exp_mask);
+ return ((__m128)vec_or(__c, __d));
}
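The exponent-mask trick works because, once vec_abs clears the sign bit, a lane's bits exceed 0x7f800000 as an unsigned integer exactly when the value is a NaN; a scalar sketch of the test used by _mm_cmpunord_ps (illustrative):

  int is_nan(float f) {
    unsigned int u;
    __builtin_memcpy(&u, &f, sizeof u);     /* safe type pun */
    return (u & 0x7fffffffu) > 0x7f800000u; /* exponent all ones, mantissa != 0 */
  }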
/* Perform a comparison on the lower SPFP values of A and B. If the
comparison is true, place a mask of all ones in the result, otherwise a
mask of zeros. The upper three SPFP values are passed through from A. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpeq_ss (__m128 __A, __m128 __B)
-{
- static const __vector unsigned int mask =
- { 0xffffffff, 0, 0, 0 };
- __v4sf a, b, c;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpeq_ss(__m128 __A, __m128 __B) {
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+ __v4sf __a, __b, __c;
/* PowerISA VMX does not allow partial (for just element 0)
  * results. So to ensure we don't generate spurious exceptions
  * (from the upper elements) we splat the lower float
  * before we do the operation. */
- a = vec_splat ((__v4sf) __A, 0);
- b = vec_splat ((__v4sf) __B, 0);
- c = (__v4sf) vec_cmpeq(a, b);
+ __a = vec_splat((__v4sf)__A, 0);
+ __b = vec_splat((__v4sf)__B, 0);
+ __c = (__v4sf)vec_cmpeq(__a, __b);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+ return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmplt_ss (__m128 __A, __m128 __B)
-{
- static const __vector unsigned int mask =
- { 0xffffffff, 0, 0, 0 };
- __v4sf a, b, c;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmplt_ss(__m128 __A, __m128 __B) {
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+ __v4sf __a, __b, __c;
/* PowerISA VMX does not allow partial (for just element 0)
  * results. So to ensure we don't generate spurious exceptions
  * (from the upper elements) we splat the lower float
  * before we do the operation. */
- a = vec_splat ((__v4sf) __A, 0);
- b = vec_splat ((__v4sf) __B, 0);
- c = (__v4sf) vec_cmplt(a, b);
+ __a = vec_splat((__v4sf)__A, 0);
+ __b = vec_splat((__v4sf)__B, 0);
+ __c = (__v4sf)vec_cmplt(__a, __b);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+ return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmple_ss (__m128 __A, __m128 __B)
-{
- static const __vector unsigned int mask =
- { 0xffffffff, 0, 0, 0 };
- __v4sf a, b, c;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmple_ss(__m128 __A, __m128 __B) {
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+ __v4sf __a, __b, __c;
/* PowerISA VMX does not allow partial (for just element 0)
  * results. So to ensure we don't generate spurious exceptions
  * (from the upper elements) we splat the lower float
  * before we do the operation. */
- a = vec_splat ((__v4sf) __A, 0);
- b = vec_splat ((__v4sf) __B, 0);
- c = (__v4sf) vec_cmple(a, b);
+ __a = vec_splat((__v4sf)__A, 0);
+ __b = vec_splat((__v4sf)__B, 0);
+ __c = (__v4sf)vec_cmple(__a, __b);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+ return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpgt_ss (__m128 __A, __m128 __B)
-{
- static const __vector unsigned int mask =
- { 0xffffffff, 0, 0, 0 };
- __v4sf a, b, c;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpgt_ss(__m128 __A, __m128 __B) {
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+ __v4sf __a, __b, __c;
/* PowerISA VMX does not allow partial (for just element 0)
  * results. So to ensure we don't generate spurious exceptions
  * (from the upper elements) we splat the lower float
  * before we do the operation. */
- a = vec_splat ((__v4sf) __A, 0);
- b = vec_splat ((__v4sf) __B, 0);
- c = (__v4sf) vec_cmpgt(a, b);
+ __a = vec_splat((__v4sf)__A, 0);
+ __b = vec_splat((__v4sf)__B, 0);
+ __c = (__v4sf)vec_cmpgt(__a, __b);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+ return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpge_ss (__m128 __A, __m128 __B)
-{
- static const __vector unsigned int mask =
- { 0xffffffff, 0, 0, 0 };
- __v4sf a, b, c;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpge_ss(__m128 __A, __m128 __B) {
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+ __v4sf __a, __b, __c;
/* PowerISA VMX does not allow partial (for just element 0)
  * results. So to ensure we don't generate spurious exceptions
  * (from the upper elements) we splat the lower float
  * before we do the operation. */
- a = vec_splat ((__v4sf) __A, 0);
- b = vec_splat ((__v4sf) __B, 0);
- c = (__v4sf) vec_cmpge(a, b);
+ __a = vec_splat((__v4sf)__A, 0);
+ __b = vec_splat((__v4sf)__B, 0);
+ __c = (__v4sf)vec_cmpge(__a, __b);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+ return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpneq_ss (__m128 __A, __m128 __B)
-{
- static const __vector unsigned int mask =
- { 0xffffffff, 0, 0, 0 };
- __v4sf a, b, c;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpneq_ss(__m128 __A, __m128 __B) {
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+ __v4sf __a, __b, __c;
/* PowerISA VMX does not allow partial (for just element 0)
  * results. So to ensure we don't generate spurious exceptions
  * (from the upper elements) we splat the lower float
  * before we do the operation. */
- a = vec_splat ((__v4sf) __A, 0);
- b = vec_splat ((__v4sf) __B, 0);
- c = (__v4sf) vec_cmpeq(a, b);
- c = vec_nor (c, c);
+ __a = vec_splat((__v4sf)__A, 0);
+ __b = vec_splat((__v4sf)__B, 0);
+ __c = (__v4sf)vec_cmpeq(__a, __b);
+ __c = vec_nor(__c, __c);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+ return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnlt_ss (__m128 __A, __m128 __B)
-{
- static const __vector unsigned int mask =
- { 0xffffffff, 0, 0, 0 };
- __v4sf a, b, c;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpnlt_ss(__m128 __A, __m128 __B) {
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+ __v4sf __a, __b, __c;
/* PowerISA VMX does not allow partial (for just element 0)
  * results. So to ensure we don't generate spurious exceptions
  * (from the upper elements) we splat the lower float
  * before we do the operation. */
- a = vec_splat ((__v4sf) __A, 0);
- b = vec_splat ((__v4sf) __B, 0);
- c = (__v4sf) vec_cmpge(a, b);
+ __a = vec_splat((__v4sf)__A, 0);
+ __b = vec_splat((__v4sf)__B, 0);
+ __c = (__v4sf)vec_cmpge(__a, __b);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+ return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnle_ss (__m128 __A, __m128 __B)
-{
- static const __vector unsigned int mask =
- { 0xffffffff, 0, 0, 0 };
- __v4sf a, b, c;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpnle_ss(__m128 __A, __m128 __B) {
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+ __v4sf __a, __b, __c;
/* PowerISA VMX does not allow partial (for just element 0)
  * results. So to ensure we don't generate spurious exceptions
  * (from the upper elements) we splat the lower float
  * before we do the operation. */
- a = vec_splat ((__v4sf) __A, 0);
- b = vec_splat ((__v4sf) __B, 0);
- c = (__v4sf) vec_cmpgt(a, b);
+ __a = vec_splat((__v4sf)__A, 0);
+ __b = vec_splat((__v4sf)__B, 0);
+ __c = (__v4sf)vec_cmpgt(__a, __b);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+ return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpngt_ss (__m128 __A, __m128 __B)
-{
- static const __vector unsigned int mask =
- { 0xffffffff, 0, 0, 0 };
- __v4sf a, b, c;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpngt_ss(__m128 __A, __m128 __B) {
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+ __v4sf __a, __b, __c;
/* PowerISA VMX does not allow partial (for just element 0)
  * results. So to ensure we don't generate spurious exceptions
  * (from the upper elements) we splat the lower float
  * before we do the operation. */
- a = vec_splat ((__v4sf) __A, 0);
- b = vec_splat ((__v4sf) __B, 0);
- c = (__v4sf) vec_cmple(a, b);
+ __a = vec_splat((__v4sf)__A, 0);
+ __b = vec_splat((__v4sf)__B, 0);
+ __c = (__v4sf)vec_cmple(__a, __b);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return ((__m128)vec_sel ((__v4sf)__A, c, mask));
+ return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpnge_ss (__m128 __A, __m128 __B)
-{
- static const __vector unsigned int mask =
- { 0xffffffff, 0, 0, 0 };
- __v4sf a, b, c;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpnge_ss(__m128 __A, __m128 __B) {
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+ __v4sf __a, __b, __c;
/* PowerISA VMX does not allow partial (for just element 0)
  * results. So to ensure we don't generate spurious exceptions
* (from the upper elements) we splat the lower float
* before we do the operation. */
- a = vec_splat ((__v4sf) __A, 0);
- b = vec_splat ((__v4sf) __B, 0);
- c = (__v4sf) vec_cmplt(a, b);
+ __a = vec_splat((__v4sf)__A, 0);
+ __b = vec_splat((__v4sf)__B, 0);
+ __c = (__v4sf)vec_cmplt(__a, __b);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return ((__m128)vec_sel ((__v4sf)__A, c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpord_ss (__m128 __A, __m128 __B)
-{
- __vector unsigned int a, b;
- __vector unsigned int c, d;
- static const __vector unsigned int float_exp_mask =
- { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 };
- static const __vector unsigned int mask =
- { 0xffffffff, 0, 0, 0 };
-
- a = (__vector unsigned int) vec_abs ((__v4sf)__A);
- b = (__vector unsigned int) vec_abs ((__v4sf)__B);
- c = (__vector unsigned int) vec_cmpgt (float_exp_mask, a);
- d = (__vector unsigned int) vec_cmpgt (float_exp_mask, b);
- c = vec_and (c, d);
+ return ((__m128)vec_sel((__v4sf)__A, __c, __mask));
+}
+
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpord_ss(__m128 __A, __m128 __B) {
+ __vector unsigned int __a, __b;
+ __vector unsigned int __c, __d;
+ static const __vector unsigned int __float_exp_mask = {
+ 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000};
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+
+ __a = (__vector unsigned int)vec_abs((__v4sf)__A);
+ __b = (__vector unsigned int)vec_abs((__v4sf)__B);
+ __c = (__vector unsigned int)vec_cmpgt(__float_exp_mask, __a);
+ __d = (__vector unsigned int)vec_cmpgt(__float_exp_mask, __b);
+ __c = vec_and(__c, __d);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return ((__m128)vec_sel ((__v4sf)__A, (__v4sf)c, mask));
-}
-
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cmpunord_ss (__m128 __A, __m128 __B)
-{
- __vector unsigned int a, b;
- __vector unsigned int c, d;
- static const __vector unsigned int float_exp_mask =
- { 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000 };
- static const __vector unsigned int mask =
- { 0xffffffff, 0, 0, 0 };
-
- a = (__vector unsigned int) vec_abs ((__v4sf)__A);
- b = (__vector unsigned int) vec_abs ((__v4sf)__B);
- c = (__vector unsigned int) vec_cmpgt (a, float_exp_mask);
- d = (__vector unsigned int) vec_cmpgt (b, float_exp_mask);
- c = vec_or (c, d);
+ return ((__m128)vec_sel((__v4sf)__A, (__v4sf)__c, __mask));
+}
+
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cmpunord_ss(__m128 __A, __m128 __B) {
+ __vector unsigned int __a, __b;
+ __vector unsigned int __c, __d;
+ static const __vector unsigned int __float_exp_mask = {
+ 0x7f800000, 0x7f800000, 0x7f800000, 0x7f800000};
+ static const __vector unsigned int __mask = {0xffffffff, 0, 0, 0};
+
+ __a = (__vector unsigned int)vec_abs((__v4sf)__A);
+ __b = (__vector unsigned int)vec_abs((__v4sf)__B);
+ __c = (__vector unsigned int)vec_cmpgt(__a, __float_exp_mask);
+ __d = (__vector unsigned int)vec_cmpgt(__b, __float_exp_mask);
+ __c = vec_or(__c, __d);
/* Then we merge the lower float result with the original upper
* float elements from __A. */
- return ((__m128)vec_sel ((__v4sf)__A, (__v4sf)c, mask));
+ return ((__m128)vec_sel((__v4sf)__A, (__v4sf)__c, __mask));
}
/* Compare the lower SPFP values of A and B and return 1 if true
and 0 if false. */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comieq_ss (__m128 __A, __m128 __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_comieq_ss(__m128 __A, __m128 __B) {
return (__A[0] == __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comilt_ss (__m128 __A, __m128 __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_comilt_ss(__m128 __A, __m128 __B) {
return (__A[0] < __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comile_ss (__m128 __A, __m128 __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_comile_ss(__m128 __A, __m128 __B) {
return (__A[0] <= __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comigt_ss (__m128 __A, __m128 __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_comigt_ss(__m128 __A, __m128 __B) {
return (__A[0] > __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comige_ss (__m128 __A, __m128 __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_comige_ss(__m128 __A, __m128 __B) {
return (__A[0] >= __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_comineq_ss (__m128 __A, __m128 __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_comineq_ss(__m128 __A, __m128 __B) {
return (__A[0] != __B[0]);
}
@@ -834,56 +823,56 @@ _mm_comineq_ss (__m128 __A, __m128 __B)
* compare and signal for QNaNs.
 * The _mm_ucomieq_ss et al. should be OK, as is.
*/
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomieq_ss (__m128 __A, __m128 __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_ucomieq_ss(__m128 __A, __m128 __B) {
return (__A[0] == __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomilt_ss (__m128 __A, __m128 __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_ucomilt_ss(__m128 __A, __m128 __B) {
return (__A[0] < __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomile_ss (__m128 __A, __m128 __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_ucomile_ss(__m128 __A, __m128 __B) {
return (__A[0] <= __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomigt_ss (__m128 __A, __m128 __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_ucomigt_ss(__m128 __A, __m128 __B) {
return (__A[0] > __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomige_ss (__m128 __A, __m128 __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_ucomige_ss(__m128 __A, __m128 __B) {
return (__A[0] >= __B[0]);
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_ucomineq_ss (__m128 __A, __m128 __B)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_ucomineq_ss(__m128 __A, __m128 __B) {
return (__A[0] != __B[0]);
}
-extern __inline float __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtss_f32 (__m128 __A)
-{
+extern __inline float
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtss_f32(__m128 __A) {
return ((__v4sf)__A)[0];
}
/* Convert the lower SPFP value to a 32-bit integer according to the current
rounding mode. */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtss_si32 (__m128 __A)
-{
- __m64 res = 0;
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtss_si32(__m128 __A) {
+ int __res;
#ifdef _ARCH_PWR8
- double dtmp;
+ double __dtmp;
__asm__(
#ifdef __LITTLE_ENDIAN__
"xxsldwi %x0,%x0,%x0,3;\n"
@@ -891,32 +880,30 @@ _mm_cvtss_si32 (__m128 __A)
"xscvspdp %x2,%x0;\n"
"fctiw %2,%2;\n"
"mfvsrd %1,%x2;\n"
- : "+wa" (__A),
- "=r" (res),
- "=f" (dtmp)
- : );
+ : "+wa"(__A), "=r"(__res), "=f"(__dtmp)
+ :);
#else
- res = __builtin_rint(__A[0]);
+ __res = __builtin_rint(__A[0]);
#endif
- return (res);
+ return __res;
}
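On _ARCH_PWR8 the asm rotates element 0 into place (xxsldwi), widens it to double (xscvspdp), converts with fctiw, which honors the current rounding mode, and moves the result to a GPR (mfvsrd); the __builtin_rint fallback rounds the same way. For example, under the default round-to-nearest-even mode:

  _mm_cvtss_si32(_mm_set_ss(2.5f)); /* -> 2 */
  _mm_cvtss_si32(_mm_set_ss(3.5f)); /* -> 4 */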
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_ss2si (__m128 __A)
-{
- return _mm_cvtss_si32 (__A);
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvt_ss2si(__m128 __A) {
+ return _mm_cvtss_si32(__A);
}
 /* Convert the lower SPFP value to a 64-bit integer according to the
    current rounding mode. */
/* Intel intrinsic. */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtss_si64 (__m128 __A)
-{
- __m64 res = 0;
-#ifdef _ARCH_PWR8
- double dtmp;
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtss_si64(__m128 __A) {
+ long long __res;
+#if defined(_ARCH_PWR8) && defined(__powerpc64__)
+ double __dtmp;
__asm__(
#ifdef __LITTLE_ENDIAN__
"xxsldwi %x0,%x0,%x0,3;\n"
@@ -924,26 +911,23 @@ _mm_cvtss_si64 (__m128 __A)
"xscvspdp %x2,%x0;\n"
"fctid %2,%2;\n"
"mfvsrd %1,%x2;\n"
- : "+wa" (__A),
- "=r" (res),
- "=f" (dtmp)
- : );
+ : "+wa"(__A), "=r"(__res), "=f"(__dtmp)
+ :);
#else
- res = __builtin_llrint(__A[0]);
+ __res = __builtin_llrint(__A[0]);
#endif
- return (res);
+ return __res;
}
/* Microsoft intrinsic. */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtss_si64x (__m128 __A)
-{
- return _mm_cvtss_si64 ((__v4sf) __A);
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtss_si64x(__m128 __A) {
+ return _mm_cvtss_si64((__v4sf)__A);
}
/* Constants for use with _mm_prefetch. */
-enum _mm_hint
-{
+enum _mm_hint {
/* _MM_HINT_ET is _MM_HINT_T with set 3rd bit. */
_MM_HINT_ET0 = 7,
_MM_HINT_ET1 = 6,
@@ -955,368 +939,365 @@ enum _mm_hint
/* Loads one cache line from address P to a location "closer" to the
processor. The selector I specifies the type of prefetch operation. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_prefetch (const void *__P, enum _mm_hint __I)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_prefetch(const void *__P, enum _mm_hint __I) {
  /* Current PowerPC ignores the hint parameters. */
- __builtin_prefetch (__P);
+ __builtin_prefetch(__P);
}
/* Convert the two lower SPFP values to 32-bit integers according to the
current rounding mode. Return the integers in packed form. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_pi32 (__m128 __A)
-{
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtps_pi32(__m128 __A) {
- __v4sf temp, rounded;
- __vector unsigned long long result;
+ __v4sf __temp, __rounded;
+ __vector unsigned long long __result;
/* Splat two lower SPFP values to both halves. */
- temp = (__v4sf) vec_splat ((__vector long long)__A, 0);
- rounded = vec_rint(temp);
- result = (__vector unsigned long long) vec_cts (rounded, 0);
+ __temp = (__v4sf)vec_splat((__vector long long)__A, 0);
+ __rounded = vec_rint(__temp);
+ __result = (__vector unsigned long long)vec_cts(__rounded, 0);
- return (__m64) ((__vector long long) result)[0];
+ return (__m64)((__vector long long)__result)[0];
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_ps2pi (__m128 __A)
-{
- return _mm_cvtps_pi32 (__A);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvt_ps2pi(__m128 __A) {
+ return _mm_cvtps_pi32(__A);
}
/* Truncate the lower SPFP value to a 32-bit integer. */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttss_si32 (__m128 __A)
-{
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvttss_si32(__m128 __A) {
/* Extract the lower float element. */
- float temp = __A[0];
+ float __temp = __A[0];
  /* Truncate to a 32-bit integer and return. */
- return temp;
+ return __temp;
}
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtt_ss2si (__m128 __A)
-{
- return _mm_cvttss_si32 (__A);
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtt_ss2si(__m128 __A) {
+ return _mm_cvttss_si32(__A);
}
/* Intel intrinsic. */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttss_si64 (__m128 __A)
-{
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvttss_si64(__m128 __A) {
/* Extract the lower float element. */
- float temp = __A[0];
+ float __temp = __A[0];
  /* Truncate to a 64-bit integer and return. */
- return temp;
+ return __temp;
}
/* Microsoft intrinsic. */
-extern __inline long long __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttss_si64x (__m128 __A)
-{
+extern __inline long long
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvttss_si64x(__m128 __A) {
/* Extract the lower float element. */
- float temp = __A[0];
+ float __temp = __A[0];
  /* Truncate to a 64-bit integer and return. */
- return temp;
+ return __temp;
}
/* Truncate the two lower SPFP values to 32-bit integers. Return the
integers in packed form. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvttps_pi32 (__m128 __A)
-{
- __v4sf temp;
- __vector unsigned long long result;
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvttps_pi32(__m128 __A) {
+ __v4sf __temp;
+ __vector unsigned long long __result;
/* Splat two lower SPFP values to both halves. */
- temp = (__v4sf) vec_splat ((__vector long long)__A, 0);
- result = (__vector unsigned long long) vec_cts (temp, 0);
+ __temp = (__v4sf)vec_splat((__vector long long)__A, 0);
+ __result = (__vector unsigned long long)vec_cts(__temp, 0);
- return (__m64) ((__vector long long) result)[0];
+ return (__m64)((__vector long long)__result)[0];
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtt_ps2pi (__m128 __A)
-{
- return _mm_cvttps_pi32 (__A);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtt_ps2pi(__m128 __A) {
+ return _mm_cvttps_pi32(__A);
}
/* Convert B to a SPFP value and insert it as element zero in A. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi32_ss (__m128 __A, int __B)
-{
- float temp = __B;
- __A[0] = temp;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi32_ss(__m128 __A, int __B) {
+ float __temp = __B;
+ __A[0] = __temp;
return __A;
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_si2ss (__m128 __A, int __B)
-{
- return _mm_cvtsi32_ss (__A, __B);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvt_si2ss(__m128 __A, int __B) {
+ return _mm_cvtsi32_ss(__A, __B);
}
/* Convert B to a SPFP value and insert it as element zero in A. */
/* Intel intrinsic. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi64_ss (__m128 __A, long long __B)
-{
- float temp = __B;
- __A[0] = temp;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64_ss(__m128 __A, long long __B) {
+ float __temp = __B;
+ __A[0] = __temp;
return __A;
}
/* Microsoft intrinsic. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtsi64x_ss (__m128 __A, long long __B)
-{
- return _mm_cvtsi64_ss (__A, __B);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtsi64x_ss(__m128 __A, long long __B) {
+ return _mm_cvtsi64_ss(__A, __B);
}
/* Convert the two 32-bit values in B to SPFP form and insert them
as the two lower elements in A. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpi32_ps (__m128 __A, __m64 __B)
-{
- __vector signed int vm1;
- __vector float vf1;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtpi32_ps(__m128 __A, __m64 __B) {
+ __vector signed int __vm1;
+ __vector float __vf1;
- vm1 = (__vector signed int) (__vector unsigned long long) {__B, __B};
- vf1 = (__vector float) vec_ctf (vm1, 0);
+ __vm1 = (__vector signed int)(__vector unsigned long long){__B, __B};
+ __vf1 = (__vector float)vec_ctf(__vm1, 0);
- return ((__m128) (__vector unsigned long long)
- { ((__vector unsigned long long)vf1) [0],
- ((__vector unsigned long long)__A) [1]});
+ return ((__m128)(__vector unsigned long long){
+ ((__vector unsigned long long)__vf1)[0],
+ ((__vector unsigned long long)__A)[1]});
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvt_pi2ps (__m128 __A, __m64 __B)
-{
- return _mm_cvtpi32_ps (__A, __B);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvt_pi2ps(__m128 __A, __m64 __B) {
+ return _mm_cvtpi32_ps(__A, __B);
}
/* Convert the four signed 16-bit values in A to SPFP form. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpi16_ps (__m64 __A)
-{
- __vector signed short vs8;
- __vector signed int vi4;
- __vector float vf1;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtpi16_ps(__m64 __A) {
+ __vector signed short __vs8;
+ __vector signed int __vi4;
+ __vector float __vf1;
- vs8 = (__vector signed short) (__vector unsigned long long) { __A, __A };
- vi4 = vec_vupklsh (vs8);
- vf1 = (__vector float) vec_ctf (vi4, 0);
+ __vs8 = (__vector signed short)(__vector unsigned long long){__A, __A};
+ __vi4 = vec_vupklsh(__vs8);
+ __vf1 = (__vector float)vec_ctf(__vi4, 0);
- return (__m128) vf1;
+ return (__m128)__vf1;
}
/* Convert the four unsigned 16-bit values in A to SPFP form. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpu16_ps (__m64 __A)
-{
- const __vector unsigned short zero =
- { 0, 0, 0, 0, 0, 0, 0, 0 };
- __vector unsigned short vs8;
- __vector unsigned int vi4;
- __vector float vf1;
-
- vs8 = (__vector unsigned short) (__vector unsigned long long) { __A, __A };
- vi4 = (__vector unsigned int) vec_mergel
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtpu16_ps(__m64 __A) {
+ const __vector unsigned short __zero = {0, 0, 0, 0, 0, 0, 0, 0};
+ __vector unsigned short __vs8;
+ __vector unsigned int __vi4;
+ __vector float __vf1;
+
+ __vs8 = (__vector unsigned short)(__vector unsigned long long){__A, __A};
+ __vi4 = (__vector unsigned int)vec_mergel
#ifdef __LITTLE_ENDIAN__
- (vs8, zero);
+ (__vs8, __zero);
#else
- (zero, vs8);
+ (__zero, __vs8);
#endif
- vf1 = (__vector float) vec_ctf (vi4, 0);
+ __vf1 = (__vector float)vec_ctf(__vi4, 0);
- return (__m128) vf1;
+ return (__m128)__vf1;
}
/* Convert the low four signed 8-bit values in A to SPFP form. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpi8_ps (__m64 __A)
-{
- __vector signed char vc16;
- __vector signed short vs8;
- __vector signed int vi4;
- __vector float vf1;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtpi8_ps(__m64 __A) {
+ __vector signed char __vc16;
+ __vector signed short __vs8;
+ __vector signed int __vi4;
+ __vector float __vf1;
- vc16 = (__vector signed char) (__vector unsigned long long) { __A, __A };
- vs8 = vec_vupkhsb (vc16);
- vi4 = vec_vupkhsh (vs8);
- vf1 = (__vector float) vec_ctf (vi4, 0);
+ __vc16 = (__vector signed char)(__vector unsigned long long){__A, __A};
+ __vs8 = vec_vupkhsb(__vc16);
+ __vi4 = vec_vupkhsh(__vs8);
+ __vf1 = (__vector float)vec_ctf(__vi4, 0);
- return (__m128) vf1;
+ return (__m128)__vf1;
}
/* Convert the low four unsigned 8-bit values in A to SPFP form. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-
-_mm_cvtpu8_ps (__m64 __A)
-{
- const __vector unsigned char zero =
- { 0, 0, 0, 0, 0, 0, 0, 0 };
- __vector unsigned char vc16;
- __vector unsigned short vs8;
- __vector unsigned int vi4;
- __vector float vf1;
-
- vc16 = (__vector unsigned char) (__vector unsigned long long) { __A, __A };
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+
+ _mm_cvtpu8_ps(__m64 __A) {
+ const __vector unsigned char __zero = {0, 0, 0, 0, 0, 0, 0, 0};
+ __vector unsigned char __vc16;
+ __vector unsigned short __vs8;
+ __vector unsigned int __vi4;
+ __vector float __vf1;
+
+ __vc16 = (__vector unsigned char)(__vector unsigned long long){__A, __A};
#ifdef __LITTLE_ENDIAN__
- vs8 = (__vector unsigned short) vec_mergel (vc16, zero);
- vi4 = (__vector unsigned int) vec_mergeh (vs8,
- (__vector unsigned short) zero);
+ __vs8 = (__vector unsigned short)vec_mergel(__vc16, __zero);
+ __vi4 =
+ (__vector unsigned int)vec_mergeh(__vs8, (__vector unsigned short)__zero);
#else
- vs8 = (__vector unsigned short) vec_mergel (zero, vc16);
- vi4 = (__vector unsigned int) vec_mergeh ((__vector unsigned short) zero,
- vs8);
+ __vs8 = (__vector unsigned short)vec_mergel(__zero, __vc16);
+ __vi4 =
+ (__vector unsigned int)vec_mergeh((__vector unsigned short)__zero, __vs8);
#endif
- vf1 = (__vector float) vec_ctf (vi4, 0);
+ __vf1 = (__vector float)vec_ctf(__vi4, 0);
- return (__m128) vf1;
+ return (__m128)__vf1;
}
/* Convert the four signed 32-bit values in A and B to SPFP form. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtpi32x2_ps (__m64 __A, __m64 __B)
-{
- __vector signed int vi4;
- __vector float vf4;
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtpi32x2_ps(__m64 __A, __m64 __B) {
+ __vector signed int __vi4;
+ __vector float __vf4;
- vi4 = (__vector signed int) (__vector unsigned long long) { __A, __B };
- vf4 = (__vector float) vec_ctf (vi4, 0);
- return (__m128) vf4;
+ __vi4 = (__vector signed int)(__vector unsigned long long){__A, __B};
+ __vf4 = (__vector float)vec_ctf(__vi4, 0);
+ return (__m128)__vf4;
}
/* Convert the four SPFP values in A to four signed 16-bit integers. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_pi16 (__m128 __A)
-{
- __v4sf rounded;
- __vector signed int temp;
- __vector unsigned long long result;
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtps_pi16(__m128 __A) {
+ __v4sf __rounded;
+ __vector signed int __temp;
+ __vector unsigned long long __result;
- rounded = vec_rint(__A);
- temp = vec_cts (rounded, 0);
- result = (__vector unsigned long long) vec_pack (temp, temp);
+ __rounded = vec_rint(__A);
+ __temp = vec_cts(__rounded, 0);
+ __result = (__vector unsigned long long)vec_pack(__temp, __temp);
- return (__m64) ((__vector long long) result)[0];
+ return (__m64)((__vector long long)__result)[0];
}
/* Convert the four SPFP values in A to four signed 8-bit integers. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_cvtps_pi8 (__m128 __A)
-{
- __v4sf rounded;
- __vector signed int tmp_i;
- static const __vector signed int zero = {0, 0, 0, 0};
- __vector signed short tmp_s;
- __vector signed char res_v;
-
- rounded = vec_rint(__A);
- tmp_i = vec_cts (rounded, 0);
- tmp_s = vec_pack (tmp_i, zero);
- res_v = vec_pack (tmp_s, tmp_s);
- return (__m64) ((__vector long long) res_v)[0];
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_cvtps_pi8(__m128 __A) {
+ __v4sf __rounded;
+ __vector signed int __tmp_i;
+ static const __vector signed int __zero = {0, 0, 0, 0};
+ __vector signed short __tmp_s;
+ __vector signed char __res_v;
+
+ __rounded = vec_rint(__A);
+ __tmp_i = vec_cts(__rounded, 0);
+ __tmp_s = vec_pack(__tmp_i, __zero);
+ __res_v = vec_pack(__tmp_s, __tmp_s);
+ return (__m64)((__vector long long)__res_v)[0];
}
/* Selects four specific SPFP values from A and B based on MASK. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-
-_mm_shuffle_ps (__m128 __A, __m128 __B, int const __mask)
-{
- unsigned long element_selector_10 = __mask & 0x03;
- unsigned long element_selector_32 = (__mask >> 2) & 0x03;
- unsigned long element_selector_54 = (__mask >> 4) & 0x03;
- unsigned long element_selector_76 = (__mask >> 6) & 0x03;
- static const unsigned int permute_selectors[4] =
- {
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+
+ _mm_shuffle_ps(__m128 __A, __m128 __B, int const __mask) {
+ unsigned long __element_selector_10 = __mask & 0x03;
+ unsigned long __element_selector_32 = (__mask >> 2) & 0x03;
+ unsigned long __element_selector_54 = (__mask >> 4) & 0x03;
+ unsigned long __element_selector_76 = (__mask >> 6) & 0x03;
+ static const unsigned int __permute_selectors[4] = {
#ifdef __LITTLE_ENDIAN__
0x03020100, 0x07060504, 0x0B0A0908, 0x0F0E0D0C
#else
0x00010203, 0x04050607, 0x08090A0B, 0x0C0D0E0F
#endif
- };
- __vector unsigned int t;
+ };
+ __vector unsigned int __t;
- t[0] = permute_selectors[element_selector_10];
- t[1] = permute_selectors[element_selector_32];
- t[2] = permute_selectors[element_selector_54] + 0x10101010;
- t[3] = permute_selectors[element_selector_76] + 0x10101010;
- return vec_perm ((__v4sf) __A, (__v4sf)__B, (__vector unsigned char)t);
+ __t[0] = __permute_selectors[__element_selector_10];
+ __t[1] = __permute_selectors[__element_selector_32];
+ __t[2] = __permute_selectors[__element_selector_54] + 0x10101010;
+ __t[3] = __permute_selectors[__element_selector_76] + 0x10101010;
+ return vec_perm((__v4sf)__A, (__v4sf)__B, (__vector unsigned char)__t);
}
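/* Illustrative example: the 8-bit mask holds four 2-bit selectors; the low
   two pick from __A, the high two from __B. Assuming the usual
   _MM_SHUFFLE(z, y, x, w) helper, ((z) << 6 | (y) << 4 | (x) << 2 | (w)):
     __m128 r = _mm_shuffle_ps(a, b, _MM_SHUFFLE(3, 2, 1, 0));
     // r == { a[0], a[1], b[2], b[3] }
*/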
/* Selects and interleaves the upper two SPFP values from A and B. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpackhi_ps (__m128 __A, __m128 __B)
-{
- return (__m128) vec_vmrglw ((__v4sf) __A, (__v4sf)__B);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpackhi_ps(__m128 __A, __m128 __B) {
+ return (__m128)vec_vmrglw((__v4sf)__A, (__v4sf)__B);
}
/* Selects and interleaves the lower two SPFP values from A and B. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_unpacklo_ps (__m128 __A, __m128 __B)
-{
- return (__m128) vec_vmrghw ((__v4sf) __A, (__v4sf)__B);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_unpacklo_ps(__m128 __A, __m128 __B) {
+ return (__m128)vec_vmrghw((__v4sf)__A, (__v4sf)__B);
}
/* Sets the upper two SPFP values with 64 bits of data loaded from P;
the lower two values are passed through from A. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadh_pi (__m128 __A, __m64 const *__P)
-{
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_loadh_pi(__m128 __A, __m64 const *__P) {
__vector unsigned long long __a = (__vector unsigned long long)__A;
__vector unsigned long long __p = vec_splats(*__P);
- __a [1] = __p [1];
+ __a[1] = __p[1];
return (__m128)__a;
}
/* Stores the upper two SPFP values of A into P. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storeh_pi (__m64 *__P, __m128 __A)
-{
- __vector unsigned long long __a = (__vector unsigned long long) __A;
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_storeh_pi(__m64 *__P, __m128 __A) {
+ __vector unsigned long long __a = (__vector unsigned long long)__A;
*__P = __a[1];
}
/* Moves the upper two values of B into the lower two values of A. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movehl_ps (__m128 __A, __m128 __B)
-{
- return (__m128) vec_mergel ((__vector unsigned long long)__B,
- (__vector unsigned long long)__A);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_movehl_ps(__m128 __A, __m128 __B) {
+ return (__m128)vec_mergel((__vector unsigned long long)__B,
+ (__vector unsigned long long)__A);
}
/* Moves the lower two values of B into the upper two values of A. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movelh_ps (__m128 __A, __m128 __B)
-{
- return (__m128) vec_mergeh ((__vector unsigned long long)__A,
- (__vector unsigned long long)__B);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_movelh_ps(__m128 __A, __m128 __B) {
+ return (__m128)vec_mergeh((__vector unsigned long long)__A,
+ (__vector unsigned long long)__B);
}
/* Sets the lower two SPFP values with 64 bits of data loaded from P;
the upper two values are passed through from A. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_loadl_pi (__m128 __A, __m64 const *__P)
-{
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_loadl_pi(__m128 __A, __m64 const *__P) {
__vector unsigned long long __a = (__vector unsigned long long)__A;
__vector unsigned long long __p = vec_splats(*__P);
- __a [0] = __p [0];
+ __a[0] = __p[0];
return (__m128)__a;
}
/* Stores the lower two SPFP values of A into P. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_storel_pi (__m64 *__P, __m128 __A)
-{
- __vector unsigned long long __a = (__vector unsigned long long) __A;
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_storel_pi(__m64 *__P, __m128 __A) {
+ __vector unsigned long long __a = (__vector unsigned long long)__A;
*__P = __a[0];
}
@@ -1325,453 +1306,456 @@ _mm_storel_pi (__m64 *__P, __m128 __A)
/* Intrinsic functions that require PowerISA 2.07 minimum. */
/* Creates a 4-bit mask from the most significant bits of the SPFP values. */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movemask_ps (__m128 __A)
-{
- __vector unsigned long long result;
- static const __vector unsigned int perm_mask =
- {
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_movemask_ps(__m128 __A) {
+#ifdef _ARCH_PWR10
+ return vec_extractm((__vector unsigned int)__A);
+#else
+ __vector unsigned long long __result;
+ static const __vector unsigned int __perm_mask = {
#ifdef __LITTLE_ENDIAN__
- 0x00204060, 0x80808080, 0x80808080, 0x80808080
+ 0x00204060, 0x80808080, 0x80808080, 0x80808080
#else
0x80808080, 0x80808080, 0x80808080, 0x00204060
#endif
- };
+ };
- result = ((__vector unsigned long long)
- vec_vbpermq ((__vector unsigned char) __A,
- (__vector unsigned char) perm_mask));
+ __result = ((__vector unsigned long long)vec_vbpermq(
+ (__vector unsigned char)__A, (__vector unsigned char)__perm_mask));
#ifdef __LITTLE_ENDIAN__
- return result[1];
+ return __result[1];
#else
- return result[0];
+ return __result[0];
#endif
+#endif /* !_ARCH_PWR10 */
}
#endif /* _ARCH_PWR8 */
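/* Illustrative note: bit i of the result is the sign bit of element i, so
   for __A == { -1.0f, 2.0f, -3.0f, 4.0f } the mask is 0b0101 (5). On the
   pre-Power10 path, __perm_mask steers exactly those four sign-bit
   positions into the low bits that vec_vbpermq produces. */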
/* Create a vector with all four elements equal to *P. */
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load1_ps (float const *__P)
-{
- return _mm_set1_ps (*__P);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_load1_ps(float const *__P) {
+ return _mm_set1_ps(*__P);
}
-extern __inline __m128 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_load_ps1 (float const *__P)
-{
- return _mm_load1_ps (__P);
+extern __inline __m128
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_load_ps1(float const *__P) {
+ return _mm_load1_ps(__P);
}
/* Extracts one of the four words of A. The selector N must be immediate. */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_extract_pi16 (__m64 const __A, int const __N)
-{
- unsigned int shiftr = __N & 3;
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_extract_pi16(__m64 const __A, int const __N) {
+ unsigned int __shiftr = __N & 3;
#ifdef __BIG_ENDIAN__
- shiftr = 3 - shiftr;
+ __shiftr = 3 - __shiftr;
#endif
- return ((__A >> (shiftr * 16)) & 0xffff);
+ return ((__A >> (__shiftr * 16)) & 0xffff);
}
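/* Illustrative example: the selector picks one of the four 16-bit words,
   zero-extended into an int; on little-endian, _mm_extract_pi16(a, 1)
   returns bits 16..31 of the 64-bit value. */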
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pextrw (__m64 const __A, int const __N)
-{
- return _mm_extract_pi16 (__A, __N);
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pextrw(__m64 const __A, int const __N) {
+ return _mm_extract_pi16(__A, __N);
}
/* Inserts word D into one of four words of A. The selector N must be
immediate. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_insert_pi16 (__m64 const __A, int const __D, int const __N)
-{
- const int shiftl = (__N & 3) * 16;
- const __m64 shiftD = (const __m64) __D << shiftl;
- const __m64 mask = 0xffffUL << shiftl;
- __m64 result = (__A & (~mask)) | (shiftD & mask);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_insert_pi16(__m64 const __A, int const __D, int const __N) {
+ const int __shiftl = (__N & 3) * 16;
+ const __m64 __shiftD = (const __m64)__D << __shiftl;
+ const __m64 __mask = 0xffffUL << __shiftl;
+ __m64 __result = (__A & (~__mask)) | (__shiftD & __mask);
- return (result);
+ return __result;
}
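/* Illustrative example: with __N == 2 the shift is 32, so __mask is
   0x0000ffff00000000 and word 2 of __A is replaced:
     __m64 r = _mm_insert_pi16(a, 0x1234, 2); // word 2 of r == 0x1234
*/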
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pinsrw (__m64 const __A, int const __D, int const __N)
-{
- return _mm_insert_pi16 (__A, __D, __N);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pinsrw(__m64 const __A, int const __D, int const __N) {
+ return _mm_insert_pi16(__A, __D, __N);
}
/* Compute the element-wise maximum of signed 16-bit values. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_pi16 (__m64 __A, __m64 __B)
-{
+extern __inline __m64
+    __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+    _mm_max_pi16(__m64 __A, __m64 __B) {
#if _ARCH_PWR8
- __vector signed short a, b, r;
- __vector __bool short c;
-
- a = (__vector signed short)vec_splats (__A);
- b = (__vector signed short)vec_splats (__B);
- c = (__vector __bool short)vec_cmpgt (a, b);
- r = vec_sel (b, a, c);
- return (__m64) ((__vector long long) r)[0];
+ __vector signed short __a, __b, __r;
+ __vector __bool short __c;
+
+ __a = (__vector signed short)vec_splats(__A);
+ __b = (__vector signed short)vec_splats(__B);
+ __c = (__vector __bool short)vec_cmpgt(__a, __b);
+ __r = vec_sel(__b, __a, __c);
+ return (__m64)((__vector long long)__r)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __m1, __m2, __res;
- m1.as_m64 = __A;
- m2.as_m64 = __B;
+ __m1.as_m64 = __A;
+ __m2.as_m64 = __B;
- res.as_short[0] =
- (m1.as_short[0] > m2.as_short[0]) ? m1.as_short[0] : m2.as_short[0];
- res.as_short[1] =
- (m1.as_short[1] > m2.as_short[1]) ? m1.as_short[1] : m2.as_short[1];
- res.as_short[2] =
- (m1.as_short[2] > m2.as_short[2]) ? m1.as_short[2] : m2.as_short[2];
- res.as_short[3] =
- (m1.as_short[3] > m2.as_short[3]) ? m1.as_short[3] : m2.as_short[3];
+ __res.as_short[0] = (__m1.as_short[0] > __m2.as_short[0]) ? __m1.as_short[0]
+ : __m2.as_short[0];
+ __res.as_short[1] = (__m1.as_short[1] > __m2.as_short[1]) ? __m1.as_short[1]
+ : __m2.as_short[1];
+ __res.as_short[2] = (__m1.as_short[2] > __m2.as_short[2]) ? __m1.as_short[2]
+ : __m2.as_short[2];
+ __res.as_short[3] = (__m1.as_short[3] > __m2.as_short[3]) ? __m1.as_short[3]
+ : __m2.as_short[3];
- return (__m64) res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pmaxsw (__m64 __A, __m64 __B)
-{
- return _mm_max_pi16 (__A, __B);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pmaxsw(__m64 __A, __m64 __B) {
+ return _mm_max_pi16(__A, __B);
}
/* Compute the element-wise maximum of unsigned 8-bit values. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_max_pu8 (__m64 __A, __m64 __B)
-{
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_max_pu8(__m64 __A, __m64 __B) {
#if _ARCH_PWR8
- __vector unsigned char a, b, r;
- __vector __bool char c;
-
- a = (__vector unsigned char)vec_splats (__A);
- b = (__vector unsigned char)vec_splats (__B);
- c = (__vector __bool char)vec_cmpgt (a, b);
- r = vec_sel (b, a, c);
- return (__m64) ((__vector long long) r)[0];
+ __vector unsigned char __a, __b, __r;
+ __vector __bool char __c;
+
+ __a = (__vector unsigned char)vec_splats(__A);
+ __b = (__vector unsigned char)vec_splats(__B);
+ __c = (__vector __bool char)vec_cmpgt(__a, __b);
+ __r = vec_sel(__b, __a, __c);
+ return (__m64)((__vector long long)__r)[0];
#else
- __m64_union m1, m2, res;
- long i;
-
- m1.as_m64 = __A;
- m2.as_m64 = __B;
+ __m64_union __m1, __m2, __res;
+ long __i;
+ __m1.as_m64 = __A;
+ __m2.as_m64 = __B;
- for (i = 0; i < 8; i++)
- res.as_char[i] =
- ((unsigned char) m1.as_char[i] > (unsigned char) m2.as_char[i]) ?
- m1.as_char[i] : m2.as_char[i];
+ for (__i = 0; __i < 8; __i++)
+ __res.as_char[__i] =
+ ((unsigned char)__m1.as_char[__i] > (unsigned char)__m2.as_char[__i])
+ ? __m1.as_char[__i]
+ : __m2.as_char[__i];
- return (__m64) res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pmaxub (__m64 __A, __m64 __B)
-{
- return _mm_max_pu8 (__A, __B);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pmaxub(__m64 __A, __m64 __B) {
+ return _mm_max_pu8(__A, __B);
}
/* Compute the element-wise minimum of signed 16-bit values. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_pi16 (__m64 __A, __m64 __B)
-{
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_min_pi16(__m64 __A, __m64 __B) {
#if _ARCH_PWR8
- __vector signed short a, b, r;
- __vector __bool short c;
-
- a = (__vector signed short)vec_splats (__A);
- b = (__vector signed short)vec_splats (__B);
- c = (__vector __bool short)vec_cmplt (a, b);
- r = vec_sel (b, a, c);
- return (__m64) ((__vector long long) r)[0];
+ __vector signed short __a, __b, __r;
+ __vector __bool short __c;
+
+ __a = (__vector signed short)vec_splats(__A);
+ __b = (__vector signed short)vec_splats(__B);
+ __c = (__vector __bool short)vec_cmplt(__a, __b);
+ __r = vec_sel(__b, __a, __c);
+ return (__m64)((__vector long long)__r)[0];
#else
- __m64_union m1, m2, res;
+ __m64_union __m1, __m2, __res;
- m1.as_m64 = __A;
- m2.as_m64 = __B;
+ __m1.as_m64 = __A;
+ __m2.as_m64 = __B;
- res.as_short[0] =
- (m1.as_short[0] < m2.as_short[0]) ? m1.as_short[0] : m2.as_short[0];
- res.as_short[1] =
- (m1.as_short[1] < m2.as_short[1]) ? m1.as_short[1] : m2.as_short[1];
- res.as_short[2] =
- (m1.as_short[2] < m2.as_short[2]) ? m1.as_short[2] : m2.as_short[2];
- res.as_short[3] =
- (m1.as_short[3] < m2.as_short[3]) ? m1.as_short[3] : m2.as_short[3];
+ __res.as_short[0] = (__m1.as_short[0] < __m2.as_short[0]) ? __m1.as_short[0]
+ : __m2.as_short[0];
+ __res.as_short[1] = (__m1.as_short[1] < __m2.as_short[1]) ? __m1.as_short[1]
+ : __m2.as_short[1];
+ __res.as_short[2] = (__m1.as_short[2] < __m2.as_short[2]) ? __m1.as_short[2]
+ : __m2.as_short[2];
+ __res.as_short[3] = (__m1.as_short[3] < __m2.as_short[3]) ? __m1.as_short[3]
+ : __m2.as_short[3];
- return (__m64) res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pminsw (__m64 __A, __m64 __B)
-{
- return _mm_min_pi16 (__A, __B);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pminsw(__m64 __A, __m64 __B) {
+ return _mm_min_pi16(__A, __B);
}
/* Compute the element-wise minimum of unsigned 8-bit values. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_min_pu8 (__m64 __A, __m64 __B)
-{
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_min_pu8(__m64 __A, __m64 __B) {
#if _ARCH_PWR8
- __vector unsigned char a, b, r;
- __vector __bool char c;
-
- a = (__vector unsigned char)vec_splats (__A);
- b = (__vector unsigned char)vec_splats (__B);
- c = (__vector __bool char)vec_cmplt (a, b);
- r = vec_sel (b, a, c);
- return (__m64) ((__vector long long) r)[0];
+ __vector unsigned char __a, __b, __r;
+ __vector __bool char __c;
+
+ __a = (__vector unsigned char)vec_splats(__A);
+ __b = (__vector unsigned char)vec_splats(__B);
+ __c = (__vector __bool char)vec_cmplt(__a, __b);
+ __r = vec_sel(__b, __a, __c);
+ return (__m64)((__vector long long)__r)[0];
#else
- __m64_union m1, m2, res;
- long i;
+ __m64_union __m1, __m2, __res;
+ long __i;
- m1.as_m64 = __A;
- m2.as_m64 = __B;
+ __m1.as_m64 = __A;
+ __m2.as_m64 = __B;
+ for (__i = 0; __i < 8; __i++)
+ __res.as_char[__i] =
+ ((unsigned char)__m1.as_char[__i] < (unsigned char)__m2.as_char[__i])
+ ? __m1.as_char[__i]
+ : __m2.as_char[__i];
- for (i = 0; i < 8; i++)
- res.as_char[i] =
- ((unsigned char) m1.as_char[i] < (unsigned char) m2.as_char[i]) ?
- m1.as_char[i] : m2.as_char[i];
-
- return (__m64) res.as_m64;
+ return (__m64)__res.as_m64;
#endif
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pminub (__m64 __A, __m64 __B)
-{
- return _mm_min_pu8 (__A, __B);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pminub(__m64 __A, __m64 __B) {
+ return _mm_min_pu8(__A, __B);
}
/* Create an 8-bit mask of the signs of 8-bit values. */
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_movemask_pi8 (__m64 __A)
-{
- unsigned long long p =
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_movemask_pi8(__m64 __A) {
+#ifdef __powerpc64__
+ unsigned long long __p =
+#ifdef __LITTLE_ENDIAN__
+ 0x0008101820283038UL; // permute control for sign bits
+#else
+ 0x3830282018100800UL; // permute control for sign bits
+#endif
+ return __builtin_bpermd(__p, __A);
+#else
#ifdef __LITTLE_ENDIAN__
- 0x0008101820283038UL; // permute control for sign bits
+ unsigned int __mask = 0x20283038UL;
+ unsigned int __r1 = __builtin_bpermd(__mask, __A) & 0xf;
+ unsigned int __r2 = __builtin_bpermd(__mask, __A >> 32) & 0xf;
#else
- 0x3830282018100800UL; // permute control for sign bits
+ unsigned int __mask = 0x38302820UL;
+ unsigned int __r1 = __builtin_bpermd(__mask, __A >> 32) & 0xf;
+ unsigned int __r2 = __builtin_bpermd(__mask, __A) & 0xf;
+#endif
+ return (__r2 << 4) | __r1;
#endif
- return __builtin_bpermd (p, __A);
}
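/* Illustrative note: each byte of the permute control names the source bit
   that __builtin_bpermd should gather, so the eight constants 0x00, 0x08,
   ..., 0x38 select the sign bit of each byte of __A. The new 32-bit
   fallback gathers four sign bits at a time and stitches the two nibbles
   back together as (__r2 << 4) | __r1. */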
-extern __inline int __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pmovmskb (__m64 __A)
-{
- return _mm_movemask_pi8 (__A);
+extern __inline int
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pmovmskb(__m64 __A) {
+ return _mm_movemask_pi8(__A);
}
/* Multiply four unsigned 16-bit values in A by four unsigned 16-bit values
in B and produce the high 16 bits of the 32-bit results. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_mulhi_pu16 (__m64 __A, __m64 __B)
-{
- __vector unsigned short a, b;
- __vector unsigned short c;
- __vector unsigned int w0, w1;
- __vector unsigned char xform1 = {
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_mulhi_pu16(__m64 __A, __m64 __B) {
+ __vector unsigned short __a, __b;
+ __vector unsigned short __c;
+ __vector unsigned int __w0, __w1;
+ __vector unsigned char __xform1 = {
#ifdef __LITTLE_ENDIAN__
- 0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17,
- 0x0A, 0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
+ 0x02, 0x03, 0x12, 0x13, 0x06, 0x07, 0x16, 0x17, 0x0A,
+ 0x0B, 0x1A, 0x1B, 0x0E, 0x0F, 0x1E, 0x1F
#else
- 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15,
- 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15
+ 0x00, 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15, 0x00,
+ 0x01, 0x10, 0x11, 0x04, 0x05, 0x14, 0x15
#endif
- };
+ };
- a = (__vector unsigned short)vec_splats (__A);
- b = (__vector unsigned short)vec_splats (__B);
+ __a = (__vector unsigned short)vec_splats(__A);
+ __b = (__vector unsigned short)vec_splats(__B);
- w0 = vec_vmuleuh (a, b);
- w1 = vec_vmulouh (a, b);
- c = (__vector unsigned short)vec_perm (w0, w1, xform1);
+ __w0 = vec_vmuleuh(__a, __b);
+ __w1 = vec_vmulouh(__a, __b);
+ __c = (__vector unsigned short)vec_perm(__w0, __w1, __xform1);
- return (__m64) ((__vector long long) c)[0];
+ return (__m64)((__vector long long)__c)[0];
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pmulhuw (__m64 __A, __m64 __B)
-{
- return _mm_mulhi_pu16 (__A, __B);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pmulhuw(__m64 __A, __m64 __B) {
+ return _mm_mulhi_pu16(__A, __B);
}
/* Return a combination of the four 16-bit values in A. The selector
must be an immediate. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_shuffle_pi16 (__m64 __A, int const __N)
-{
- unsigned long element_selector_10 = __N & 0x03;
- unsigned long element_selector_32 = (__N >> 2) & 0x03;
- unsigned long element_selector_54 = (__N >> 4) & 0x03;
- unsigned long element_selector_76 = (__N >> 6) & 0x03;
- static const unsigned short permute_selectors[4] =
- {
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_shuffle_pi16(__m64 __A, int const __N) {
+ unsigned long __element_selector_10 = __N & 0x03;
+ unsigned long __element_selector_32 = (__N >> 2) & 0x03;
+ unsigned long __element_selector_54 = (__N >> 4) & 0x03;
+ unsigned long __element_selector_76 = (__N >> 6) & 0x03;
+ static const unsigned short __permute_selectors[4] = {
#ifdef __LITTLE_ENDIAN__
- 0x0908, 0x0B0A, 0x0D0C, 0x0F0E
+ 0x0908, 0x0B0A, 0x0D0C, 0x0F0E
#else
- 0x0607, 0x0405, 0x0203, 0x0001
+ 0x0607, 0x0405, 0x0203, 0x0001
#endif
- };
- __m64_union t;
- __vector unsigned long long a, p, r;
+ };
+ __m64_union __t;
+ __vector unsigned long long __a, __p, __r;
#ifdef __LITTLE_ENDIAN__
- t.as_short[0] = permute_selectors[element_selector_10];
- t.as_short[1] = permute_selectors[element_selector_32];
- t.as_short[2] = permute_selectors[element_selector_54];
- t.as_short[3] = permute_selectors[element_selector_76];
+ __t.as_short[0] = __permute_selectors[__element_selector_10];
+ __t.as_short[1] = __permute_selectors[__element_selector_32];
+ __t.as_short[2] = __permute_selectors[__element_selector_54];
+ __t.as_short[3] = __permute_selectors[__element_selector_76];
#else
- t.as_short[3] = permute_selectors[element_selector_10];
- t.as_short[2] = permute_selectors[element_selector_32];
- t.as_short[1] = permute_selectors[element_selector_54];
- t.as_short[0] = permute_selectors[element_selector_76];
+ __t.as_short[3] = __permute_selectors[__element_selector_10];
+ __t.as_short[2] = __permute_selectors[__element_selector_32];
+ __t.as_short[1] = __permute_selectors[__element_selector_54];
+ __t.as_short[0] = __permute_selectors[__element_selector_76];
#endif
- p = vec_splats (t.as_m64);
- a = vec_splats (__A);
- r = vec_perm (a, a, (__vector unsigned char)p);
- return (__m64) ((__vector long long) r)[0];
+ __p = vec_splats(__t.as_m64);
+ __a = vec_splats(__A);
+ __r = vec_perm(__a, __a, (__vector unsigned char)__p);
+ return (__m64)((__vector long long)__r)[0];
}
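/* Illustrative example: __N is decoded like _mm_shuffle_ps's mask, two
   bits per destination word, so
     __m64 r = _mm_shuffle_pi16(a, 0x1B); // 0b00011011
   reverses the four 16-bit words of a. */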
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pshufw (__m64 __A, int const __N)
-{
- return _mm_shuffle_pi16 (__A, __N);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pshufw(__m64 __A, int const __N) {
+ return _mm_shuffle_pi16(__A, __N);
}
/* Conditionally store byte elements of A into P. The high bit of each
byte in the selector N determines whether the corresponding byte from
A is stored. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_maskmove_si64 (__m64 __A, __m64 __N, char *__P)
-{
- __m64 hibit = 0x8080808080808080UL;
- __m64 mask, tmp;
- __m64 *p = (__m64*)__P;
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_maskmove_si64(__m64 __A, __m64 __N, char *__P) {
+ __m64 __hibit = 0x8080808080808080UL;
+ __m64 __mask, __tmp;
+ __m64 *__p = (__m64 *)__P;
- tmp = *p;
- mask = _mm_cmpeq_pi8 ((__N & hibit), hibit);
- tmp = (tmp & (~mask)) | (__A & mask);
- *p = tmp;
+ __tmp = *__p;
+ __mask = _mm_cmpeq_pi8((__N & __hibit), __hibit);
+ __tmp = (__tmp & (~__mask)) | (__A & __mask);
+ *__p = __tmp;
}
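/* Illustrative example: the store is a bytewise select,
   *p = (*p & ~mask) | (__A & mask), where a byte of mask is 0xff exactly
   when the matching byte of __N has its high bit set; a selector with only
   0x80 in byte 0 therefore stores just byte 0 of __A. */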
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_maskmovq (__m64 __A, __m64 __N, char *__P)
-{
- _mm_maskmove_si64 (__A, __N, __P);
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_maskmovq(__m64 __A, __m64 __N, char *__P) {
+ _mm_maskmove_si64(__A, __N, __P);
}
/* Compute the rounded averages of the unsigned 8-bit values in A and B. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_avg_pu8 (__m64 __A, __m64 __B)
-{
- __vector unsigned char a, b, c;
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_avg_pu8(__m64 __A, __m64 __B) {
+ __vector unsigned char __a, __b, __c;
- a = (__vector unsigned char)vec_splats (__A);
- b = (__vector unsigned char)vec_splats (__B);
- c = vec_avg (a, b);
- return (__m64) ((__vector long long) c)[0];
+ __a = (__vector unsigned char)vec_splats(__A);
+ __b = (__vector unsigned char)vec_splats(__B);
+ __c = vec_avg(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pavgb (__m64 __A, __m64 __B)
-{
- return _mm_avg_pu8 (__A, __B);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pavgb(__m64 __A, __m64 __B) {
+ return _mm_avg_pu8(__A, __B);
}
/* Compute the rounded averages of the unsigned 16-bit values in A and B. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_avg_pu16 (__m64 __A, __m64 __B)
-{
- __vector unsigned short a, b, c;
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_avg_pu16(__m64 __A, __m64 __B) {
+ __vector unsigned short __a, __b, __c;
- a = (__vector unsigned short)vec_splats (__A);
- b = (__vector unsigned short)vec_splats (__B);
- c = vec_avg (a, b);
- return (__m64) ((__vector long long) c)[0];
+ __a = (__vector unsigned short)vec_splats(__A);
+ __b = (__vector unsigned short)vec_splats(__B);
+ __c = vec_avg(__a, __b);
+ return (__m64)((__vector long long)__c)[0];
}
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_pavgw (__m64 __A, __m64 __B)
-{
- return _mm_avg_pu16 (__A, __B);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_pavgw(__m64 __A, __m64 __B) {
+ return _mm_avg_pu16(__A, __B);
}
/* Compute the sum of the absolute differences of the unsigned 8-bit
values in A and B. Return the value in the lower 16-bit word; the
upper words are cleared. */
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sad_pu8 (__m64 __A, __m64 __B)
-{
- __vector unsigned char a, b;
- __vector unsigned char vmin, vmax, vabsdiff;
- __vector signed int vsum;
- const __vector unsigned int zero =
- { 0, 0, 0, 0 };
- __m64_union result = {0};
-
- a = (__vector unsigned char) (__vector unsigned long long) { 0UL, __A };
- b = (__vector unsigned char) (__vector unsigned long long) { 0UL, __B };
- vmin = vec_min (a, b);
- vmax = vec_max (a, b);
- vabsdiff = vec_sub (vmax, vmin);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sad_pu8(__m64 __A, __m64 __B) {
+ __vector unsigned char __a, __b;
+ __vector unsigned char __vmin, __vmax, __vabsdiff;
+ __vector signed int __vsum;
+ const __vector unsigned int __zero = {0, 0, 0, 0};
+ __m64_union __result = {0};
+
+ __a = (__vector unsigned char)(__vector unsigned long long){0UL, __A};
+ __b = (__vector unsigned char)(__vector unsigned long long){0UL, __B};
+ __vmin = vec_min(__a, __b);
+ __vmax = vec_max(__a, __b);
+ __vabsdiff = vec_sub(__vmax, __vmin);
/* Sum four groups of bytes into integers. */
- vsum = (__vector signed int) vec_sum4s (vabsdiff, zero);
+ __vsum = (__vector signed int)vec_sum4s(__vabsdiff, __zero);
/* Sum across four integers with integer result. */
- vsum = vec_sums (vsum, (__vector signed int) zero);
+ __vsum = vec_sums(__vsum, (__vector signed int)__zero);
  /* The sum is in the rightmost 32 bits of the vector result.
Transfer to a GPR and truncate to 16 bits. */
- result.as_short[0] = vsum[3];
- return result.as_m64;
+ __result.as_short[0] = __vsum[3];
+ return __result.as_m64;
}
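/* Illustrative note: after the per-byte |a - b| differences, vec_sum4s
   forms four partial word sums, vec_sums folds those into a single
   integer, and only its low 16 bits are kept, matching the single-lane
   x86 psadbw result. */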
-extern __inline __m64 __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_m_psadbw (__m64 __A, __m64 __B)
-{
- return _mm_sad_pu8 (__A, __B);
+extern __inline __m64
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _m_psadbw(__m64 __A, __m64 __B) {
+ return _mm_sad_pu8(__A, __B);
}
/* Stores the data in A to the address P without polluting the caches. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_stream_pi (__m64 *__P, __m64 __A)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_stream_pi(__m64 *__P, __m64 __A) {
/* Use the data cache block touch for store transient. */
- __asm__ (
- " dcbtstt 0,%0"
- :
- : "b" (__P)
- : "memory"
- );
+ __asm__(" dcbtstt 0,%0" : : "b"(__P) : "memory");
*__P = __A;
}
/* Likewise. The address must be 16-byte aligned. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_stream_ps (float *__P, __m128 __A)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_stream_ps(float *__P, __m128 __A) {
/* Use the data cache block touch for store transient. */
- __asm__ (
- " dcbtstt 0,%0"
- :
- : "b" (__P)
- : "memory"
- );
- _mm_store_ps (__P, __A);
+ __asm__(" dcbtstt 0,%0" : : "b"(__P) : "memory");
+ _mm_store_ps(__P, __A);
}
/* Guarantees that every preceding store is globally visible before
any subsequent store. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_sfence (void)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_sfence(void) {
/* Generate a light weight sync. */
- __atomic_thread_fence (__ATOMIC_RELEASE);
+ __atomic_thread_fence(__ATOMIC_RELEASE);
}
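/* Illustrative note: a release fence lowers to lwsync on POWER, which
   orders every prior store before any later store, the same store->store
   guarantee x86 sfence provides. */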
/* The execution of the next instruction is delayed by an implementation
@@ -1779,9 +1763,9 @@ _mm_sfence (void)
architectural state. This is after the pop_options pragma because
it does not require SSE support in the processor--the encoding is a
nop on processors that do not support it. */
-extern __inline void __attribute__((__gnu_inline__, __always_inline__, __artificial__))
-_mm_pause (void)
-{
+extern __inline void
+ __attribute__((__gnu_inline__, __always_inline__, __artificial__))
+ _mm_pause(void) {
/* There is no exact match with this construct, but the following is
close to the desired effect. */
#if _ARCH_PWR8
@@ -1797,41 +1781,39 @@ _mm_pause (void)
PRI and continue execution. */
unsigned long __PPR;
- __asm__ volatile (
- " mfppr %0;"
- " or 31,31,31;"
- " isync;"
- " lwsync;"
- " isync;"
- " mtppr %0;"
- : "=r" (__PPR)
- :
- : "memory"
- );
+ __asm__ volatile(" mfppr %0;"
+ " or 31,31,31;"
+ " isync;"
+ " lwsync;"
+ " isync;"
+ " mtppr %0;"
+ : "=r"(__PPR)
+ :
+ : "memory");
#else
  /* For older processors, where we may not even have Program Priority
     controls, we can only depend on Heavy Weight Sync. */
- __atomic_thread_fence (__ATOMIC_SEQ_CST);
+ __atomic_thread_fence(__ATOMIC_SEQ_CST);
#endif
}
/* Transpose the 4x4 matrix composed of row[0-3]. */
-#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
-do { \
- __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3); \
- __v4sf __t0 = vec_vmrghw (__r0, __r1); \
- __v4sf __t1 = vec_vmrghw (__r2, __r3); \
- __v4sf __t2 = vec_vmrglw (__r0, __r1); \
- __v4sf __t3 = vec_vmrglw (__r2, __r3); \
- (row0) = (__v4sf)vec_mergeh ((__vector long long)__t0, \
- (__vector long long)__t1); \
- (row1) = (__v4sf)vec_mergel ((__vector long long)__t0, \
- (__vector long long)__t1); \
- (row2) = (__v4sf)vec_mergeh ((__vector long long)__t2, \
- (__vector long long)__t3); \
- (row3) = (__v4sf)vec_mergel ((__vector long long)__t2, \
- (__vector long long)__t3); \
-} while (0)
+#define _MM_TRANSPOSE4_PS(row0, row1, row2, row3) \
+ do { \
+ __v4sf __r0 = (row0), __r1 = (row1), __r2 = (row2), __r3 = (row3); \
+ __v4sf __t0 = vec_vmrghw(__r0, __r1); \
+ __v4sf __t1 = vec_vmrghw(__r2, __r3); \
+ __v4sf __t2 = vec_vmrglw(__r0, __r1); \
+ __v4sf __t3 = vec_vmrglw(__r2, __r3); \
+ (row0) = (__v4sf)vec_mergeh((__vector long long)__t0, \
+ (__vector long long)__t1); \
+ (row1) = (__v4sf)vec_mergel((__vector long long)__t0, \
+ (__vector long long)__t1); \
+ (row2) = (__v4sf)vec_mergeh((__vector long long)__t2, \
+ (__vector long long)__t3); \
+ (row3) = (__v4sf)vec_mergel((__vector long long)__t2, \
+ (__vector long long)__t3); \
+ } while (0)
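/* Illustrative example: the merge pattern is the classic 4x4 transpose.
   Given __v4sf rows r0 = {a0,a1,a2,a3} ... r3 = {d0,d1,d2,d3},
     _MM_TRANSPOSE4_PS(r0, r1, r2, r3);
   leaves r0 = {a0,b0,c0,d0}, r1 = {a1,b1,c1,d1}, r2 = {a2,b2,c2,d2},
   and r3 = {a3,b3,c3,d3}. */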
/* For backward source compatibility. */
//# include <emmintrin.h>
@@ -1841,4 +1823,4 @@ do { \
#endif /* defined(__ppc64__) && (defined(__linux__) || defined(__FreeBSD__)) \
*/
-#endif /* _XMMINTRIN_H_INCLUDED */
+#endif /* XMMINTRIN_H_ */
diff --git a/contrib/llvm-project/clang/lib/Headers/rdseedintrin.h b/contrib/llvm-project/clang/lib/Headers/rdseedintrin.h
index ccb3d2dd2294..405bc2451eb8 100644
--- a/contrib/llvm-project/clang/lib/Headers/rdseedintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/rdseedintrin.h
@@ -20,20 +20,20 @@
static __inline__ int __DEFAULT_FN_ATTRS
_rdseed16_step(unsigned short *__p)
{
- return __builtin_ia32_rdseed16_step(__p);
+ return (int) __builtin_ia32_rdseed16_step(__p);
}
static __inline__ int __DEFAULT_FN_ATTRS
_rdseed32_step(unsigned int *__p)
{
- return __builtin_ia32_rdseed32_step(__p);
+ return (int) __builtin_ia32_rdseed32_step(__p);
}
#ifdef __x86_64__
static __inline__ int __DEFAULT_FN_ATTRS
_rdseed64_step(unsigned long long *__p)
{
- return __builtin_ia32_rdseed64_step(__p);
+ return (int) __builtin_ia32_rdseed64_step(__p);
}
#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/rtmintrin.h b/contrib/llvm-project/clang/lib/Headers/rtmintrin.h
index 36ff5835173f..a3ec81e3f740 100644
--- a/contrib/llvm-project/clang/lib/Headers/rtmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/rtmintrin.h
@@ -29,7 +29,7 @@
static __inline__ unsigned int __DEFAULT_FN_ATTRS
_xbegin(void)
{
- return __builtin_ia32_xbegin();
+ return (unsigned int)__builtin_ia32_xbegin();
}
static __inline__ void __DEFAULT_FN_ATTRS
diff --git a/contrib/llvm-project/clang/lib/Headers/smmintrin.h b/contrib/llvm-project/clang/lib/Headers/smmintrin.h
index 0df59c5fcc59..46fb7bcd4e09 100644
--- a/contrib/llvm-project/clang/lib/Headers/smmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/smmintrin.h
@@ -17,23 +17,25 @@
#include <tmmintrin.h>
/* Define the default attributes for the functions in this file. */
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.1"), __min_vector_width__(128)))
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("sse4.1"), \
+ __min_vector_width__(128)))
/* SSE4 Rounding macros. */
-#define _MM_FROUND_TO_NEAREST_INT 0x00
-#define _MM_FROUND_TO_NEG_INF 0x01
-#define _MM_FROUND_TO_POS_INF 0x02
-#define _MM_FROUND_TO_ZERO 0x03
-#define _MM_FROUND_CUR_DIRECTION 0x04
-
-#define _MM_FROUND_RAISE_EXC 0x00
-#define _MM_FROUND_NO_EXC 0x08
-
-#define _MM_FROUND_NINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
-#define _MM_FROUND_FLOOR (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
-#define _MM_FROUND_CEIL (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
-#define _MM_FROUND_TRUNC (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
-#define _MM_FROUND_RINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
+#define _MM_FROUND_TO_NEAREST_INT 0x00
+#define _MM_FROUND_TO_NEG_INF 0x01
+#define _MM_FROUND_TO_POS_INF 0x02
+#define _MM_FROUND_TO_ZERO 0x03
+#define _MM_FROUND_CUR_DIRECTION 0x04
+
+#define _MM_FROUND_RAISE_EXC 0x00
+#define _MM_FROUND_NO_EXC 0x08
+
+#define _MM_FROUND_NINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
+#define _MM_FROUND_FLOOR (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
+#define _MM_FROUND_CEIL (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
+#define _MM_FROUND_TRUNC (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
+#define _MM_FROUND_RINT (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
#define _MM_FROUND_NEARBYINT (_MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTION)
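/* Illustrative example: the composite macros just OR a direction with an
   exception policy, e.g. _MM_FROUND_FLOOR == 0x01 and
   _MM_FROUND_NEARBYINT == 0x0C, so
     __m128 r = _mm_round_ps(x, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
   truncates each lane without raising a precision exception. */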
/// Rounds up each element of the 128-bit vector of [4 x float] to an
@@ -51,7 +53,7 @@
/// \param X
/// A 128-bit vector of [4 x float] values to be rounded up.
/// \returns A 128-bit vector of [4 x float] containing the rounded values.
-#define _mm_ceil_ps(X) _mm_round_ps((X), _MM_FROUND_CEIL)
+#define _mm_ceil_ps(X) _mm_round_ps((X), _MM_FROUND_CEIL)
/// Rounds up each element of the 128-bit vector of [2 x double] to an
/// integer and returns the rounded values in a 128-bit vector of
@@ -68,7 +70,7 @@
/// \param X
/// A 128-bit vector of [2 x double] values to be rounded up.
/// \returns A 128-bit vector of [2 x double] containing the rounded values.
-#define _mm_ceil_pd(X) _mm_round_pd((X), _MM_FROUND_CEIL)
+#define _mm_ceil_pd(X) _mm_round_pd((X), _MM_FROUND_CEIL)
/// Copies three upper elements of the first 128-bit vector operand to
/// the corresponding three upper elements of the 128-bit result vector of
@@ -93,7 +95,7 @@
/// of the result.
/// \returns A 128-bit vector of [4 x float] containing the copied and rounded
/// values.
-#define _mm_ceil_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
+#define _mm_ceil_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
/// Copies the upper element of the first 128-bit vector operand to the
/// corresponding upper element of the 128-bit result vector of [2 x double].
@@ -118,7 +120,7 @@
/// of the result.
/// \returns A 128-bit vector of [2 x double] containing the copied and rounded
/// values.
-#define _mm_ceil_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_CEIL)
+#define _mm_ceil_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_CEIL)
/// Rounds down each element of the 128-bit vector of [4 x float] to an
/// integer and returns the rounded values in a 128-bit vector of
@@ -135,7 +137,7 @@
/// \param X
/// A 128-bit vector of [4 x float] values to be rounded down.
/// \returns A 128-bit vector of [4 x float] containing the rounded values.
-#define _mm_floor_ps(X) _mm_round_ps((X), _MM_FROUND_FLOOR)
+#define _mm_floor_ps(X) _mm_round_ps((X), _MM_FROUND_FLOOR)
/// Rounds down each element of the 128-bit vector of [2 x double] to an
/// integer and returns the rounded values in a 128-bit vector of
@@ -152,7 +154,7 @@
/// \param X
/// A 128-bit vector of [2 x double].
/// \returns A 128-bit vector of [2 x double] containing the rounded values.
-#define _mm_floor_pd(X) _mm_round_pd((X), _MM_FROUND_FLOOR)
+#define _mm_floor_pd(X) _mm_round_pd((X), _MM_FROUND_FLOOR)
/// Copies three upper elements of the first 128-bit vector operand to
/// the corresponding three upper elements of the 128-bit result vector of
@@ -177,7 +179,7 @@
/// of the result.
/// \returns A 128-bit vector of [4 x float] containing the copied and rounded
/// values.
-#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
+#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
/// Copies the upper element of the first 128-bit vector operand to the
/// corresponding upper element of the 128-bit result vector of [2 x double].
@@ -202,7 +204,7 @@
/// of the result.
/// \returns A 128-bit vector of [2 x double] containing the copied and rounded
/// values.
-#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)
+#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)
/// Rounds each element of the 128-bit vector of [4 x float] to an
/// integer value according to the rounding control specified by the second
@@ -234,7 +236,7 @@
/// 10: Upward (toward positive infinity) \n
/// 11: Truncated
/// \returns A 128-bit vector of [4 x float] containing the rounded values.
-#define _mm_round_ps(X, M) \
+#define _mm_round_ps(X, M) \
((__m128)__builtin_ia32_roundps((__v4sf)(__m128)(X), (M)))
/// Copies three upper elements of the first 128-bit vector operand to
@@ -275,9 +277,9 @@
/// 11: Truncated
/// \returns A 128-bit vector of [4 x float] containing the copied and rounded
/// values.
-#define _mm_round_ss(X, Y, M) \
- ((__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (M)))
+#define _mm_round_ss(X, Y, M) \
+ ((__m128)__builtin_ia32_roundss((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), \
+ (M)))
/// Rounds each element of the 128-bit vector of [2 x double] to an
/// integer value according to the rounding control specified by the second
@@ -309,7 +311,7 @@
/// 10: Upward (toward positive infinity) \n
/// 11: Truncated
/// \returns A 128-bit vector of [2 x double] containing the rounded values.
-#define _mm_round_pd(X, M) \
+#define _mm_round_pd(X, M) \
((__m128d)__builtin_ia32_roundpd((__v2df)(__m128d)(X), (M)))
/// Copies the upper element of the first 128-bit vector operand to the
@@ -350,9 +352,9 @@
/// 11: Truncated
/// \returns A 128-bit vector of [2 x double] containing the copied and rounded
/// values.
-#define _mm_round_sd(X, Y, M) \
- ((__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (M)))
+#define _mm_round_sd(X, Y, M) \
+ ((__m128d)__builtin_ia32_roundsd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), \
+ (M)))
/* SSE4 Packed Blending Intrinsics. */
/// Returns a 128-bit vector of [2 x double] where the values are
@@ -379,9 +381,9 @@
/// When a mask bit is 1, the corresponding 64-bit element in operand \a V2
/// is copied to the same position in the result.
/// \returns A 128-bit vector of [2 x double] containing the copied values.
-#define _mm_blend_pd(V1, V2, M) \
- ((__m128d) __builtin_ia32_blendpd ((__v2df)(__m128d)(V1), \
- (__v2df)(__m128d)(V2), (int)(M)))
+#define _mm_blend_pd(V1, V2, M) \
+ ((__m128d)__builtin_ia32_blendpd((__v2df)(__m128d)(V1), \
+ (__v2df)(__m128d)(V2), (int)(M)))
/// Returns a 128-bit vector of [4 x float] where the values are selected
/// from either the first or second operand as specified by the third
@@ -407,9 +409,9 @@
/// When a mask bit is 1, the corresponding 32-bit element in operand \a V2
/// is copied to the same position in the result.
/// \returns A 128-bit vector of [4 x float] containing the copied values.
-#define _mm_blend_ps(V1, V2, M) \
- ((__m128) __builtin_ia32_blendps ((__v4sf)(__m128)(V1), \
- (__v4sf)(__m128)(V2), (int)(M)))
+#define _mm_blend_ps(V1, V2, M) \
+ ((__m128)__builtin_ia32_blendps((__v4sf)(__m128)(V1), (__v4sf)(__m128)(V2), \
+ (int)(M)))
/// Returns a 128-bit vector of [2 x double] where the values are
/// selected from either the first or second operand as specified by the
@@ -431,11 +433,11 @@
/// position in the result. When a mask bit is 1, the corresponding 64-bit
/// element in operand \a __V2 is copied to the same position in the result.
/// \returns A 128-bit vector of [2 x double] containing the copied values.
-static __inline__ __m128d __DEFAULT_FN_ATTRS
-_mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
-{
- return (__m128d) __builtin_ia32_blendvpd ((__v2df)__V1, (__v2df)__V2,
- (__v2df)__M);
+static __inline__ __m128d __DEFAULT_FN_ATTRS _mm_blendv_pd(__m128d __V1,
+ __m128d __V2,
+ __m128d __M) {
+ return (__m128d)__builtin_ia32_blendvpd((__v2df)__V1, (__v2df)__V2,
+ (__v2df)__M);
}
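/* A minimal usage sketch (hypothetical helper; assumes SSE2's _mm_cmplt_pd
   is available): per-lane maximum. _mm_blendv_pd keys off the sign bit of
   each mask element, and the all-ones lanes produced where __a < __b have
   that bit set, so exactly those lanes take __b. */
static __m128d max2(__m128d __a, __m128d __b) {
  return _mm_blendv_pd(__a, __b, _mm_cmplt_pd(__a, __b));
}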
/// Returns a 128-bit vector of [4 x float] where the values are
@@ -458,11 +460,11 @@ _mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
/// position in the result. When a mask bit is 1, the corresponding 32-bit
/// element in operand \a __V2 is copied to the same position in the result.
/// \returns A 128-bit vector of [4 x float] containing the copied values.
-static __inline__ __m128 __DEFAULT_FN_ATTRS
-_mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M)
-{
- return (__m128) __builtin_ia32_blendvps ((__v4sf)__V1, (__v4sf)__V2,
- (__v4sf)__M);
+static __inline__ __m128 __DEFAULT_FN_ATTRS _mm_blendv_ps(__m128 __V1,
+ __m128 __V2,
+ __m128 __M) {
+ return (__m128)__builtin_ia32_blendvps((__v4sf)__V1, (__v4sf)__V2,
+ (__v4sf)__M);
}
/// Returns a 128-bit vector of [16 x i8] where the values are selected
@@ -485,11 +487,11 @@ _mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M)
/// position in the result. When a mask bit is 1, the corresponding 8-bit
/// element in operand \a __V2 is copied to the same position in the result.
/// \returns A 128-bit vector of [16 x i8] containing the copied values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
-{
- return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__V1, (__v16qi)__V2,
- (__v16qi)__M);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_blendv_epi8(__m128i __V1,
+ __m128i __V2,
+ __m128i __M) {
+ return (__m128i)__builtin_ia32_pblendvb128((__v16qi)__V1, (__v16qi)__V2,
+ (__v16qi)__M);
}
/// Returns a 128-bit vector of [8 x i16] where the values are selected
@@ -516,9 +518,9 @@ _mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
/// When a mask bit is 1, the corresponding 16-bit element in operand \a V2
/// is copied to the same position in the result.
/// \returns A 128-bit vector of [8 x i16] containing the copied values.
-#define _mm_blend_epi16(V1, V2, M) \
- ((__m128i) __builtin_ia32_pblendw128 ((__v8hi)(__m128i)(V1), \
- (__v8hi)(__m128i)(V2), (int)(M)))
+#define _mm_blend_epi16(V1, V2, M) \
+ ((__m128i)__builtin_ia32_pblendw128((__v8hi)(__m128i)(V1), \
+ (__v8hi)(__m128i)(V2), (int)(M)))
/* SSE4 Dword Multiply Instructions. */
/// Multiplies corresponding elements of two 128-bit vectors of [4 x i32]
@@ -534,10 +536,9 @@ _mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
/// \param __V2
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the products of both operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mullo_epi32 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) ((__v4su)__V1 * (__v4su)__V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mullo_epi32(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)((__v4su)__V1 * (__v4su)__V2);
}
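/* A minimal usage sketch (hypothetical helper): scale each 32-bit lane by
   three; only the low 32 bits of each product are kept. */
static __m128i scale_by_3(__m128i __v) {
  return _mm_mullo_epi32(__v, _mm_set1_epi32(3));
}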
/// Multiplies corresponding even-indexed elements of two 128-bit
@@ -554,10 +555,9 @@ _mm_mullo_epi32 (__m128i __V1, __m128i __V2)
/// A 128-bit vector of [4 x i32].
/// \returns A 128-bit vector of [2 x i64] containing the products of both
/// operands.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_mul_epi32 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__V1, (__v4si)__V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_mul_epi32(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_ia32_pmuldq128((__v4si)__V1, (__v4si)__V2);
}
/* SSE4 Floating Point Dot Product Instructions. */
@@ -593,9 +593,8 @@ _mm_mul_epi32 (__m128i __V1, __m128i __V2)
/// each [4 x float] subvector. If a bit is set, the dot product is returned
/// in the corresponding element; otherwise that element is set to zero.
/// \returns A 128-bit vector of [4 x float] containing the dot product.
-#define _mm_dp_ps(X, Y, M) \
- ((__m128) __builtin_ia32_dpps((__v4sf)(__m128)(X), \
- (__v4sf)(__m128)(Y), (M)))
+#define _mm_dp_ps(X, Y, M) \
+ ((__m128)__builtin_ia32_dpps((__v4sf)(__m128)(X), (__v4sf)(__m128)(Y), (M)))
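/* A minimal usage sketch (hypothetical helper): a full four-element dot
   product. Mask 0xF1 multiplies all four lanes and stores the sum only in
   element 0, which _mm_cvtss_f32 then returns as a scalar. */
static float dot4(__m128 __x, __m128 __y) {
  return _mm_cvtss_f32(_mm_dp_ps(__x, __y, 0xF1));
}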
/// Computes the dot product of the two 128-bit vectors of [2 x double]
/// and returns it in the elements of the 128-bit result vector of
@@ -628,9 +627,9 @@ _mm_mul_epi32 (__m128i __V1, __m128i __V2)
/// to the lowest element and bit [1] corresponding to the highest element of
/// each [2 x double] vector. If a bit is set, the dot product is returned in
/// the corresponding element; otherwise that element is set to zero.
-#define _mm_dp_pd(X, Y, M) \
- ((__m128d) __builtin_ia32_dppd((__v2df)(__m128d)(X), \
- (__v2df)(__m128d)(Y), (M)))
+#define _mm_dp_pd(X, Y, M) \
+ ((__m128d)__builtin_ia32_dppd((__v2df)(__m128d)(X), (__v2df)(__m128d)(Y), \
+ (M)))
/* SSE4 Streaming Load Hint Instruction. */
/// Loads integer values from a 128-bit aligned memory location to a
@@ -645,10 +644,9 @@ _mm_mul_epi32 (__m128i __V1, __m128i __V2)
/// values.
/// \returns A 128-bit integer vector containing the data stored at the
/// specified memory location.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_stream_load_si128 (__m128i const *__V)
-{
- return (__m128i) __builtin_nontemporal_load ((const __v2di *) __V);
+static __inline__ __m128i __DEFAULT_FN_ATTRS
+_mm_stream_load_si128(__m128i const *__V) {
+ return (__m128i)__builtin_nontemporal_load((const __v2di *)__V);
}
/* SSE4 Packed Integer Min/Max Instructions. */
@@ -665,10 +663,9 @@ _mm_stream_load_si128 (__m128i const *__V)
/// \param __V2
/// A 128-bit vector of [16 x i8]
/// \returns A 128-bit vector of [16 x i8] containing the lesser values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epi8 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_elementwise_min((__v16qs) __V1, (__v16qs) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi8(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_elementwise_min((__v16qs)__V1, (__v16qs)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -684,10 +681,9 @@ _mm_min_epi8 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [16 x i8].
/// \returns A 128-bit vector of [16 x i8] containing the greater values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epi8 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_elementwise_max((__v16qs) __V1, (__v16qs) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi8(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_elementwise_max((__v16qs)__V1, (__v16qs)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -703,10 +699,9 @@ _mm_max_epi8 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [8 x u16].
/// \returns A 128-bit vector of [8 x u16] containing the lesser values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epu16 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_elementwise_min((__v8hu) __V1, (__v8hu) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu16(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_elementwise_min((__v8hu)__V1, (__v8hu)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -722,10 +717,9 @@ _mm_min_epu16 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [8 x u16].
/// \returns A 128-bit vector of [8 x u16] containing the greater values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epu16 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_elementwise_max((__v8hu) __V1, (__v8hu) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu16(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_elementwise_max((__v8hu)__V1, (__v8hu)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -741,10 +735,9 @@ _mm_max_epu16 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [4 x i32].
/// \returns A 128-bit vector of [4 x i32] containing the lesser values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epi32 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_elementwise_min((__v4si) __V1, (__v4si) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epi32(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_elementwise_min((__v4si)__V1, (__v4si)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -760,10 +753,9 @@ _mm_min_epi32 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [4 x i32].
/// \returns A 128-bit vector of [4 x i32] containing the greater values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epi32 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_elementwise_max((__v4si) __V1, (__v4si) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epi32(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_elementwise_max((__v4si)__V1, (__v4si)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -779,10 +771,9 @@ _mm_max_epi32 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [4 x u32].
/// \returns A 128-bit vector of [4 x u32] containing the lesser values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_min_epu32 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_elementwise_min((__v4su) __V1, (__v4su) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_min_epu32(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_elementwise_min((__v4su)__V1, (__v4su)__V2);
}
/// Compares the corresponding elements of two 128-bit vectors of
@@ -798,10 +789,9 @@ _mm_min_epu32 (__m128i __V1, __m128i __V2)
/// \param __V2
/// A 128-bit vector of [4 x u32].
/// \returns A 128-bit vector of [4 x u32] containing the greater values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_max_epu32 (__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_elementwise_max((__v4su) __V1, (__v4su) __V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_max_epu32(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_elementwise_max((__v4su)__V1, (__v4su)__V2);
}
/* SSE4 Insertion and Extraction from XMM Register Instructions. */
@@ -869,21 +859,24 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 10: Bits [95:64] of parameter \a X are returned. \n
/// 11: Bits [127:96] of parameter \a X are returned.
/// \returns A 32-bit integer containing the extracted 32 bits of float data.
-#define _mm_extract_ps(X, N) \
- __builtin_bit_cast(int, __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)))
+#define _mm_extract_ps(X, N) \
+ __builtin_bit_cast( \
+ int, __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)))
/* Miscellaneous insert and extract macros. */
/* Extract a single-precision float from X at index N into D. */
-#define _MM_EXTRACT_FLOAT(D, X, N) \
- do { (D) = __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)); } while (0)
+#define _MM_EXTRACT_FLOAT(D, X, N) \
+ do { \
+ (D) = __builtin_ia32_vec_ext_v4sf((__v4sf)(__m128)(X), (int)(N)); \
+ } while (0)
/* Or together 2 sets of indexes (X and Y) with the zeroing bits (Z) to create
an index suitable for _mm_insert_ps. */
#define _MM_MK_INSERTPS_NDX(X, Y, Z) (((X) << 6) | ((Y) << 4) | (Z))
/* Extract a float from X at index N into the first index of the return. */
-#define _MM_PICK_OUT_PS(X, N) _mm_insert_ps (_mm_setzero_ps(), (X), \
- _MM_MK_INSERTPS_NDX((N), 0, 0x0e))
+#define _MM_PICK_OUT_PS(X, N) \
+ _mm_insert_ps(_mm_setzero_ps(), (X), _MM_MK_INSERTPS_NDX((N), 0, 0x0e))
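/* A minimal usage sketch (hypothetical helper): pull lane 2 of a vector out
   as a plain float via the convenience macro above; the index must be a
   compile-time constant. */
static float lane2(__m128 __v) {
  float __d;
  _MM_EXTRACT_FLOAT(__d, __v, 2);
  return __d;
}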
/* Insert int into packed integer array at index. */
/// Constructs a 128-bit vector of [16 x i8] by first making a copy of
@@ -926,9 +919,9 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 1110: Bits [119:112] of the result are used for insertion. \n
/// 1111: Bits [127:120] of the result are used for insertion.
/// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi8(X, I, N) \
- ((__m128i)__builtin_ia32_vec_set_v16qi((__v16qi)(__m128i)(X), \
- (int)(I), (int)(N)))
+#define _mm_insert_epi8(X, I, N) \
+ ((__m128i)__builtin_ia32_vec_set_v16qi((__v16qi)(__m128i)(X), (int)(I), \
+ (int)(N)))
/// Constructs a 128-bit vector of [4 x i32] by first making a copy of
/// the 128-bit integer vector parameter, and then inserting the 32-bit
@@ -958,9 +951,9 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 10: Bits [95:64] of the result are used for insertion. \n
/// 11: Bits [127:96] of the result are used for insertion.
/// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi32(X, I, N) \
- ((__m128i)__builtin_ia32_vec_set_v4si((__v4si)(__m128i)(X), \
- (int)(I), (int)(N)))
+#define _mm_insert_epi32(X, I, N) \
+ ((__m128i)__builtin_ia32_vec_set_v4si((__v4si)(__m128i)(X), (int)(I), \
+ (int)(N)))
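/* A minimal usage sketch (hypothetical helper): overwrite lane 1 with a new
   32-bit value; the lane selector must be a compile-time constant. */
static __m128i set_lane1(__m128i __v, int __x) {
  return _mm_insert_epi32(__v, __x, 1);
}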
#ifdef __x86_64__
/// Constructs a 128-bit vector of [2 x i64] by first making a copy of
@@ -989,9 +982,9 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 0: Bits [63:0] of the result are used for insertion. \n
/// 1: Bits [127:64] of the result are used for insertion. \n
/// \returns A 128-bit integer vector containing the constructed values.
-#define _mm_insert_epi64(X, I, N) \
- ((__m128i)__builtin_ia32_vec_set_v2di((__v2di)(__m128i)(X), \
- (long long)(I), (int)(N)))
+#define _mm_insert_epi64(X, I, N) \
+ ((__m128i)__builtin_ia32_vec_set_v2di((__v2di)(__m128i)(X), (long long)(I), \
+ (int)(N)))
#endif /* __x86_64__ */
/* Extract int from packed integer array at index. This returns the element
@@ -1032,8 +1025,8 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// \returns An unsigned integer, whose lower 8 bits are selected from the
/// 128-bit integer vector parameter and the remaining bits are assigned
/// zeros.
-#define _mm_extract_epi8(X, N) \
- ((int)(unsigned char)__builtin_ia32_vec_ext_v16qi((__v16qi)(__m128i)(X), \
+#define _mm_extract_epi8(X, N) \
+ ((int)(unsigned char)__builtin_ia32_vec_ext_v16qi((__v16qi)(__m128i)(X), \
(int)(N)))
/// Extracts a 32-bit element from the 128-bit integer vector of
@@ -1058,10 +1051,9 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 11: Bits [127:96] of the parameter \a X are extracted.
/// \returns An integer, whose lower 32 bits are selected from the 128-bit
/// integer vector parameter and the remaining bits are assigned zeros.
-#define _mm_extract_epi32(X, N) \
+#define _mm_extract_epi32(X, N) \
((int)__builtin_ia32_vec_ext_v4si((__v4si)(__m128i)(X), (int)(N)))
-#ifdef __x86_64__
/// Extracts a 64-bit element from the 128-bit integer vector of
/// [2 x i64], using the immediate value parameter \a N as a selector.
///
@@ -1071,7 +1063,8 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// long long _mm_extract_epi64(__m128i X, const int N);
/// \endcode
///
-/// This intrinsic corresponds to the <c> VPEXTRQ / PEXTRQ </c> instruction.
+/// This intrinsic corresponds to the <c> VPEXTRQ / PEXTRQ </c> instruction
+/// in 64-bit mode.
///
/// \param X
/// A 128-bit integer vector.
@@ -1081,9 +1074,8 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// 0: Bits [63:0] are returned. \n
/// 1: Bits [127:64] are returned. \n
/// \returns A 64-bit integer.
-#define _mm_extract_epi64(X, N) \
+#define _mm_extract_epi64(X, N) \
((long long)__builtin_ia32_vec_ext_v2di((__v2di)(__m128i)(X), (int)(N)))
-#endif /* __x86_64 */
/* SSE4 128-bit Packed Integer Comparisons. */
/// Tests whether the specified bits in a 128-bit integer vector are all
@@ -1098,9 +1090,8 @@ _mm_max_epu32 (__m128i __V1, __m128i __V2)
/// \param __V
/// A 128-bit integer vector selecting which bits to test in operand \a __M.
/// \returns TRUE if the specified bits are all zeros; FALSE otherwise.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_testz_si128(__m128i __M, __m128i __V)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_testz_si128(__m128i __M,
+ __m128i __V) {
return __builtin_ia32_ptestz128((__v2di)__M, (__v2di)__V);
}
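/* A minimal usage sketch (hypothetical helper): PTEST checks whether
   (__M & __V) == 0, so testing a vector against itself reports whether it
   is entirely zero. */
static int is_all_zeros(__m128i __v) {
  return _mm_testz_si128(__v, __v);
}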
@@ -1116,9 +1107,8 @@ _mm_testz_si128(__m128i __M, __m128i __V)
/// \param __V
/// A 128-bit integer vector selecting which bits to test in operand \a __M.
/// \returns TRUE if the specified bits are all ones; FALSE otherwise.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_testc_si128(__m128i __M, __m128i __V)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_testc_si128(__m128i __M,
+ __m128i __V) {
return __builtin_ia32_ptestc128((__v2di)__M, (__v2di)__V);
}
@@ -1135,9 +1125,8 @@ _mm_testc_si128(__m128i __M, __m128i __V)
/// A 128-bit integer vector selecting which bits to test in operand \a __M.
/// \returns TRUE if the specified bits are neither all zeros nor all ones;
/// FALSE otherwise.
-static __inline__ int __DEFAULT_FN_ATTRS
-_mm_testnzc_si128(__m128i __M, __m128i __V)
-{
+static __inline__ int __DEFAULT_FN_ATTRS _mm_testnzc_si128(__m128i __M,
+ __m128i __V) {
return __builtin_ia32_ptestnzc128((__v2di)__M, (__v2di)__V);
}
@@ -1193,7 +1182,7 @@ _mm_testnzc_si128(__m128i __M, __m128i __V)
/// \param V
/// A 128-bit integer vector selecting which bits to test in operand \a M.
/// \returns TRUE if the specified bits are all zeros; FALSE otherwise.
-#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))
+#define _mm_test_all_zeros(M, V) _mm_testz_si128((M), (V))
/* SSE4 64-bit Packed Integer Comparisons. */
/// Compares each of the corresponding 64-bit values of the 128-bit
@@ -1208,9 +1197,8 @@ _mm_testnzc_si128(__m128i __M, __m128i __V)
/// \param __V2
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpeq_epi64(__m128i __V1,
+ __m128i __V2) {
return (__m128i)((__v2di)__V1 == (__v2di)__V2);
}
@@ -1225,15 +1213,16 @@ _mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
/// This intrinsic corresponds to the <c> VPMOVSXBW / PMOVSXBW </c> instruction.
///
/// \param __V
-/// A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are sign-
-/// extended to 16-bit values.
+/// A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are
+/// sign-extended to 16-bit values.
/// \returns A 128-bit vector of [8 x i16] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi8_epi16(__m128i __V)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi16(__m128i __V) {
/* This function always performs a signed extension, but __v16qi is a char
which may be signed or unsigned, so use __v16qs. */
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi);
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3, 4, 5, 6,
+ 7),
+ __v8hi);
}
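/* A minimal usage sketch (hypothetical helper; assumes SSE2's
   _mm_loadl_epi64): widen eight signed bytes from memory to 16-bit lanes. */
static __m128i widen8(const signed char *__p) {
  return _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i *)__p));
}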
/// Sign-extends each of the lower four 8-bit integer elements of a
@@ -1249,12 +1238,11 @@ _mm_cvtepi8_epi16(__m128i __V)
/// A 128-bit vector of [16 x i8]. The lower four 8-bit elements are
/// sign-extended to 32-bit values.
/// \returns A 128-bit vector of [4 x i32] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi8_epi32(__m128i __V)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi32(__m128i __V) {
/* This function always performs a signed extension, but __v16qi is a char
which may be signed or unsigned, so use __v16qs. */
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4si);
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1, 2, 3), __v4si);
}
/// Sign-extends each of the lower two 8-bit integer elements of a
@@ -1270,12 +1258,11 @@ _mm_cvtepi8_epi32(__m128i __V)
/// A 128-bit vector of [16 x i8]. The lower two 8-bit elements are
/// sign-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi8_epi64(__m128i __V)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi8_epi64(__m128i __V) {
/* This function always performs a signed extension, but __v16qi is a char
which may be signed or unsigned, so use __v16qs. */
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1), __v2di);
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v16qs)__V, (__v16qs)__V, 0, 1), __v2di);
}
/// Sign-extends each of the lower four 16-bit integer elements of a
@@ -1291,10 +1278,9 @@ _mm_cvtepi8_epi64(__m128i __V)
/// A 128-bit vector of [8 x i16]. The lower four 16-bit elements are
/// sign-extended to 32-bit values.
/// \returns A 128-bit vector of [4 x i32] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi16_epi32(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4si);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi32(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1, 2, 3), __v4si);
}
/// Sign-extends each of the lower two 16-bit integer elements of a
@@ -1310,10 +1296,9 @@ _mm_cvtepi16_epi32(__m128i __V)
/// A 128-bit vector of [8 x i16]. The lower two 16-bit elements are
/// sign-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi16_epi64(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1), __v2di);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi16_epi64(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v8hi)__V, (__v8hi)__V, 0, 1), __v2di);
}
/// Sign-extends each of the lower two 32-bit integer elements of a
@@ -1329,10 +1314,9 @@ _mm_cvtepi16_epi64(__m128i __V)
/// A 128-bit vector of [4 x i32]. The lower two 32-bit elements are
/// sign-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the sign-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepi32_epi64(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v4si)__V, (__v4si)__V, 0, 1), __v2di);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepi32_epi64(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v4si)__V, (__v4si)__V, 0, 1), __v2di);
}
/* SSE4 Packed Integer Zero-Extension. */
@@ -1349,10 +1333,11 @@ _mm_cvtepi32_epi64(__m128i __V)
/// A 128-bit vector of [16 x i8]. The lower eight 8-bit elements are
/// zero-extended to 16-bit values.
/// \returns A 128-bit vector of [8 x i16] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu8_epi16(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6, 7), __v8hi);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi16(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3, 4, 5, 6,
+ 7),
+ __v8hi);
}
/// Zero-extends each of the lower four 8-bit integer elements of a
@@ -1368,10 +1353,9 @@ _mm_cvtepu8_epi16(__m128i __V)
/// A 128-bit vector of [16 x i8]. The lower four 8-bit elements are
/// zero-extended to 32-bit values.
/// \returns A 128-bit vector of [4 x i32] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu8_epi32(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4si);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi32(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1, 2, 3), __v4si);
}
/// Zero-extends each of the lower two 8-bit integer elements of a
@@ -1387,10 +1371,9 @@ _mm_cvtepu8_epi32(__m128i __V)
/// A 128-bit vector of [16 x i8]. The lower two 8-bit elements are
/// zero-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu8_epi64(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1), __v2di);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu8_epi64(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v16qu)__V, (__v16qu)__V, 0, 1), __v2di);
}
/// Zero-extends each of the lower four 16-bit integer elements of a
@@ -1406,10 +1389,9 @@ _mm_cvtepu8_epi64(__m128i __V)
/// A 128-bit vector of [8 x i16]. The lower four 16-bit elements are
/// zero-extended to 32-bit values.
/// \returns A 128-bit vector of [4 x i32] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu16_epi32(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4si);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi32(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1, 2, 3), __v4si);
}
/// Zero-extends each of the lower two 16-bit integer elements of a
@@ -1425,10 +1407,9 @@ _mm_cvtepu16_epi32(__m128i __V)
/// A 128-bit vector of [8 x i16]. The lower two 16-bit elements are
/// zero-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu16_epi64(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1), __v2di);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu16_epi64(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v8hu)__V, (__v8hu)__V, 0, 1), __v2di);
}
/// Zero-extends each of the lower two 32-bit integer elements of a
@@ -1444,10 +1425,9 @@ _mm_cvtepu16_epi64(__m128i __V)
/// A 128-bit vector of [4 x i32]. The lower two 32-bit elements are
/// zero-extended to 64-bit values.
/// \returns A 128-bit vector of [2 x i64] containing the zero-extended values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cvtepu32_epi64(__m128i __V)
-{
- return (__m128i)__builtin_convertvector(__builtin_shufflevector((__v4su)__V, (__v4su)__V, 0, 1), __v2di);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cvtepu32_epi64(__m128i __V) {
+ return (__m128i) __builtin_convertvector(
+ __builtin_shufflevector((__v4su)__V, (__v4su)__V, 0, 1), __v2di);
}
/* SSE4 Pack with Unsigned Saturation. */
@@ -1473,10 +1453,9 @@ _mm_cvtepu32_epi64(__m128i __V)
/// less than 0x0000 are saturated to 0x0000. The converted [4 x i16] values
/// are written to the higher 64 bits of the result.
/// \returns A 128-bit vector of [8 x i16] containing the converted values.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_packus_epi32(__m128i __V1, __m128i __V2)
-{
- return (__m128i) __builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_packus_epi32(__m128i __V1,
+ __m128i __V2) {
+ return (__m128i)__builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);
}
/* SSE4 Multiple Packed Sums of Absolute Difference. */
@@ -1515,9 +1494,9 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2)
/// \endcode
/// \returns A 128-bit integer vector containing the sums of the sets of
/// absolute differences between both operands.
-#define _mm_mpsadbw_epu8(X, Y, M) \
- ((__m128i) __builtin_ia32_mpsadbw128((__v16qi)(__m128i)(X), \
- (__v16qi)(__m128i)(Y), (M)))
+#define _mm_mpsadbw_epu8(X, Y, M) \
+ ((__m128i)__builtin_ia32_mpsadbw128((__v16qi)(__m128i)(X), \
+ (__v16qi)(__m128i)(Y), (M)))
/// Finds the minimum unsigned 16-bit element in the input 128-bit
/// vector of [8 x u16] and returns it along with its index.
@@ -1532,10 +1511,8 @@ _mm_packus_epi32(__m128i __V1, __m128i __V2)
/// \returns A 128-bit value where bits [15:0] contain the minimum value found
/// in parameter \a __V, bits [18:16] contain the index of the minimum value
/// and the remaining bits are set to 0.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_minpos_epu16(__m128i __V)
-{
- return (__m128i) __builtin_ia32_phminposuw128((__v8hi)__V);
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_minpos_epu16(__m128i __V) {
+ return (__m128i)__builtin_ia32_phminposuw128((__v8hi)__V);
}
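/* A minimal usage sketch (hypothetical helper): unpack the combined result.
   Bits [15:0] hold the minimum and bits [18:16] its index; since the
   remaining bits are zero, two 16-bit extracts recover both. */
static void min_and_index(__m128i __v, unsigned *__min, unsigned *__idx) {
  __m128i __r = _mm_minpos_epu16(__v);
  *__min = (unsigned)_mm_extract_epi16(__r, 0);
  *__idx = (unsigned)_mm_extract_epi16(__r, 1);
}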
/* Handle the sse4.2 definitions here. */
@@ -1544,33 +1521,34 @@ _mm_minpos_epu16(__m128i __V)
so we'll do the same. */
#undef __DEFAULT_FN_ATTRS
-#define __DEFAULT_FN_ATTRS __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
+#define __DEFAULT_FN_ATTRS \
+ __attribute__((__always_inline__, __nodebug__, __target__("sse4.2")))
/* These specify the type of data that we're comparing. */
-#define _SIDD_UBYTE_OPS 0x00
-#define _SIDD_UWORD_OPS 0x01
-#define _SIDD_SBYTE_OPS 0x02
-#define _SIDD_SWORD_OPS 0x03
+#define _SIDD_UBYTE_OPS 0x00
+#define _SIDD_UWORD_OPS 0x01
+#define _SIDD_SBYTE_OPS 0x02
+#define _SIDD_SWORD_OPS 0x03
/* These specify the type of comparison operation. */
-#define _SIDD_CMP_EQUAL_ANY 0x00
-#define _SIDD_CMP_RANGES 0x04
-#define _SIDD_CMP_EQUAL_EACH 0x08
-#define _SIDD_CMP_EQUAL_ORDERED 0x0c
+#define _SIDD_CMP_EQUAL_ANY 0x00
+#define _SIDD_CMP_RANGES 0x04
+#define _SIDD_CMP_EQUAL_EACH 0x08
+#define _SIDD_CMP_EQUAL_ORDERED 0x0c
/* These macros specify the polarity of the operation. */
-#define _SIDD_POSITIVE_POLARITY 0x00
-#define _SIDD_NEGATIVE_POLARITY 0x10
-#define _SIDD_MASKED_POSITIVE_POLARITY 0x20
-#define _SIDD_MASKED_NEGATIVE_POLARITY 0x30
+#define _SIDD_POSITIVE_POLARITY 0x00
+#define _SIDD_NEGATIVE_POLARITY 0x10
+#define _SIDD_MASKED_POSITIVE_POLARITY 0x20
+#define _SIDD_MASKED_NEGATIVE_POLARITY 0x30
/* These macros are used in _mm_cmpXstri() to specify the return. */
-#define _SIDD_LEAST_SIGNIFICANT 0x00
-#define _SIDD_MOST_SIGNIFICANT 0x40
+#define _SIDD_LEAST_SIGNIFICANT 0x00
+#define _SIDD_MOST_SIGNIFICANT 0x40
/* These macros are used in _mm_cmpXstrm() to specify the return. */
-#define _SIDD_BIT_MASK 0x00
-#define _SIDD_UNIT_MASK 0x40
+#define _SIDD_BIT_MASK 0x00
+#define _SIDD_UNIT_MASK 0x40
/* SSE4.2 Packed Comparison Intrinsics. */
/// Uses the immediate operand \a M to perform a comparison of string
@@ -1625,8 +1603,8 @@ _mm_minpos_epu16(__m128i __V)
/// repeating each bit 8 or 16 times).
/// \returns Returns a 128-bit integer vector representing the result mask of
/// the comparison.
-#define _mm_cmpistrm(A, B, M) \
- ((__m128i)__builtin_ia32_pcmpistrm128((__v16qi)(__m128i)(A), \
+#define _mm_cmpistrm(A, B, M) \
+ ((__m128i)__builtin_ia32_pcmpistrm128((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@@ -1679,9 +1657,9 @@ _mm_minpos_epu16(__m128i __V)
/// 0: The index of the least significant set bit. \n
/// 1: The index of the most significant set bit. \n
/// \returns Returns an integer representing the result index of the comparison.
-#define _mm_cmpistri(A, B, M) \
- ((int)__builtin_ia32_pcmpistri128((__v16qi)(__m128i)(A), \
- (__v16qi)(__m128i)(B), (int)(M)))
+#define _mm_cmpistri(A, B, M) \
+ ((int)__builtin_ia32_pcmpistri128((__v16qi)(__m128i)(A), \
+ (__v16qi)(__m128i)(B), (int)(M)))
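/* A minimal usage sketch (hypothetical helper): index of the first byte of
   __text that matches any byte of __set, or 16 if nothing in the 16-byte
   block matches. The control word must be a compile-time constant. */
static int first_from_set(__m128i __set, __m128i __text) {
  return _mm_cmpistri(__set, __text,
                      _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY |
                          _SIDD_LEAST_SIGNIFICANT);
}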
/// Uses the immediate operand \a M to perform a comparison of string
/// data with explicitly defined lengths that is contained in source operands
@@ -1739,9 +1717,9 @@ _mm_minpos_epu16(__m128i __V)
/// repeating each bit 8 or 16 times). \n
/// \returns Returns a 128-bit integer vector representing the result mask of
/// the comparison.
-#define _mm_cmpestrm(A, LA, B, LB, M) \
- ((__m128i)__builtin_ia32_pcmpestrm128((__v16qi)(__m128i)(A), (int)(LA), \
- (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestrm(A, LA, B, LB, M) \
+ ((__m128i)__builtin_ia32_pcmpestrm128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
(int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@@ -1798,9 +1776,9 @@ _mm_minpos_epu16(__m128i __V)
/// 0: The index of the least significant set bit. \n
/// 1: The index of the most significant set bit. \n
/// \returns Returns an integer representing the result index of the comparison.
-#define _mm_cmpestri(A, LA, B, LB, M) \
- ((int)__builtin_ia32_pcmpestri128((__v16qi)(__m128i)(A), (int)(LA), \
- (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestri(A, LA, B, LB, M) \
+ ((int)__builtin_ia32_pcmpestri128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
(int)(M)))
/* SSE4.2 Packed Comparison Intrinsics and EFlag Reading. */
@@ -1850,8 +1828,8 @@ _mm_minpos_epu16(__m128i __V)
/// to the size of \a A or \a B. \n
/// \returns Returns 1 if the bit mask is zero and the length of the string in
/// \a B is the maximum; otherwise, returns 0.
-#define _mm_cmpistra(A, B, M) \
- ((int)__builtin_ia32_pcmpistria128((__v16qi)(__m128i)(A), \
+#define _mm_cmpistra(A, B, M) \
+ ((int)__builtin_ia32_pcmpistria128((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@@ -1899,8 +1877,8 @@ _mm_minpos_epu16(__m128i __V)
/// 11: Negate the bit mask only for bits with an index less than or equal
/// to the size of \a A or \a B.
/// \returns Returns 1 if the bit mask is non-zero, otherwise, returns 0.
-#define _mm_cmpistrc(A, B, M) \
- ((int)__builtin_ia32_pcmpistric128((__v16qi)(__m128i)(A), \
+#define _mm_cmpistrc(A, B, M) \
+ ((int)__builtin_ia32_pcmpistric128((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@@ -1947,8 +1925,8 @@ _mm_minpos_epu16(__m128i __V)
/// 11: Negate the bit mask only for bits with an index less than or equal
/// to the size of \a A or \a B. \n
/// \returns Returns bit 0 of the resulting bit mask.
-#define _mm_cmpistro(A, B, M) \
- ((int)__builtin_ia32_pcmpistrio128((__v16qi)(__m128i)(A), \
+#define _mm_cmpistro(A, B, M) \
+ ((int)__builtin_ia32_pcmpistrio128((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@@ -1997,8 +1975,8 @@ _mm_minpos_epu16(__m128i __V)
/// to the size of \a A or \a B. \n
/// \returns Returns 1 if the length of the string in \a A is less than the
/// maximum, otherwise, returns 0.
-#define _mm_cmpistrs(A, B, M) \
- ((int)__builtin_ia32_pcmpistris128((__v16qi)(__m128i)(A), \
+#define _mm_cmpistrs(A, B, M) \
+ ((int)__builtin_ia32_pcmpistris128((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@@ -2047,8 +2025,8 @@ _mm_minpos_epu16(__m128i __V)
/// to the size of \a A or \a B.
/// \returns Returns 1 if the length of the string in \a B is less than the
/// maximum, otherwise, returns 0.
-#define _mm_cmpistrz(A, B, M) \
- ((int)__builtin_ia32_pcmpistriz128((__v16qi)(__m128i)(A), \
+#define _mm_cmpistrz(A, B, M) \
+ ((int)__builtin_ia32_pcmpistriz128((__v16qi)(__m128i)(A), \
(__v16qi)(__m128i)(B), (int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@@ -2101,9 +2079,9 @@ _mm_minpos_epu16(__m128i __V)
/// to the size of \a A or \a B.
/// \returns Returns 1 if the bit mask is zero and the length of the string in
/// \a B is the maximum, otherwise, returns 0.
-#define _mm_cmpestra(A, LA, B, LB, M) \
- ((int)__builtin_ia32_pcmpestria128((__v16qi)(__m128i)(A), (int)(LA), \
- (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestra(A, LA, B, LB, M) \
+ ((int)__builtin_ia32_pcmpestria128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
(int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@@ -2155,9 +2133,9 @@ _mm_minpos_epu16(__m128i __V)
/// 11: Negate the bit mask only for bits with an index less than or equal
/// to the size of \a A or \a B. \n
/// \returns Returns 1 if the resulting mask is non-zero, otherwise, returns 0.
-#define _mm_cmpestrc(A, LA, B, LB, M) \
- ((int)__builtin_ia32_pcmpestric128((__v16qi)(__m128i)(A), (int)(LA), \
- (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestrc(A, LA, B, LB, M) \
+ ((int)__builtin_ia32_pcmpestric128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
(int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@@ -2208,9 +2186,9 @@ _mm_minpos_epu16(__m128i __V)
/// 11: Negate the bit mask only for bits with an index less than or equal
/// to the size of \a A or \a B.
/// \returns Returns bit 0 of the resulting bit mask.
-#define _mm_cmpestro(A, LA, B, LB, M) \
- ((int)__builtin_ia32_pcmpestrio128((__v16qi)(__m128i)(A), (int)(LA), \
- (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestro(A, LA, B, LB, M) \
+ ((int)__builtin_ia32_pcmpestrio128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
(int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@@ -2263,9 +2241,9 @@ _mm_minpos_epu16(__m128i __V)
/// to the size of \a A or \a B. \n
/// \returns Returns 1 if the length of the string in \a A is less than the
/// maximum, otherwise, returns 0.
-#define _mm_cmpestrs(A, LA, B, LB, M) \
- ((int)__builtin_ia32_pcmpestris128((__v16qi)(__m128i)(A), (int)(LA), \
- (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestrs(A, LA, B, LB, M) \
+ ((int)__builtin_ia32_pcmpestris128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
(int)(M)))
/// Uses the immediate operand \a M to perform a comparison of string
@@ -2317,9 +2295,9 @@ _mm_minpos_epu16(__m128i __V)
/// to the size of \a A or \a B.
/// \returns Returns 1 if the length of the string in \a B is less than the
/// maximum, otherwise, returns 0.
-#define _mm_cmpestrz(A, LA, B, LB, M) \
- ((int)__builtin_ia32_pcmpestriz128((__v16qi)(__m128i)(A), (int)(LA), \
- (__v16qi)(__m128i)(B), (int)(LB), \
+#define _mm_cmpestrz(A, LA, B, LB, M) \
+ ((int)__builtin_ia32_pcmpestriz128((__v16qi)(__m128i)(A), (int)(LA), \
+ (__v16qi)(__m128i)(B), (int)(LB), \
(int)(M)))
/* SSE4.2 Compare Packed Data -- Greater Than. */
@@ -2336,9 +2314,8 @@ _mm_minpos_epu16(__m128i __V)
/// \param __V2
/// A 128-bit integer vector.
/// \returns A 128-bit integer vector containing the comparison results.
-static __inline__ __m128i __DEFAULT_FN_ATTRS
-_mm_cmpgt_epi64(__m128i __V1, __m128i __V2)
-{
+static __inline__ __m128i __DEFAULT_FN_ATTRS _mm_cmpgt_epi64(__m128i __V1,
+ __m128i __V2) {
return (__m128i)((__v2di)__V1 > (__v2di)__V2);
}
diff --git a/contrib/llvm-project/clang/lib/Headers/stdbool.h b/contrib/llvm-project/clang/lib/Headers/stdbool.h
index 2525363dd02a..f0e588532e16 100644
--- a/contrib/llvm-project/clang/lib/Headers/stdbool.h
+++ b/contrib/llvm-project/clang/lib/Headers/stdbool.h
@@ -10,8 +10,13 @@
#ifndef __STDBOOL_H
#define __STDBOOL_H
-/* Don't define bool, true, and false in C++, except as a GNU extension. */
-#ifndef __cplusplus
+#define __bool_true_false_are_defined 1
+
+#if __STDC_VERSION__ > 201710L
+/* FIXME: We should be issuing a deprecation warning here, but cannot yet due
+ * to system headers which include this header file unconditionally.
+ */
+#elif !defined(__cplusplus)
#define bool _Bool
#define true 1
#define false 0
@@ -20,12 +25,10 @@
#define _Bool bool
#if __cplusplus < 201103L
/* For C++98, define bool, false, true as a GNU extension. */
-#define bool bool
+#define bool bool
#define false false
-#define true true
+#define true true
#endif
#endif
-#define __bool_true_false_are_defined 1
-
#endif /* __STDBOOL_H */
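/* A minimal sketch of the gate's intended effect (hedged, based on the
   conditionals above): in C17 and earlier, bool/true/false come from the
   macros; under C2x the header defines only __bool_true_false_are_defined,
   since the language itself is expected to provide the keywords. */
#include <stdbool.h>
bool ok = true; /* macro expansion before C2x, language keyword after */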
diff --git a/contrib/llvm-project/clang/lib/Headers/stddef.h b/contrib/llvm-project/clang/lib/Headers/stddef.h
index 15acd4427ca1..a15d21b55317 100644
--- a/contrib/llvm-project/clang/lib/Headers/stddef.h
+++ b/contrib/llvm-project/clang/lib/Headers/stddef.h
@@ -62,7 +62,7 @@ typedef __SIZE_TYPE__ rsize_t;
#endif /* defined(__need_STDDEF_H_misc) */
#if defined(__need_wchar_t)
-#ifndef __cplusplus
+#if !defined(__cplusplus) || (defined(_MSC_VER) && !_NATIVE_WCHAR_T_DEFINED)
/* Always define wchar_t when modules are available. */
#if !defined(_WCHAR_T) || __has_feature(modules)
#if !__has_feature(modules)
diff --git a/contrib/llvm-project/clang/lib/Headers/stdnoreturn.h b/contrib/llvm-project/clang/lib/Headers/stdnoreturn.h
index e83cd8153752..7d19fa7b2f2b 100644
--- a/contrib/llvm-project/clang/lib/Headers/stdnoreturn.h
+++ b/contrib/llvm-project/clang/lib/Headers/stdnoreturn.h
@@ -13,4 +13,17 @@
#define noreturn _Noreturn
#define __noreturn_is_defined 1
+#if __STDC_VERSION__ > 201710L && \
+ !defined(_CLANG_DISABLE_CRT_DEPRECATION_WARNINGS)
+/* The noreturn macro is deprecated in C2x. We do not mark it as such because
+ including the header file in C2x is also deprecated and we do not want to
+ issue a confusing diagnostic for code which includes <stdnoreturn.h>
+ followed by code that writes [[noreturn]]. The issue with such code is not
+ with the attribute, or the use of 'noreturn', but the inclusion of the
+ header. */
+/* FIXME: We should be issuing a deprecation warning here, but cannot yet due
+ * to system headers which include this header file unconditionally.
+ */
+#endif
+
#endif /* __STDNORETURN_H */
diff --git a/contrib/llvm-project/clang/lib/Headers/uintrintrin.h b/contrib/llvm-project/clang/lib/Headers/uintrintrin.h
index e3839dcebe1e..135dc814c72e 100644
--- a/contrib/llvm-project/clang/lib/Headers/uintrintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/uintrintrin.h
@@ -39,9 +39,9 @@ struct __uintr_frame
///
/// This intrinsic corresponds to the <c> CLUI </c> instruction.
///
-/// \operation
+/// \code{.operation}
/// UIF := 0
-/// \endoperation
+/// \endcode
static __inline__ void __DEFAULT_FN_ATTRS
_clui (void)
{
@@ -60,9 +60,9 @@ _clui (void)
///
/// This intrinsic corresponds to the <c> STUI </c> instruction.
///
-/// \operation
+/// \code{.operation}
/// UIF := 1
-/// \endoperation
+/// \endcode
static __inline__ void __DEFAULT_FN_ATTRS
_stui (void)
{
@@ -81,7 +81,7 @@ _stui (void)
///
/// \returns The current value of the user interrupt flag (UIF).
///
-/// \operation
+/// \code{.operation}
/// CF := UIF
/// ZF := 0
/// AF := 0
@@ -89,7 +89,7 @@ _stui (void)
/// PF := 0
/// SF := 0
/// dst := CF
-/// \endoperation
+/// \endcode
static __inline__ unsigned char __DEFAULT_FN_ATTRS
_testui (void)
{
@@ -110,7 +110,7 @@ _testui (void)
/// Index of user-interrupt target table entry in user-interrupt target
/// table.
///
-/// \operation
+/// \code{.operation}
/// IF __a > UITTSZ
/// GP (0)
/// FI
@@ -143,7 +143,7 @@ _testui (void)
/// SendOrdinaryIPI(tempUPID.NV, tempUPID.NDST[15:8])
/// FI
/// FI
-/// \endoperation
+/// \endcode
static __inline__ void __DEFAULT_FN_ATTRS
_senduipi (unsigned long long __a)
{
diff --git a/contrib/llvm-project/clang/lib/Headers/unwind.h b/contrib/llvm-project/clang/lib/Headers/unwind.h
index 6e069798f02d..971a62da0d21 100644
--- a/contrib/llvm-project/clang/lib/Headers/unwind.h
+++ b/contrib/llvm-project/clang/lib/Headers/unwind.h
@@ -62,7 +62,8 @@ typedef intptr_t _sleb128_t;
typedef uintptr_t _uleb128_t;
struct _Unwind_Context;
-#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || defined(__ARM_DWARF_EH__))
+#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || \
+ defined(__ARM_DWARF_EH__) || defined(__SEH__))
struct _Unwind_Control_Block;
typedef struct _Unwind_Control_Block _Unwind_Exception; /* Alias */
#else
@@ -72,7 +73,7 @@ typedef struct _Unwind_Exception _Unwind_Exception;
typedef enum {
_URC_NO_REASON = 0,
#if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
- !defined(__ARM_DWARF_EH__)
+ !defined(__ARM_DWARF_EH__) && !defined(__SEH__)
_URC_OK = 0, /* used by ARM EHABI */
#endif
_URC_FOREIGN_EXCEPTION_CAUGHT = 1,
@@ -86,7 +87,7 @@ typedef enum {
_URC_INSTALL_CONTEXT = 7,
_URC_CONTINUE_UNWIND = 8,
#if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
- !defined(__ARM_DWARF_EH__)
+ !defined(__ARM_DWARF_EH__) && !defined(__SEH__)
_URC_FAILURE = 9 /* used by ARM EHABI */
#endif
} _Unwind_Reason_Code;
@@ -103,7 +104,8 @@ typedef enum {
typedef void (*_Unwind_Exception_Cleanup_Fn)(_Unwind_Reason_Code,
_Unwind_Exception *);
-#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || defined(__ARM_DWARF_EH__))
+#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || \
+ defined(__ARM_DWARF_EH__) || defined(__SEH__))
typedef struct _Unwind_Control_Block _Unwind_Control_Block;
typedef uint32_t _Unwind_EHT_Header;
@@ -167,7 +169,8 @@ typedef _Unwind_Personality_Fn __personality_routine;
typedef _Unwind_Reason_Code (*_Unwind_Trace_Fn)(struct _Unwind_Context *,
void *);
-#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || defined(__ARM_DWARF_EH__))
+#if defined(__arm__) && !(defined(__USING_SJLJ_EXCEPTIONS__) || \
+ defined(__ARM_DWARF_EH__) || defined(__SEH__))
typedef enum {
_UVRSC_CORE = 0, /* integer register */
_UVRSC_VFP = 1, /* vfp */
diff --git a/contrib/llvm-project/clang/lib/Headers/velintrin.h b/contrib/llvm-project/clang/lib/Headers/velintrin.h
new file mode 100644
index 000000000000..69b1fba296d4
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/velintrin.h
@@ -0,0 +1,71 @@
+/*===---- velintrin.h - VEL intrinsics for VE ------------------------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __VEL_INTRIN_H__
+#define __VEL_INTRIN_H__
+
+// Vector registers
+typedef double __vr __attribute__((__vector_size__(2048)));
+
+// Vector mask registers
+#if __STDC_VERSION__ >= 199901L
+// For C99
+typedef _Bool __vm __attribute__((ext_vector_type(256)));
+typedef _Bool __vm256 __attribute__((ext_vector_type(256)));
+typedef _Bool __vm512 __attribute__((ext_vector_type(512)));
+#else
+#ifdef __cplusplus
+// For C++
+typedef bool __vm __attribute__((ext_vector_type(256)));
+typedef bool __vm256 __attribute__((ext_vector_type(256)));
+typedef bool __vm512 __attribute__((ext_vector_type(512)));
+#else
+#error need C++ or C99 to use vector intrinsics for VE
+#endif
+#endif
+
+enum VShuffleCodes {
+ VE_VSHUFFLE_YUYU = 0,
+ VE_VSHUFFLE_YUYL = 1,
+ VE_VSHUFFLE_YUZU = 2,
+ VE_VSHUFFLE_YUZL = 3,
+ VE_VSHUFFLE_YLYU = 4,
+ VE_VSHUFFLE_YLYL = 5,
+ VE_VSHUFFLE_YLZU = 6,
+ VE_VSHUFFLE_YLZL = 7,
+ VE_VSHUFFLE_ZUYU = 8,
+ VE_VSHUFFLE_ZUYL = 9,
+ VE_VSHUFFLE_ZUZU = 10,
+ VE_VSHUFFLE_ZUZL = 11,
+ VE_VSHUFFLE_ZLYU = 12,
+ VE_VSHUFFLE_ZLYL = 13,
+ VE_VSHUFFLE_ZLZU = 14,
+ VE_VSHUFFLE_ZLZL = 15,
+};
+
+// Use generated intrinsic name definitions
+#include <velintrin_gen.h>
+
+// Use helper functions
+#include <velintrin_approx.h>
+
+// pack
+
+#define _vel_pack_f32p __builtin_ve_vl_pack_f32p
+#define _vel_pack_f32a __builtin_ve_vl_pack_f32a
+
+static inline unsigned long int _vel_pack_i32(unsigned int a, unsigned int b) {
+ return (((unsigned long int)a) << 32) | b;
+}
+
+#define _vel_extract_vm512u(vm) __builtin_ve_vl_extract_vm512u(vm)
+#define _vel_extract_vm512l(vm) __builtin_ve_vl_extract_vm512l(vm)
+#define _vel_insert_vm512u(vm512, vm) __builtin_ve_vl_insert_vm512u(vm512, vm)
+#define _vel_insert_vm512l(vm512, vm) __builtin_ve_vl_insert_vm512l(vm512, vm)
+
+#endif
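/* A minimal usage sketch for the pack helper above (hypothetical values;
   assumes a VE target): the first argument lands in the upper 32 bits. */
unsigned long int packed = _vel_pack_i32(0xDEADBEEFu, 0x01234567u);
/* packed == 0xDEADBEEF01234567 */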
diff --git a/contrib/llvm-project/clang/lib/Headers/velintrin_approx.h b/contrib/llvm-project/clang/lib/Headers/velintrin_approx.h
new file mode 100644
index 000000000000..89d270fef3c7
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/velintrin_approx.h
@@ -0,0 +1,120 @@
+/*===---- velintrin_approx.h - VEL intrinsics helper for VE ----------------===
+ *
+ * Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+ * See https://llvm.org/LICENSE.txt for license information.
+ * SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+ *
+ *===-----------------------------------------------------------------------===
+ */
+#ifndef __VEL_INTRIN_APPROX_H__
+#define __VEL_INTRIN_APPROX_H__
+
+static inline __vr _vel_approx_vfdivs_vvvl(__vr v0, __vr v1, int l) {
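+  /* Newton-Raphson scheme: v5 is the hardware reciprocal estimate of v1,
+     v4 = 1 - v1*v5 its residual, and v3 the refined reciprocal; two
+     further fused residual corrections then sharpen the quotient v0/v1. */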
+ float s0;
+ __vr v2, v3, v4, v5;
+ v5 = _vel_vrcps_vvl(v1, l);
+ s0 = 1.0;
+ v4 = _vel_vfnmsbs_vsvvl(s0, v1, v5, l);
+ v3 = _vel_vfmads_vvvvl(v5, v5, v4, l);
+ v2 = _vel_vfmuls_vvvl(v0, v3, l);
+ v4 = _vel_vfnmsbs_vvvvl(v0, v2, v1, l);
+ v2 = _vel_vfmads_vvvvl(v2, v5, v4, l);
+ v0 = _vel_vfnmsbs_vvvvl(v0, v2, v1, l);
+ v0 = _vel_vfmads_vvvvl(v2, v3, v0, l);
+ return v0;
+}
+
+static inline __vr _vel_approx_pvfdiv_vvvl(__vr v0, __vr v1, int l) {
+ float s0;
+ __vr v2, v3, v4, v5;
+ v5 = _vel_pvrcp_vvl(v1, l);
+ s0 = 1.0;
+ v4 = _vel_pvfnmsb_vsvvl(s0, v1, v5, l);
+ v3 = _vel_pvfmad_vvvvl(v5, v5, v4, l);
+ v2 = _vel_pvfmul_vvvl(v0, v3, l);
+ v4 = _vel_pvfnmsb_vvvvl(v0, v2, v1, l);
+ v2 = _vel_pvfmad_vvvvl(v2, v5, v4, l);
+ v0 = _vel_pvfnmsb_vvvvl(v0, v2, v1, l);
+ v0 = _vel_pvfmad_vvvvl(v2, v3, v0, l);
+ return v0;
+}
+
+static inline __vr _vel_approx_vfdivs_vsvl(float s0, __vr v0, int l) {
+ float s1;
+ __vr v1, v2, v3, v4;
+ v4 = _vel_vrcps_vvl(v0, l);
+ s1 = 1.0;
+ v2 = _vel_vfnmsbs_vsvvl(s1, v0, v4, l);
+ v2 = _vel_vfmads_vvvvl(v4, v4, v2, l);
+ v1 = _vel_vfmuls_vsvl(s0, v2, l);
+ v3 = _vel_vfnmsbs_vsvvl(s0, v1, v0, l);
+ v1 = _vel_vfmads_vvvvl(v1, v4, v3, l);
+ v3 = _vel_vfnmsbs_vsvvl(s0, v1, v0, l);
+ v0 = _vel_vfmads_vvvvl(v1, v2, v3, l);
+ return v0;
+}
+
+static inline __vr _vel_approx_vfdivs_vvsl(__vr v0, float s0, int l) {
+ float s1;
+ __vr v1, v2;
+ s1 = 1.0f / s0;
+ v1 = _vel_vfmuls_vsvl(s1, v0, l);
+ v2 = _vel_vfnmsbs_vvsvl(v0, s0, v1, l);
+ v0 = _vel_vfmads_vvsvl(v1, s1, v2, l);
+ return v0;
+}
+
+static inline __vr _vel_approx_vfdivd_vsvl(double s0, __vr v0, int l) {
+ __vr v1, v2, v3;
+ v2 = _vel_vrcpd_vvl(v0, l);
+ double s1 = 1.0;
+ v3 = _vel_vfnmsbd_vsvvl(s1, v0, v2, l);
+ v2 = _vel_vfmadd_vvvvl(v2, v2, v3, l);
+ v1 = _vel_vfnmsbd_vsvvl(s1, v0, v2, l);
+ v1 = _vel_vfmadd_vvvvl(v2, v2, v1, l);
+ v1 = _vel_vaddul_vsvl(1, v1, l);
+ v3 = _vel_vfnmsbd_vsvvl(s1, v0, v1, l);
+ v3 = _vel_vfmadd_vvvvl(v1, v1, v3, l);
+ v1 = _vel_vfmuld_vsvl(s0, v3, l);
+ v0 = _vel_vfnmsbd_vsvvl(s0, v1, v0, l);
+ v0 = _vel_vfmadd_vvvvl(v1, v3, v0, l);
+ return v0;
+}
+
+static inline __vr _vel_approx_vfsqrtd_vvl(__vr v0, int l) {
+ double s0, s1;
+ __vr v1, v2, v3;
+ v2 = _vel_vrsqrtdnex_vvl(v0, l);
+ v1 = _vel_vfmuld_vvvl(v0, v2, l);
+ s0 = 1.0;
+ s1 = 0.5;
+ v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l);
+ v3 = _vel_vfmuld_vsvl(s1, v3, l);
+ v2 = _vel_vfmadd_vvvvl(v2, v2, v3, l);
+ v1 = _vel_vfmuld_vvvl(v0, v2, l);
+ v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l);
+ v3 = _vel_vfmuld_vsvl(s1, v3, l);
+ v0 = _vel_vfmadd_vvvvl(v1, v1, v3, l);
+ return v0;
+}
+
+static inline __vr _vel_approx_vfsqrts_vvl(__vr v0, int l) {
+ float s0, s1;
+ __vr v1, v2, v3;
+ v0 = _vel_vcvtds_vvl(v0, l);
+ v2 = _vel_vrsqrtdnex_vvl(v0, l);
+ v1 = _vel_vfmuld_vvvl(v0, v2, l);
+ s0 = 1.0;
+ s1 = 0.5;
+ v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l);
+ v3 = _vel_vfmuld_vsvl(s1, v3, l);
+ v2 = _vel_vfmadd_vvvvl(v2, v2, v3, l);
+ v1 = _vel_vfmuld_vvvl(v0, v2, l);
+ v3 = _vel_vfnmsbd_vsvvl(s0, v1, v2, l);
+ v3 = _vel_vfmuld_vsvl(s1, v3, l);
+ v0 = _vel_vfmadd_vvvvl(v1, v1, v3, l);
+ v0 = _vel_vcvtsd_vvl(v0, l);
+ return v0;
+}
+
+#endif
diff --git a/contrib/llvm-project/clang/lib/Headers/velintrin_gen.h b/contrib/llvm-project/clang/lib/Headers/velintrin_gen.h
new file mode 100644
index 000000000000..845c0da2ffa2
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Headers/velintrin_gen.h
@@ -0,0 +1,1257 @@
+#define _vel_vld_vssl __builtin_ve_vl_vld_vssl
+#define _vel_vld_vssvl __builtin_ve_vl_vld_vssvl
+#define _vel_vldnc_vssl __builtin_ve_vl_vldnc_vssl
+#define _vel_vldnc_vssvl __builtin_ve_vl_vldnc_vssvl
+#define _vel_vldu_vssl __builtin_ve_vl_vldu_vssl
+#define _vel_vldu_vssvl __builtin_ve_vl_vldu_vssvl
+#define _vel_vldunc_vssl __builtin_ve_vl_vldunc_vssl
+#define _vel_vldunc_vssvl __builtin_ve_vl_vldunc_vssvl
+#define _vel_vldlsx_vssl __builtin_ve_vl_vldlsx_vssl
+#define _vel_vldlsx_vssvl __builtin_ve_vl_vldlsx_vssvl
+#define _vel_vldlsxnc_vssl __builtin_ve_vl_vldlsxnc_vssl
+#define _vel_vldlsxnc_vssvl __builtin_ve_vl_vldlsxnc_vssvl
+#define _vel_vldlzx_vssl __builtin_ve_vl_vldlzx_vssl
+#define _vel_vldlzx_vssvl __builtin_ve_vl_vldlzx_vssvl
+#define _vel_vldlzxnc_vssl __builtin_ve_vl_vldlzxnc_vssl
+#define _vel_vldlzxnc_vssvl __builtin_ve_vl_vldlzxnc_vssvl
+#define _vel_vld2d_vssl __builtin_ve_vl_vld2d_vssl
+#define _vel_vld2d_vssvl __builtin_ve_vl_vld2d_vssvl
+#define _vel_vld2dnc_vssl __builtin_ve_vl_vld2dnc_vssl
+#define _vel_vld2dnc_vssvl __builtin_ve_vl_vld2dnc_vssvl
+#define _vel_vldu2d_vssl __builtin_ve_vl_vldu2d_vssl
+#define _vel_vldu2d_vssvl __builtin_ve_vl_vldu2d_vssvl
+#define _vel_vldu2dnc_vssl __builtin_ve_vl_vldu2dnc_vssl
+#define _vel_vldu2dnc_vssvl __builtin_ve_vl_vldu2dnc_vssvl
+#define _vel_vldl2dsx_vssl __builtin_ve_vl_vldl2dsx_vssl
+#define _vel_vldl2dsx_vssvl __builtin_ve_vl_vldl2dsx_vssvl
+#define _vel_vldl2dsxnc_vssl __builtin_ve_vl_vldl2dsxnc_vssl
+#define _vel_vldl2dsxnc_vssvl __builtin_ve_vl_vldl2dsxnc_vssvl
+#define _vel_vldl2dzx_vssl __builtin_ve_vl_vldl2dzx_vssl
+#define _vel_vldl2dzx_vssvl __builtin_ve_vl_vldl2dzx_vssvl
+#define _vel_vldl2dzxnc_vssl __builtin_ve_vl_vldl2dzxnc_vssl
+#define _vel_vldl2dzxnc_vssvl __builtin_ve_vl_vldl2dzxnc_vssvl
+#define _vel_vst_vssl __builtin_ve_vl_vst_vssl
+#define _vel_vst_vssml __builtin_ve_vl_vst_vssml
+#define _vel_vstnc_vssl __builtin_ve_vl_vstnc_vssl
+#define _vel_vstnc_vssml __builtin_ve_vl_vstnc_vssml
+#define _vel_vstot_vssl __builtin_ve_vl_vstot_vssl
+#define _vel_vstot_vssml __builtin_ve_vl_vstot_vssml
+#define _vel_vstncot_vssl __builtin_ve_vl_vstncot_vssl
+#define _vel_vstncot_vssml __builtin_ve_vl_vstncot_vssml
+#define _vel_vstu_vssl __builtin_ve_vl_vstu_vssl
+#define _vel_vstu_vssml __builtin_ve_vl_vstu_vssml
+#define _vel_vstunc_vssl __builtin_ve_vl_vstunc_vssl
+#define _vel_vstunc_vssml __builtin_ve_vl_vstunc_vssml
+#define _vel_vstuot_vssl __builtin_ve_vl_vstuot_vssl
+#define _vel_vstuot_vssml __builtin_ve_vl_vstuot_vssml
+#define _vel_vstuncot_vssl __builtin_ve_vl_vstuncot_vssl
+#define _vel_vstuncot_vssml __builtin_ve_vl_vstuncot_vssml
+#define _vel_vstl_vssl __builtin_ve_vl_vstl_vssl
+#define _vel_vstl_vssml __builtin_ve_vl_vstl_vssml
+#define _vel_vstlnc_vssl __builtin_ve_vl_vstlnc_vssl
+#define _vel_vstlnc_vssml __builtin_ve_vl_vstlnc_vssml
+#define _vel_vstlot_vssl __builtin_ve_vl_vstlot_vssl
+#define _vel_vstlot_vssml __builtin_ve_vl_vstlot_vssml
+#define _vel_vstlncot_vssl __builtin_ve_vl_vstlncot_vssl
+#define _vel_vstlncot_vssml __builtin_ve_vl_vstlncot_vssml
+#define _vel_vst2d_vssl __builtin_ve_vl_vst2d_vssl
+#define _vel_vst2d_vssml __builtin_ve_vl_vst2d_vssml
+#define _vel_vst2dnc_vssl __builtin_ve_vl_vst2dnc_vssl
+#define _vel_vst2dnc_vssml __builtin_ve_vl_vst2dnc_vssml
+#define _vel_vst2dot_vssl __builtin_ve_vl_vst2dot_vssl
+#define _vel_vst2dot_vssml __builtin_ve_vl_vst2dot_vssml
+#define _vel_vst2dncot_vssl __builtin_ve_vl_vst2dncot_vssl
+#define _vel_vst2dncot_vssml __builtin_ve_vl_vst2dncot_vssml
+#define _vel_vstu2d_vssl __builtin_ve_vl_vstu2d_vssl
+#define _vel_vstu2d_vssml __builtin_ve_vl_vstu2d_vssml
+#define _vel_vstu2dnc_vssl __builtin_ve_vl_vstu2dnc_vssl
+#define _vel_vstu2dnc_vssml __builtin_ve_vl_vstu2dnc_vssml
+#define _vel_vstu2dot_vssl __builtin_ve_vl_vstu2dot_vssl
+#define _vel_vstu2dot_vssml __builtin_ve_vl_vstu2dot_vssml
+#define _vel_vstu2dncot_vssl __builtin_ve_vl_vstu2dncot_vssl
+#define _vel_vstu2dncot_vssml __builtin_ve_vl_vstu2dncot_vssml
+#define _vel_vstl2d_vssl __builtin_ve_vl_vstl2d_vssl
+#define _vel_vstl2d_vssml __builtin_ve_vl_vstl2d_vssml
+#define _vel_vstl2dnc_vssl __builtin_ve_vl_vstl2dnc_vssl
+#define _vel_vstl2dnc_vssml __builtin_ve_vl_vstl2dnc_vssml
+#define _vel_vstl2dot_vssl __builtin_ve_vl_vstl2dot_vssl
+#define _vel_vstl2dot_vssml __builtin_ve_vl_vstl2dot_vssml
+#define _vel_vstl2dncot_vssl __builtin_ve_vl_vstl2dncot_vssl
+#define _vel_vstl2dncot_vssml __builtin_ve_vl_vstl2dncot_vssml
+#define _vel_pfchv_ssl __builtin_ve_vl_pfchv_ssl
+#define _vel_pfchvnc_ssl __builtin_ve_vl_pfchvnc_ssl
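+/* Illustrative sketch, not part of the generated defines above: with the
+ * signatures given in the VE intrinsics manual,
+ *   __vr _vel_vld_vssl(unsigned long stride, void *base, int vl)  and
+ *   void _vel_vst_vssl(__vr vx, unsigned long stride, void *base, int vl),
+ * a strided load/compute/store loop over up to 256 elements per pass looks
+ * like (verify signatures against velintrin.h):
+ *
+ *   void vadd(double *a, double *b, double *c, int n) {
+ *     for (int i = 0; i < n; i += 256) {
+ *       int vl = n - i < 256 ? n - i : 256;      // active vector length
+ *       __vr va = _vel_vld_vssl(8, a + i, vl);   // stride is in bytes
+ *       __vr vb = _vel_vld_vssl(8, b + i, vl);
+ *       __vr vc = _vel_vfaddd_vvvl(va, vb, vl);  // double-precision add
+ *       _vel_vst_vssl(vc, 8, c + i, vl);
+ *     }
+ *   }
+ */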
+#define _vel_lsv_vvss __builtin_ve_vl_lsv_vvss
+#define _vel_lvsl_svs __builtin_ve_vl_lvsl_svs
+#define _vel_lvsd_svs __builtin_ve_vl_lvsd_svs
+#define _vel_lvss_svs __builtin_ve_vl_lvss_svs
+#define _vel_lvm_mmss __builtin_ve_vl_lvm_mmss
+#define _vel_lvm_MMss __builtin_ve_vl_lvm_MMss
+#define _vel_svm_sms __builtin_ve_vl_svm_sms
+#define _vel_svm_sMs __builtin_ve_vl_svm_sMs
+#define _vel_vbrdd_vsl __builtin_ve_vl_vbrdd_vsl
+#define _vel_vbrdd_vsvl __builtin_ve_vl_vbrdd_vsvl
+#define _vel_vbrdd_vsmvl __builtin_ve_vl_vbrdd_vsmvl
+#define _vel_vbrdl_vsl __builtin_ve_vl_vbrdl_vsl
+#define _vel_vbrdl_vsvl __builtin_ve_vl_vbrdl_vsvl
+#define _vel_vbrdl_vsmvl __builtin_ve_vl_vbrdl_vsmvl
+#define _vel_vbrds_vsl __builtin_ve_vl_vbrds_vsl
+#define _vel_vbrds_vsvl __builtin_ve_vl_vbrds_vsvl
+#define _vel_vbrds_vsmvl __builtin_ve_vl_vbrds_vsmvl
+#define _vel_vbrdw_vsl __builtin_ve_vl_vbrdw_vsl
+#define _vel_vbrdw_vsvl __builtin_ve_vl_vbrdw_vsvl
+#define _vel_vbrdw_vsmvl __builtin_ve_vl_vbrdw_vsmvl
+#define _vel_pvbrd_vsl __builtin_ve_vl_pvbrd_vsl
+#define _vel_pvbrd_vsvl __builtin_ve_vl_pvbrd_vsvl
+#define _vel_pvbrd_vsMvl __builtin_ve_vl_pvbrd_vsMvl
+#define _vel_vmv_vsvl __builtin_ve_vl_vmv_vsvl
+#define _vel_vmv_vsvvl __builtin_ve_vl_vmv_vsvvl
+#define _vel_vmv_vsvmvl __builtin_ve_vl_vmv_vsvmvl
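+/* Note on the suffix convention used by the VE intrinsics naming scheme: the
+ * letters after the final underscore spell out the operand list -- v = vector
+ * (__vr), s = scalar, m = 256-bit mask (__vm256), M = 512-bit mask pair
+ * (__vm512), and a trailing l = the explicit vector length.  Variants with an
+ * extra v before the l (e.g. _vvvvl) take a pass-through vector that supplies
+ * the values of elements the operation does not update, and the ...mvl /
+ * ...Mvl forms additionally take a mask selecting which elements are
+ * computed.  The pv prefix marks packed operations that treat each 64-bit
+ * element as a pair of 32-bit values, which is why those pair with the wider
+ * M masks. */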
+#define _vel_vaddul_vvvl __builtin_ve_vl_vaddul_vvvl
+#define _vel_vaddul_vvvvl __builtin_ve_vl_vaddul_vvvvl
+#define _vel_vaddul_vsvl __builtin_ve_vl_vaddul_vsvl
+#define _vel_vaddul_vsvvl __builtin_ve_vl_vaddul_vsvvl
+#define _vel_vaddul_vvvmvl __builtin_ve_vl_vaddul_vvvmvl
+#define _vel_vaddul_vsvmvl __builtin_ve_vl_vaddul_vsvmvl
+#define _vel_vadduw_vvvl __builtin_ve_vl_vadduw_vvvl
+#define _vel_vadduw_vvvvl __builtin_ve_vl_vadduw_vvvvl
+#define _vel_vadduw_vsvl __builtin_ve_vl_vadduw_vsvl
+#define _vel_vadduw_vsvvl __builtin_ve_vl_vadduw_vsvvl
+#define _vel_vadduw_vvvmvl __builtin_ve_vl_vadduw_vvvmvl
+#define _vel_vadduw_vsvmvl __builtin_ve_vl_vadduw_vsvmvl
+#define _vel_pvaddu_vvvl __builtin_ve_vl_pvaddu_vvvl
+#define _vel_pvaddu_vvvvl __builtin_ve_vl_pvaddu_vvvvl
+#define _vel_pvaddu_vsvl __builtin_ve_vl_pvaddu_vsvl
+#define _vel_pvaddu_vsvvl __builtin_ve_vl_pvaddu_vsvvl
+#define _vel_pvaddu_vvvMvl __builtin_ve_vl_pvaddu_vvvMvl
+#define _vel_pvaddu_vsvMvl __builtin_ve_vl_pvaddu_vsvMvl
+#define _vel_vaddswsx_vvvl __builtin_ve_vl_vaddswsx_vvvl
+#define _vel_vaddswsx_vvvvl __builtin_ve_vl_vaddswsx_vvvvl
+#define _vel_vaddswsx_vsvl __builtin_ve_vl_vaddswsx_vsvl
+#define _vel_vaddswsx_vsvvl __builtin_ve_vl_vaddswsx_vsvvl
+#define _vel_vaddswsx_vvvmvl __builtin_ve_vl_vaddswsx_vvvmvl
+#define _vel_vaddswsx_vsvmvl __builtin_ve_vl_vaddswsx_vsvmvl
+#define _vel_vaddswzx_vvvl __builtin_ve_vl_vaddswzx_vvvl
+#define _vel_vaddswzx_vvvvl __builtin_ve_vl_vaddswzx_vvvvl
+#define _vel_vaddswzx_vsvl __builtin_ve_vl_vaddswzx_vsvl
+#define _vel_vaddswzx_vsvvl __builtin_ve_vl_vaddswzx_vsvvl
+#define _vel_vaddswzx_vvvmvl __builtin_ve_vl_vaddswzx_vvvmvl
+#define _vel_vaddswzx_vsvmvl __builtin_ve_vl_vaddswzx_vsvmvl
+#define _vel_pvadds_vvvl __builtin_ve_vl_pvadds_vvvl
+#define _vel_pvadds_vvvvl __builtin_ve_vl_pvadds_vvvvl
+#define _vel_pvadds_vsvl __builtin_ve_vl_pvadds_vsvl
+#define _vel_pvadds_vsvvl __builtin_ve_vl_pvadds_vsvvl
+#define _vel_pvadds_vvvMvl __builtin_ve_vl_pvadds_vvvMvl
+#define _vel_pvadds_vsvMvl __builtin_ve_vl_pvadds_vsvMvl
+#define _vel_vaddsl_vvvl __builtin_ve_vl_vaddsl_vvvl
+#define _vel_vaddsl_vvvvl __builtin_ve_vl_vaddsl_vvvvl
+#define _vel_vaddsl_vsvl __builtin_ve_vl_vaddsl_vsvl
+#define _vel_vaddsl_vsvvl __builtin_ve_vl_vaddsl_vsvvl
+#define _vel_vaddsl_vvvmvl __builtin_ve_vl_vaddsl_vvvmvl
+#define _vel_vaddsl_vsvmvl __builtin_ve_vl_vaddsl_vsvmvl
+#define _vel_vsubul_vvvl __builtin_ve_vl_vsubul_vvvl
+#define _vel_vsubul_vvvvl __builtin_ve_vl_vsubul_vvvvl
+#define _vel_vsubul_vsvl __builtin_ve_vl_vsubul_vsvl
+#define _vel_vsubul_vsvvl __builtin_ve_vl_vsubul_vsvvl
+#define _vel_vsubul_vvvmvl __builtin_ve_vl_vsubul_vvvmvl
+#define _vel_vsubul_vsvmvl __builtin_ve_vl_vsubul_vsvmvl
+#define _vel_vsubuw_vvvl __builtin_ve_vl_vsubuw_vvvl
+#define _vel_vsubuw_vvvvl __builtin_ve_vl_vsubuw_vvvvl
+#define _vel_vsubuw_vsvl __builtin_ve_vl_vsubuw_vsvl
+#define _vel_vsubuw_vsvvl __builtin_ve_vl_vsubuw_vsvvl
+#define _vel_vsubuw_vvvmvl __builtin_ve_vl_vsubuw_vvvmvl
+#define _vel_vsubuw_vsvmvl __builtin_ve_vl_vsubuw_vsvmvl
+#define _vel_pvsubu_vvvl __builtin_ve_vl_pvsubu_vvvl
+#define _vel_pvsubu_vvvvl __builtin_ve_vl_pvsubu_vvvvl
+#define _vel_pvsubu_vsvl __builtin_ve_vl_pvsubu_vsvl
+#define _vel_pvsubu_vsvvl __builtin_ve_vl_pvsubu_vsvvl
+#define _vel_pvsubu_vvvMvl __builtin_ve_vl_pvsubu_vvvMvl
+#define _vel_pvsubu_vsvMvl __builtin_ve_vl_pvsubu_vsvMvl
+#define _vel_vsubswsx_vvvl __builtin_ve_vl_vsubswsx_vvvl
+#define _vel_vsubswsx_vvvvl __builtin_ve_vl_vsubswsx_vvvvl
+#define _vel_vsubswsx_vsvl __builtin_ve_vl_vsubswsx_vsvl
+#define _vel_vsubswsx_vsvvl __builtin_ve_vl_vsubswsx_vsvvl
+#define _vel_vsubswsx_vvvmvl __builtin_ve_vl_vsubswsx_vvvmvl
+#define _vel_vsubswsx_vsvmvl __builtin_ve_vl_vsubswsx_vsvmvl
+#define _vel_vsubswzx_vvvl __builtin_ve_vl_vsubswzx_vvvl
+#define _vel_vsubswzx_vvvvl __builtin_ve_vl_vsubswzx_vvvvl
+#define _vel_vsubswzx_vsvl __builtin_ve_vl_vsubswzx_vsvl
+#define _vel_vsubswzx_vsvvl __builtin_ve_vl_vsubswzx_vsvvl
+#define _vel_vsubswzx_vvvmvl __builtin_ve_vl_vsubswzx_vvvmvl
+#define _vel_vsubswzx_vsvmvl __builtin_ve_vl_vsubswzx_vsvmvl
+#define _vel_pvsubs_vvvl __builtin_ve_vl_pvsubs_vvvl
+#define _vel_pvsubs_vvvvl __builtin_ve_vl_pvsubs_vvvvl
+#define _vel_pvsubs_vsvl __builtin_ve_vl_pvsubs_vsvl
+#define _vel_pvsubs_vsvvl __builtin_ve_vl_pvsubs_vsvvl
+#define _vel_pvsubs_vvvMvl __builtin_ve_vl_pvsubs_vvvMvl
+#define _vel_pvsubs_vsvMvl __builtin_ve_vl_pvsubs_vsvMvl
+#define _vel_vsubsl_vvvl __builtin_ve_vl_vsubsl_vvvl
+#define _vel_vsubsl_vvvvl __builtin_ve_vl_vsubsl_vvvvl
+#define _vel_vsubsl_vsvl __builtin_ve_vl_vsubsl_vsvl
+#define _vel_vsubsl_vsvvl __builtin_ve_vl_vsubsl_vsvvl
+#define _vel_vsubsl_vvvmvl __builtin_ve_vl_vsubsl_vvvmvl
+#define _vel_vsubsl_vsvmvl __builtin_ve_vl_vsubsl_vsvmvl
+#define _vel_vmulul_vvvl __builtin_ve_vl_vmulul_vvvl
+#define _vel_vmulul_vvvvl __builtin_ve_vl_vmulul_vvvvl
+#define _vel_vmulul_vsvl __builtin_ve_vl_vmulul_vsvl
+#define _vel_vmulul_vsvvl __builtin_ve_vl_vmulul_vsvvl
+#define _vel_vmulul_vvvmvl __builtin_ve_vl_vmulul_vvvmvl
+#define _vel_vmulul_vsvmvl __builtin_ve_vl_vmulul_vsvmvl
+#define _vel_vmuluw_vvvl __builtin_ve_vl_vmuluw_vvvl
+#define _vel_vmuluw_vvvvl __builtin_ve_vl_vmuluw_vvvvl
+#define _vel_vmuluw_vsvl __builtin_ve_vl_vmuluw_vsvl
+#define _vel_vmuluw_vsvvl __builtin_ve_vl_vmuluw_vsvvl
+#define _vel_vmuluw_vvvmvl __builtin_ve_vl_vmuluw_vvvmvl
+#define _vel_vmuluw_vsvmvl __builtin_ve_vl_vmuluw_vsvmvl
+#define _vel_vmulswsx_vvvl __builtin_ve_vl_vmulswsx_vvvl
+#define _vel_vmulswsx_vvvvl __builtin_ve_vl_vmulswsx_vvvvl
+#define _vel_vmulswsx_vsvl __builtin_ve_vl_vmulswsx_vsvl
+#define _vel_vmulswsx_vsvvl __builtin_ve_vl_vmulswsx_vsvvl
+#define _vel_vmulswsx_vvvmvl __builtin_ve_vl_vmulswsx_vvvmvl
+#define _vel_vmulswsx_vsvmvl __builtin_ve_vl_vmulswsx_vsvmvl
+#define _vel_vmulswzx_vvvl __builtin_ve_vl_vmulswzx_vvvl
+#define _vel_vmulswzx_vvvvl __builtin_ve_vl_vmulswzx_vvvvl
+#define _vel_vmulswzx_vsvl __builtin_ve_vl_vmulswzx_vsvl
+#define _vel_vmulswzx_vsvvl __builtin_ve_vl_vmulswzx_vsvvl
+#define _vel_vmulswzx_vvvmvl __builtin_ve_vl_vmulswzx_vvvmvl
+#define _vel_vmulswzx_vsvmvl __builtin_ve_vl_vmulswzx_vsvmvl
+#define _vel_vmulsl_vvvl __builtin_ve_vl_vmulsl_vvvl
+#define _vel_vmulsl_vvvvl __builtin_ve_vl_vmulsl_vvvvl
+#define _vel_vmulsl_vsvl __builtin_ve_vl_vmulsl_vsvl
+#define _vel_vmulsl_vsvvl __builtin_ve_vl_vmulsl_vsvvl
+#define _vel_vmulsl_vvvmvl __builtin_ve_vl_vmulsl_vvvmvl
+#define _vel_vmulsl_vsvmvl __builtin_ve_vl_vmulsl_vsvmvl
+#define _vel_vmulslw_vvvl __builtin_ve_vl_vmulslw_vvvl
+#define _vel_vmulslw_vvvvl __builtin_ve_vl_vmulslw_vvvvl
+#define _vel_vmulslw_vsvl __builtin_ve_vl_vmulslw_vsvl
+#define _vel_vmulslw_vsvvl __builtin_ve_vl_vmulslw_vsvvl
+#define _vel_vdivul_vvvl __builtin_ve_vl_vdivul_vvvl
+#define _vel_vdivul_vvvvl __builtin_ve_vl_vdivul_vvvvl
+#define _vel_vdivul_vsvl __builtin_ve_vl_vdivul_vsvl
+#define _vel_vdivul_vsvvl __builtin_ve_vl_vdivul_vsvvl
+#define _vel_vdivul_vvvmvl __builtin_ve_vl_vdivul_vvvmvl
+#define _vel_vdivul_vsvmvl __builtin_ve_vl_vdivul_vsvmvl
+#define _vel_vdivuw_vvvl __builtin_ve_vl_vdivuw_vvvl
+#define _vel_vdivuw_vvvvl __builtin_ve_vl_vdivuw_vvvvl
+#define _vel_vdivuw_vsvl __builtin_ve_vl_vdivuw_vsvl
+#define _vel_vdivuw_vsvvl __builtin_ve_vl_vdivuw_vsvvl
+#define _vel_vdivuw_vvvmvl __builtin_ve_vl_vdivuw_vvvmvl
+#define _vel_vdivuw_vsvmvl __builtin_ve_vl_vdivuw_vsvmvl
+#define _vel_vdivul_vvsl __builtin_ve_vl_vdivul_vvsl
+#define _vel_vdivul_vvsvl __builtin_ve_vl_vdivul_vvsvl
+#define _vel_vdivul_vvsmvl __builtin_ve_vl_vdivul_vvsmvl
+#define _vel_vdivuw_vvsl __builtin_ve_vl_vdivuw_vvsl
+#define _vel_vdivuw_vvsvl __builtin_ve_vl_vdivuw_vvsvl
+#define _vel_vdivuw_vvsmvl __builtin_ve_vl_vdivuw_vvsmvl
+#define _vel_vdivswsx_vvvl __builtin_ve_vl_vdivswsx_vvvl
+#define _vel_vdivswsx_vvvvl __builtin_ve_vl_vdivswsx_vvvvl
+#define _vel_vdivswsx_vsvl __builtin_ve_vl_vdivswsx_vsvl
+#define _vel_vdivswsx_vsvvl __builtin_ve_vl_vdivswsx_vsvvl
+#define _vel_vdivswsx_vvvmvl __builtin_ve_vl_vdivswsx_vvvmvl
+#define _vel_vdivswsx_vsvmvl __builtin_ve_vl_vdivswsx_vsvmvl
+#define _vel_vdivswzx_vvvl __builtin_ve_vl_vdivswzx_vvvl
+#define _vel_vdivswzx_vvvvl __builtin_ve_vl_vdivswzx_vvvvl
+#define _vel_vdivswzx_vsvl __builtin_ve_vl_vdivswzx_vsvl
+#define _vel_vdivswzx_vsvvl __builtin_ve_vl_vdivswzx_vsvvl
+#define _vel_vdivswzx_vvvmvl __builtin_ve_vl_vdivswzx_vvvmvl
+#define _vel_vdivswzx_vsvmvl __builtin_ve_vl_vdivswzx_vsvmvl
+#define _vel_vdivswsx_vvsl __builtin_ve_vl_vdivswsx_vvsl
+#define _vel_vdivswsx_vvsvl __builtin_ve_vl_vdivswsx_vvsvl
+#define _vel_vdivswsx_vvsmvl __builtin_ve_vl_vdivswsx_vvsmvl
+#define _vel_vdivswzx_vvsl __builtin_ve_vl_vdivswzx_vvsl
+#define _vel_vdivswzx_vvsvl __builtin_ve_vl_vdivswzx_vvsvl
+#define _vel_vdivswzx_vvsmvl __builtin_ve_vl_vdivswzx_vvsmvl
+#define _vel_vdivsl_vvvl __builtin_ve_vl_vdivsl_vvvl
+#define _vel_vdivsl_vvvvl __builtin_ve_vl_vdivsl_vvvvl
+#define _vel_vdivsl_vsvl __builtin_ve_vl_vdivsl_vsvl
+#define _vel_vdivsl_vsvvl __builtin_ve_vl_vdivsl_vsvvl
+#define _vel_vdivsl_vvvmvl __builtin_ve_vl_vdivsl_vvvmvl
+#define _vel_vdivsl_vsvmvl __builtin_ve_vl_vdivsl_vsvmvl
+#define _vel_vdivsl_vvsl __builtin_ve_vl_vdivsl_vvsl
+#define _vel_vdivsl_vvsvl __builtin_ve_vl_vdivsl_vvsvl
+#define _vel_vdivsl_vvsmvl __builtin_ve_vl_vdivsl_vvsmvl
+#define _vel_vcmpul_vvvl __builtin_ve_vl_vcmpul_vvvl
+#define _vel_vcmpul_vvvvl __builtin_ve_vl_vcmpul_vvvvl
+#define _vel_vcmpul_vsvl __builtin_ve_vl_vcmpul_vsvl
+#define _vel_vcmpul_vsvvl __builtin_ve_vl_vcmpul_vsvvl
+#define _vel_vcmpul_vvvmvl __builtin_ve_vl_vcmpul_vvvmvl
+#define _vel_vcmpul_vsvmvl __builtin_ve_vl_vcmpul_vsvmvl
+#define _vel_vcmpuw_vvvl __builtin_ve_vl_vcmpuw_vvvl
+#define _vel_vcmpuw_vvvvl __builtin_ve_vl_vcmpuw_vvvvl
+#define _vel_vcmpuw_vsvl __builtin_ve_vl_vcmpuw_vsvl
+#define _vel_vcmpuw_vsvvl __builtin_ve_vl_vcmpuw_vsvvl
+#define _vel_vcmpuw_vvvmvl __builtin_ve_vl_vcmpuw_vvvmvl
+#define _vel_vcmpuw_vsvmvl __builtin_ve_vl_vcmpuw_vsvmvl
+#define _vel_pvcmpu_vvvl __builtin_ve_vl_pvcmpu_vvvl
+#define _vel_pvcmpu_vvvvl __builtin_ve_vl_pvcmpu_vvvvl
+#define _vel_pvcmpu_vsvl __builtin_ve_vl_pvcmpu_vsvl
+#define _vel_pvcmpu_vsvvl __builtin_ve_vl_pvcmpu_vsvvl
+#define _vel_pvcmpu_vvvMvl __builtin_ve_vl_pvcmpu_vvvMvl
+#define _vel_pvcmpu_vsvMvl __builtin_ve_vl_pvcmpu_vsvMvl
+#define _vel_vcmpswsx_vvvl __builtin_ve_vl_vcmpswsx_vvvl
+#define _vel_vcmpswsx_vvvvl __builtin_ve_vl_vcmpswsx_vvvvl
+#define _vel_vcmpswsx_vsvl __builtin_ve_vl_vcmpswsx_vsvl
+#define _vel_vcmpswsx_vsvvl __builtin_ve_vl_vcmpswsx_vsvvl
+#define _vel_vcmpswsx_vvvmvl __builtin_ve_vl_vcmpswsx_vvvmvl
+#define _vel_vcmpswsx_vsvmvl __builtin_ve_vl_vcmpswsx_vsvmvl
+#define _vel_vcmpswzx_vvvl __builtin_ve_vl_vcmpswzx_vvvl
+#define _vel_vcmpswzx_vvvvl __builtin_ve_vl_vcmpswzx_vvvvl
+#define _vel_vcmpswzx_vsvl __builtin_ve_vl_vcmpswzx_vsvl
+#define _vel_vcmpswzx_vsvvl __builtin_ve_vl_vcmpswzx_vsvvl
+#define _vel_vcmpswzx_vvvmvl __builtin_ve_vl_vcmpswzx_vvvmvl
+#define _vel_vcmpswzx_vsvmvl __builtin_ve_vl_vcmpswzx_vsvmvl
+#define _vel_pvcmps_vvvl __builtin_ve_vl_pvcmps_vvvl
+#define _vel_pvcmps_vvvvl __builtin_ve_vl_pvcmps_vvvvl
+#define _vel_pvcmps_vsvl __builtin_ve_vl_pvcmps_vsvl
+#define _vel_pvcmps_vsvvl __builtin_ve_vl_pvcmps_vsvvl
+#define _vel_pvcmps_vvvMvl __builtin_ve_vl_pvcmps_vvvMvl
+#define _vel_pvcmps_vsvMvl __builtin_ve_vl_pvcmps_vsvMvl
+#define _vel_vcmpsl_vvvl __builtin_ve_vl_vcmpsl_vvvl
+#define _vel_vcmpsl_vvvvl __builtin_ve_vl_vcmpsl_vvvvl
+#define _vel_vcmpsl_vsvl __builtin_ve_vl_vcmpsl_vsvl
+#define _vel_vcmpsl_vsvvl __builtin_ve_vl_vcmpsl_vsvvl
+#define _vel_vcmpsl_vvvmvl __builtin_ve_vl_vcmpsl_vvvmvl
+#define _vel_vcmpsl_vsvmvl __builtin_ve_vl_vcmpsl_vsvmvl
+#define _vel_vmaxswsx_vvvl __builtin_ve_vl_vmaxswsx_vvvl
+#define _vel_vmaxswsx_vvvvl __builtin_ve_vl_vmaxswsx_vvvvl
+#define _vel_vmaxswsx_vsvl __builtin_ve_vl_vmaxswsx_vsvl
+#define _vel_vmaxswsx_vsvvl __builtin_ve_vl_vmaxswsx_vsvvl
+#define _vel_vmaxswsx_vvvmvl __builtin_ve_vl_vmaxswsx_vvvmvl
+#define _vel_vmaxswsx_vsvmvl __builtin_ve_vl_vmaxswsx_vsvmvl
+#define _vel_vmaxswzx_vvvl __builtin_ve_vl_vmaxswzx_vvvl
+#define _vel_vmaxswzx_vvvvl __builtin_ve_vl_vmaxswzx_vvvvl
+#define _vel_vmaxswzx_vsvl __builtin_ve_vl_vmaxswzx_vsvl
+#define _vel_vmaxswzx_vsvvl __builtin_ve_vl_vmaxswzx_vsvvl
+#define _vel_vmaxswzx_vvvmvl __builtin_ve_vl_vmaxswzx_vvvmvl
+#define _vel_vmaxswzx_vsvmvl __builtin_ve_vl_vmaxswzx_vsvmvl
+#define _vel_pvmaxs_vvvl __builtin_ve_vl_pvmaxs_vvvl
+#define _vel_pvmaxs_vvvvl __builtin_ve_vl_pvmaxs_vvvvl
+#define _vel_pvmaxs_vsvl __builtin_ve_vl_pvmaxs_vsvl
+#define _vel_pvmaxs_vsvvl __builtin_ve_vl_pvmaxs_vsvvl
+#define _vel_pvmaxs_vvvMvl __builtin_ve_vl_pvmaxs_vvvMvl
+#define _vel_pvmaxs_vsvMvl __builtin_ve_vl_pvmaxs_vsvMvl
+#define _vel_vminswsx_vvvl __builtin_ve_vl_vminswsx_vvvl
+#define _vel_vminswsx_vvvvl __builtin_ve_vl_vminswsx_vvvvl
+#define _vel_vminswsx_vsvl __builtin_ve_vl_vminswsx_vsvl
+#define _vel_vminswsx_vsvvl __builtin_ve_vl_vminswsx_vsvvl
+#define _vel_vminswsx_vvvmvl __builtin_ve_vl_vminswsx_vvvmvl
+#define _vel_vminswsx_vsvmvl __builtin_ve_vl_vminswsx_vsvmvl
+#define _vel_vminswzx_vvvl __builtin_ve_vl_vminswzx_vvvl
+#define _vel_vminswzx_vvvvl __builtin_ve_vl_vminswzx_vvvvl
+#define _vel_vminswzx_vsvl __builtin_ve_vl_vminswzx_vsvl
+#define _vel_vminswzx_vsvvl __builtin_ve_vl_vminswzx_vsvvl
+#define _vel_vminswzx_vvvmvl __builtin_ve_vl_vminswzx_vvvmvl
+#define _vel_vminswzx_vsvmvl __builtin_ve_vl_vminswzx_vsvmvl
+#define _vel_pvmins_vvvl __builtin_ve_vl_pvmins_vvvl
+#define _vel_pvmins_vvvvl __builtin_ve_vl_pvmins_vvvvl
+#define _vel_pvmins_vsvl __builtin_ve_vl_pvmins_vsvl
+#define _vel_pvmins_vsvvl __builtin_ve_vl_pvmins_vsvvl
+#define _vel_pvmins_vvvMvl __builtin_ve_vl_pvmins_vvvMvl
+#define _vel_pvmins_vsvMvl __builtin_ve_vl_pvmins_vsvMvl
+#define _vel_vmaxsl_vvvl __builtin_ve_vl_vmaxsl_vvvl
+#define _vel_vmaxsl_vvvvl __builtin_ve_vl_vmaxsl_vvvvl
+#define _vel_vmaxsl_vsvl __builtin_ve_vl_vmaxsl_vsvl
+#define _vel_vmaxsl_vsvvl __builtin_ve_vl_vmaxsl_vsvvl
+#define _vel_vmaxsl_vvvmvl __builtin_ve_vl_vmaxsl_vvvmvl
+#define _vel_vmaxsl_vsvmvl __builtin_ve_vl_vmaxsl_vsvmvl
+#define _vel_vminsl_vvvl __builtin_ve_vl_vminsl_vvvl
+#define _vel_vminsl_vvvvl __builtin_ve_vl_vminsl_vvvvl
+#define _vel_vminsl_vsvl __builtin_ve_vl_vminsl_vsvl
+#define _vel_vminsl_vsvvl __builtin_ve_vl_vminsl_vsvvl
+#define _vel_vminsl_vvvmvl __builtin_ve_vl_vminsl_vvvmvl
+#define _vel_vminsl_vsvmvl __builtin_ve_vl_vminsl_vsvmvl
+#define _vel_vand_vvvl __builtin_ve_vl_vand_vvvl
+#define _vel_vand_vvvvl __builtin_ve_vl_vand_vvvvl
+#define _vel_vand_vsvl __builtin_ve_vl_vand_vsvl
+#define _vel_vand_vsvvl __builtin_ve_vl_vand_vsvvl
+#define _vel_vand_vvvmvl __builtin_ve_vl_vand_vvvmvl
+#define _vel_vand_vsvmvl __builtin_ve_vl_vand_vsvmvl
+#define _vel_pvand_vvvl __builtin_ve_vl_pvand_vvvl
+#define _vel_pvand_vvvvl __builtin_ve_vl_pvand_vvvvl
+#define _vel_pvand_vsvl __builtin_ve_vl_pvand_vsvl
+#define _vel_pvand_vsvvl __builtin_ve_vl_pvand_vsvvl
+#define _vel_pvand_vvvMvl __builtin_ve_vl_pvand_vvvMvl
+#define _vel_pvand_vsvMvl __builtin_ve_vl_pvand_vsvMvl
+#define _vel_vor_vvvl __builtin_ve_vl_vor_vvvl
+#define _vel_vor_vvvvl __builtin_ve_vl_vor_vvvvl
+#define _vel_vor_vsvl __builtin_ve_vl_vor_vsvl
+#define _vel_vor_vsvvl __builtin_ve_vl_vor_vsvvl
+#define _vel_vor_vvvmvl __builtin_ve_vl_vor_vvvmvl
+#define _vel_vor_vsvmvl __builtin_ve_vl_vor_vsvmvl
+#define _vel_pvor_vvvl __builtin_ve_vl_pvor_vvvl
+#define _vel_pvor_vvvvl __builtin_ve_vl_pvor_vvvvl
+#define _vel_pvor_vsvl __builtin_ve_vl_pvor_vsvl
+#define _vel_pvor_vsvvl __builtin_ve_vl_pvor_vsvvl
+#define _vel_pvor_vvvMvl __builtin_ve_vl_pvor_vvvMvl
+#define _vel_pvor_vsvMvl __builtin_ve_vl_pvor_vsvMvl
+#define _vel_vxor_vvvl __builtin_ve_vl_vxor_vvvl
+#define _vel_vxor_vvvvl __builtin_ve_vl_vxor_vvvvl
+#define _vel_vxor_vsvl __builtin_ve_vl_vxor_vsvl
+#define _vel_vxor_vsvvl __builtin_ve_vl_vxor_vsvvl
+#define _vel_vxor_vvvmvl __builtin_ve_vl_vxor_vvvmvl
+#define _vel_vxor_vsvmvl __builtin_ve_vl_vxor_vsvmvl
+#define _vel_pvxor_vvvl __builtin_ve_vl_pvxor_vvvl
+#define _vel_pvxor_vvvvl __builtin_ve_vl_pvxor_vvvvl
+#define _vel_pvxor_vsvl __builtin_ve_vl_pvxor_vsvl
+#define _vel_pvxor_vsvvl __builtin_ve_vl_pvxor_vsvvl
+#define _vel_pvxor_vvvMvl __builtin_ve_vl_pvxor_vvvMvl
+#define _vel_pvxor_vsvMvl __builtin_ve_vl_pvxor_vsvMvl
+#define _vel_veqv_vvvl __builtin_ve_vl_veqv_vvvl
+#define _vel_veqv_vvvvl __builtin_ve_vl_veqv_vvvvl
+#define _vel_veqv_vsvl __builtin_ve_vl_veqv_vsvl
+#define _vel_veqv_vsvvl __builtin_ve_vl_veqv_vsvvl
+#define _vel_veqv_vvvmvl __builtin_ve_vl_veqv_vvvmvl
+#define _vel_veqv_vsvmvl __builtin_ve_vl_veqv_vsvmvl
+#define _vel_pveqv_vvvl __builtin_ve_vl_pveqv_vvvl
+#define _vel_pveqv_vvvvl __builtin_ve_vl_pveqv_vvvvl
+#define _vel_pveqv_vsvl __builtin_ve_vl_pveqv_vsvl
+#define _vel_pveqv_vsvvl __builtin_ve_vl_pveqv_vsvvl
+#define _vel_pveqv_vvvMvl __builtin_ve_vl_pveqv_vvvMvl
+#define _vel_pveqv_vsvMvl __builtin_ve_vl_pveqv_vsvMvl
+#define _vel_vldz_vvl __builtin_ve_vl_vldz_vvl
+#define _vel_vldz_vvvl __builtin_ve_vl_vldz_vvvl
+#define _vel_vldz_vvmvl __builtin_ve_vl_vldz_vvmvl
+#define _vel_pvldzlo_vvl __builtin_ve_vl_pvldzlo_vvl
+#define _vel_pvldzlo_vvvl __builtin_ve_vl_pvldzlo_vvvl
+#define _vel_pvldzlo_vvmvl __builtin_ve_vl_pvldzlo_vvmvl
+#define _vel_pvldzup_vvl __builtin_ve_vl_pvldzup_vvl
+#define _vel_pvldzup_vvvl __builtin_ve_vl_pvldzup_vvvl
+#define _vel_pvldzup_vvmvl __builtin_ve_vl_pvldzup_vvmvl
+#define _vel_pvldz_vvl __builtin_ve_vl_pvldz_vvl
+#define _vel_pvldz_vvvl __builtin_ve_vl_pvldz_vvvl
+#define _vel_pvldz_vvMvl __builtin_ve_vl_pvldz_vvMvl
+#define _vel_vpcnt_vvl __builtin_ve_vl_vpcnt_vvl
+#define _vel_vpcnt_vvvl __builtin_ve_vl_vpcnt_vvvl
+#define _vel_vpcnt_vvmvl __builtin_ve_vl_vpcnt_vvmvl
+#define _vel_pvpcntlo_vvl __builtin_ve_vl_pvpcntlo_vvl
+#define _vel_pvpcntlo_vvvl __builtin_ve_vl_pvpcntlo_vvvl
+#define _vel_pvpcntlo_vvmvl __builtin_ve_vl_pvpcntlo_vvmvl
+#define _vel_pvpcntup_vvl __builtin_ve_vl_pvpcntup_vvl
+#define _vel_pvpcntup_vvvl __builtin_ve_vl_pvpcntup_vvvl
+#define _vel_pvpcntup_vvmvl __builtin_ve_vl_pvpcntup_vvmvl
+#define _vel_pvpcnt_vvl __builtin_ve_vl_pvpcnt_vvl
+#define _vel_pvpcnt_vvvl __builtin_ve_vl_pvpcnt_vvvl
+#define _vel_pvpcnt_vvMvl __builtin_ve_vl_pvpcnt_vvMvl
+#define _vel_vbrv_vvl __builtin_ve_vl_vbrv_vvl
+#define _vel_vbrv_vvvl __builtin_ve_vl_vbrv_vvvl
+#define _vel_vbrv_vvmvl __builtin_ve_vl_vbrv_vvmvl
+#define _vel_pvbrvlo_vvl __builtin_ve_vl_pvbrvlo_vvl
+#define _vel_pvbrvlo_vvvl __builtin_ve_vl_pvbrvlo_vvvl
+#define _vel_pvbrvlo_vvmvl __builtin_ve_vl_pvbrvlo_vvmvl
+#define _vel_pvbrvup_vvl __builtin_ve_vl_pvbrvup_vvl
+#define _vel_pvbrvup_vvvl __builtin_ve_vl_pvbrvup_vvvl
+#define _vel_pvbrvup_vvmvl __builtin_ve_vl_pvbrvup_vvmvl
+#define _vel_pvbrv_vvl __builtin_ve_vl_pvbrv_vvl
+#define _vel_pvbrv_vvvl __builtin_ve_vl_pvbrv_vvvl
+#define _vel_pvbrv_vvMvl __builtin_ve_vl_pvbrv_vvMvl
+#define _vel_vseq_vl __builtin_ve_vl_vseq_vl
+#define _vel_vseq_vvl __builtin_ve_vl_vseq_vvl
+#define _vel_pvseqlo_vl __builtin_ve_vl_pvseqlo_vl
+#define _vel_pvseqlo_vvl __builtin_ve_vl_pvseqlo_vvl
+#define _vel_pvsequp_vl __builtin_ve_vl_pvsequp_vl
+#define _vel_pvsequp_vvl __builtin_ve_vl_pvsequp_vvl
+#define _vel_pvseq_vl __builtin_ve_vl_pvseq_vl
+#define _vel_pvseq_vvl __builtin_ve_vl_pvseq_vvl
+#define _vel_vsll_vvvl __builtin_ve_vl_vsll_vvvl
+#define _vel_vsll_vvvvl __builtin_ve_vl_vsll_vvvvl
+#define _vel_vsll_vvsl __builtin_ve_vl_vsll_vvsl
+#define _vel_vsll_vvsvl __builtin_ve_vl_vsll_vvsvl
+#define _vel_vsll_vvvmvl __builtin_ve_vl_vsll_vvvmvl
+#define _vel_vsll_vvsmvl __builtin_ve_vl_vsll_vvsmvl
+#define _vel_pvsll_vvvl __builtin_ve_vl_pvsll_vvvl
+#define _vel_pvsll_vvvvl __builtin_ve_vl_pvsll_vvvvl
+#define _vel_pvsll_vvsl __builtin_ve_vl_pvsll_vvsl
+#define _vel_pvsll_vvsvl __builtin_ve_vl_pvsll_vvsvl
+#define _vel_pvsll_vvvMvl __builtin_ve_vl_pvsll_vvvMvl
+#define _vel_pvsll_vvsMvl __builtin_ve_vl_pvsll_vvsMvl
+#define _vel_vsrl_vvvl __builtin_ve_vl_vsrl_vvvl
+#define _vel_vsrl_vvvvl __builtin_ve_vl_vsrl_vvvvl
+#define _vel_vsrl_vvsl __builtin_ve_vl_vsrl_vvsl
+#define _vel_vsrl_vvsvl __builtin_ve_vl_vsrl_vvsvl
+#define _vel_vsrl_vvvmvl __builtin_ve_vl_vsrl_vvvmvl
+#define _vel_vsrl_vvsmvl __builtin_ve_vl_vsrl_vvsmvl
+#define _vel_pvsrl_vvvl __builtin_ve_vl_pvsrl_vvvl
+#define _vel_pvsrl_vvvvl __builtin_ve_vl_pvsrl_vvvvl
+#define _vel_pvsrl_vvsl __builtin_ve_vl_pvsrl_vvsl
+#define _vel_pvsrl_vvsvl __builtin_ve_vl_pvsrl_vvsvl
+#define _vel_pvsrl_vvvMvl __builtin_ve_vl_pvsrl_vvvMvl
+#define _vel_pvsrl_vvsMvl __builtin_ve_vl_pvsrl_vvsMvl
+#define _vel_vslawsx_vvvl __builtin_ve_vl_vslawsx_vvvl
+#define _vel_vslawsx_vvvvl __builtin_ve_vl_vslawsx_vvvvl
+#define _vel_vslawsx_vvsl __builtin_ve_vl_vslawsx_vvsl
+#define _vel_vslawsx_vvsvl __builtin_ve_vl_vslawsx_vvsvl
+#define _vel_vslawsx_vvvmvl __builtin_ve_vl_vslawsx_vvvmvl
+#define _vel_vslawsx_vvsmvl __builtin_ve_vl_vslawsx_vvsmvl
+#define _vel_vslawzx_vvvl __builtin_ve_vl_vslawzx_vvvl
+#define _vel_vslawzx_vvvvl __builtin_ve_vl_vslawzx_vvvvl
+#define _vel_vslawzx_vvsl __builtin_ve_vl_vslawzx_vvsl
+#define _vel_vslawzx_vvsvl __builtin_ve_vl_vslawzx_vvsvl
+#define _vel_vslawzx_vvvmvl __builtin_ve_vl_vslawzx_vvvmvl
+#define _vel_vslawzx_vvsmvl __builtin_ve_vl_vslawzx_vvsmvl
+#define _vel_pvsla_vvvl __builtin_ve_vl_pvsla_vvvl
+#define _vel_pvsla_vvvvl __builtin_ve_vl_pvsla_vvvvl
+#define _vel_pvsla_vvsl __builtin_ve_vl_pvsla_vvsl
+#define _vel_pvsla_vvsvl __builtin_ve_vl_pvsla_vvsvl
+#define _vel_pvsla_vvvMvl __builtin_ve_vl_pvsla_vvvMvl
+#define _vel_pvsla_vvsMvl __builtin_ve_vl_pvsla_vvsMvl
+#define _vel_vslal_vvvl __builtin_ve_vl_vslal_vvvl
+#define _vel_vslal_vvvvl __builtin_ve_vl_vslal_vvvvl
+#define _vel_vslal_vvsl __builtin_ve_vl_vslal_vvsl
+#define _vel_vslal_vvsvl __builtin_ve_vl_vslal_vvsvl
+#define _vel_vslal_vvvmvl __builtin_ve_vl_vslal_vvvmvl
+#define _vel_vslal_vvsmvl __builtin_ve_vl_vslal_vvsmvl
+#define _vel_vsrawsx_vvvl __builtin_ve_vl_vsrawsx_vvvl
+#define _vel_vsrawsx_vvvvl __builtin_ve_vl_vsrawsx_vvvvl
+#define _vel_vsrawsx_vvsl __builtin_ve_vl_vsrawsx_vvsl
+#define _vel_vsrawsx_vvsvl __builtin_ve_vl_vsrawsx_vvsvl
+#define _vel_vsrawsx_vvvmvl __builtin_ve_vl_vsrawsx_vvvmvl
+#define _vel_vsrawsx_vvsmvl __builtin_ve_vl_vsrawsx_vvsmvl
+#define _vel_vsrawzx_vvvl __builtin_ve_vl_vsrawzx_vvvl
+#define _vel_vsrawzx_vvvvl __builtin_ve_vl_vsrawzx_vvvvl
+#define _vel_vsrawzx_vvsl __builtin_ve_vl_vsrawzx_vvsl
+#define _vel_vsrawzx_vvsvl __builtin_ve_vl_vsrawzx_vvsvl
+#define _vel_vsrawzx_vvvmvl __builtin_ve_vl_vsrawzx_vvvmvl
+#define _vel_vsrawzx_vvsmvl __builtin_ve_vl_vsrawzx_vvsmvl
+#define _vel_pvsra_vvvl __builtin_ve_vl_pvsra_vvvl
+#define _vel_pvsra_vvvvl __builtin_ve_vl_pvsra_vvvvl
+#define _vel_pvsra_vvsl __builtin_ve_vl_pvsra_vvsl
+#define _vel_pvsra_vvsvl __builtin_ve_vl_pvsra_vvsvl
+#define _vel_pvsra_vvvMvl __builtin_ve_vl_pvsra_vvvMvl
+#define _vel_pvsra_vvsMvl __builtin_ve_vl_pvsra_vvsMvl
+#define _vel_vsral_vvvl __builtin_ve_vl_vsral_vvvl
+#define _vel_vsral_vvvvl __builtin_ve_vl_vsral_vvvvl
+#define _vel_vsral_vvsl __builtin_ve_vl_vsral_vvsl
+#define _vel_vsral_vvsvl __builtin_ve_vl_vsral_vvsvl
+#define _vel_vsral_vvvmvl __builtin_ve_vl_vsral_vvvmvl
+#define _vel_vsral_vvsmvl __builtin_ve_vl_vsral_vvsmvl
+#define _vel_vsfa_vvssl __builtin_ve_vl_vsfa_vvssl
+#define _vel_vsfa_vvssvl __builtin_ve_vl_vsfa_vvssvl
+#define _vel_vsfa_vvssmvl __builtin_ve_vl_vsfa_vvssmvl
+#define _vel_vfaddd_vvvl __builtin_ve_vl_vfaddd_vvvl
+#define _vel_vfaddd_vvvvl __builtin_ve_vl_vfaddd_vvvvl
+#define _vel_vfaddd_vsvl __builtin_ve_vl_vfaddd_vsvl
+#define _vel_vfaddd_vsvvl __builtin_ve_vl_vfaddd_vsvvl
+#define _vel_vfaddd_vvvmvl __builtin_ve_vl_vfaddd_vvvmvl
+#define _vel_vfaddd_vsvmvl __builtin_ve_vl_vfaddd_vsvmvl
+#define _vel_vfadds_vvvl __builtin_ve_vl_vfadds_vvvl
+#define _vel_vfadds_vvvvl __builtin_ve_vl_vfadds_vvvvl
+#define _vel_vfadds_vsvl __builtin_ve_vl_vfadds_vsvl
+#define _vel_vfadds_vsvvl __builtin_ve_vl_vfadds_vsvvl
+#define _vel_vfadds_vvvmvl __builtin_ve_vl_vfadds_vvvmvl
+#define _vel_vfadds_vsvmvl __builtin_ve_vl_vfadds_vsvmvl
+#define _vel_pvfadd_vvvl __builtin_ve_vl_pvfadd_vvvl
+#define _vel_pvfadd_vvvvl __builtin_ve_vl_pvfadd_vvvvl
+#define _vel_pvfadd_vsvl __builtin_ve_vl_pvfadd_vsvl
+#define _vel_pvfadd_vsvvl __builtin_ve_vl_pvfadd_vsvvl
+#define _vel_pvfadd_vvvMvl __builtin_ve_vl_pvfadd_vvvMvl
+#define _vel_pvfadd_vsvMvl __builtin_ve_vl_pvfadd_vsvMvl
+#define _vel_vfsubd_vvvl __builtin_ve_vl_vfsubd_vvvl
+#define _vel_vfsubd_vvvvl __builtin_ve_vl_vfsubd_vvvvl
+#define _vel_vfsubd_vsvl __builtin_ve_vl_vfsubd_vsvl
+#define _vel_vfsubd_vsvvl __builtin_ve_vl_vfsubd_vsvvl
+#define _vel_vfsubd_vvvmvl __builtin_ve_vl_vfsubd_vvvmvl
+#define _vel_vfsubd_vsvmvl __builtin_ve_vl_vfsubd_vsvmvl
+#define _vel_vfsubs_vvvl __builtin_ve_vl_vfsubs_vvvl
+#define _vel_vfsubs_vvvvl __builtin_ve_vl_vfsubs_vvvvl
+#define _vel_vfsubs_vsvl __builtin_ve_vl_vfsubs_vsvl
+#define _vel_vfsubs_vsvvl __builtin_ve_vl_vfsubs_vsvvl
+#define _vel_vfsubs_vvvmvl __builtin_ve_vl_vfsubs_vvvmvl
+#define _vel_vfsubs_vsvmvl __builtin_ve_vl_vfsubs_vsvmvl
+#define _vel_pvfsub_vvvl __builtin_ve_vl_pvfsub_vvvl
+#define _vel_pvfsub_vvvvl __builtin_ve_vl_pvfsub_vvvvl
+#define _vel_pvfsub_vsvl __builtin_ve_vl_pvfsub_vsvl
+#define _vel_pvfsub_vsvvl __builtin_ve_vl_pvfsub_vsvvl
+#define _vel_pvfsub_vvvMvl __builtin_ve_vl_pvfsub_vvvMvl
+#define _vel_pvfsub_vsvMvl __builtin_ve_vl_pvfsub_vsvMvl
+#define _vel_vfmuld_vvvl __builtin_ve_vl_vfmuld_vvvl
+#define _vel_vfmuld_vvvvl __builtin_ve_vl_vfmuld_vvvvl
+#define _vel_vfmuld_vsvl __builtin_ve_vl_vfmuld_vsvl
+#define _vel_vfmuld_vsvvl __builtin_ve_vl_vfmuld_vsvvl
+#define _vel_vfmuld_vvvmvl __builtin_ve_vl_vfmuld_vvvmvl
+#define _vel_vfmuld_vsvmvl __builtin_ve_vl_vfmuld_vsvmvl
+#define _vel_vfmuls_vvvl __builtin_ve_vl_vfmuls_vvvl
+#define _vel_vfmuls_vvvvl __builtin_ve_vl_vfmuls_vvvvl
+#define _vel_vfmuls_vsvl __builtin_ve_vl_vfmuls_vsvl
+#define _vel_vfmuls_vsvvl __builtin_ve_vl_vfmuls_vsvvl
+#define _vel_vfmuls_vvvmvl __builtin_ve_vl_vfmuls_vvvmvl
+#define _vel_vfmuls_vsvmvl __builtin_ve_vl_vfmuls_vsvmvl
+#define _vel_pvfmul_vvvl __builtin_ve_vl_pvfmul_vvvl
+#define _vel_pvfmul_vvvvl __builtin_ve_vl_pvfmul_vvvvl
+#define _vel_pvfmul_vsvl __builtin_ve_vl_pvfmul_vsvl
+#define _vel_pvfmul_vsvvl __builtin_ve_vl_pvfmul_vsvvl
+#define _vel_pvfmul_vvvMvl __builtin_ve_vl_pvfmul_vvvMvl
+#define _vel_pvfmul_vsvMvl __builtin_ve_vl_pvfmul_vsvMvl
+#define _vel_vfdivd_vvvl __builtin_ve_vl_vfdivd_vvvl
+#define _vel_vfdivd_vvvvl __builtin_ve_vl_vfdivd_vvvvl
+#define _vel_vfdivd_vsvl __builtin_ve_vl_vfdivd_vsvl
+#define _vel_vfdivd_vsvvl __builtin_ve_vl_vfdivd_vsvvl
+#define _vel_vfdivd_vvvmvl __builtin_ve_vl_vfdivd_vvvmvl
+#define _vel_vfdivd_vsvmvl __builtin_ve_vl_vfdivd_vsvmvl
+#define _vel_vfdivs_vvvl __builtin_ve_vl_vfdivs_vvvl
+#define _vel_vfdivs_vvvvl __builtin_ve_vl_vfdivs_vvvvl
+#define _vel_vfdivs_vsvl __builtin_ve_vl_vfdivs_vsvl
+#define _vel_vfdivs_vsvvl __builtin_ve_vl_vfdivs_vsvvl
+#define _vel_vfdivs_vvvmvl __builtin_ve_vl_vfdivs_vvvmvl
+#define _vel_vfdivs_vsvmvl __builtin_ve_vl_vfdivs_vsvmvl
+#define _vel_vfsqrtd_vvl __builtin_ve_vl_vfsqrtd_vvl
+#define _vel_vfsqrtd_vvvl __builtin_ve_vl_vfsqrtd_vvvl
+#define _vel_vfsqrts_vvl __builtin_ve_vl_vfsqrts_vvl
+#define _vel_vfsqrts_vvvl __builtin_ve_vl_vfsqrts_vvvl
+#define _vel_vfcmpd_vvvl __builtin_ve_vl_vfcmpd_vvvl
+#define _vel_vfcmpd_vvvvl __builtin_ve_vl_vfcmpd_vvvvl
+#define _vel_vfcmpd_vsvl __builtin_ve_vl_vfcmpd_vsvl
+#define _vel_vfcmpd_vsvvl __builtin_ve_vl_vfcmpd_vsvvl
+#define _vel_vfcmpd_vvvmvl __builtin_ve_vl_vfcmpd_vvvmvl
+#define _vel_vfcmpd_vsvmvl __builtin_ve_vl_vfcmpd_vsvmvl
+#define _vel_vfcmps_vvvl __builtin_ve_vl_vfcmps_vvvl
+#define _vel_vfcmps_vvvvl __builtin_ve_vl_vfcmps_vvvvl
+#define _vel_vfcmps_vsvl __builtin_ve_vl_vfcmps_vsvl
+#define _vel_vfcmps_vsvvl __builtin_ve_vl_vfcmps_vsvvl
+#define _vel_vfcmps_vvvmvl __builtin_ve_vl_vfcmps_vvvmvl
+#define _vel_vfcmps_vsvmvl __builtin_ve_vl_vfcmps_vsvmvl
+#define _vel_pvfcmp_vvvl __builtin_ve_vl_pvfcmp_vvvl
+#define _vel_pvfcmp_vvvvl __builtin_ve_vl_pvfcmp_vvvvl
+#define _vel_pvfcmp_vsvl __builtin_ve_vl_pvfcmp_vsvl
+#define _vel_pvfcmp_vsvvl __builtin_ve_vl_pvfcmp_vsvvl
+#define _vel_pvfcmp_vvvMvl __builtin_ve_vl_pvfcmp_vvvMvl
+#define _vel_pvfcmp_vsvMvl __builtin_ve_vl_pvfcmp_vsvMvl
+#define _vel_vfmaxd_vvvl __builtin_ve_vl_vfmaxd_vvvl
+#define _vel_vfmaxd_vvvvl __builtin_ve_vl_vfmaxd_vvvvl
+#define _vel_vfmaxd_vsvl __builtin_ve_vl_vfmaxd_vsvl
+#define _vel_vfmaxd_vsvvl __builtin_ve_vl_vfmaxd_vsvvl
+#define _vel_vfmaxd_vvvmvl __builtin_ve_vl_vfmaxd_vvvmvl
+#define _vel_vfmaxd_vsvmvl __builtin_ve_vl_vfmaxd_vsvmvl
+#define _vel_vfmaxs_vvvl __builtin_ve_vl_vfmaxs_vvvl
+#define _vel_vfmaxs_vvvvl __builtin_ve_vl_vfmaxs_vvvvl
+#define _vel_vfmaxs_vsvl __builtin_ve_vl_vfmaxs_vsvl
+#define _vel_vfmaxs_vsvvl __builtin_ve_vl_vfmaxs_vsvvl
+#define _vel_vfmaxs_vvvmvl __builtin_ve_vl_vfmaxs_vvvmvl
+#define _vel_vfmaxs_vsvmvl __builtin_ve_vl_vfmaxs_vsvmvl
+#define _vel_pvfmax_vvvl __builtin_ve_vl_pvfmax_vvvl
+#define _vel_pvfmax_vvvvl __builtin_ve_vl_pvfmax_vvvvl
+#define _vel_pvfmax_vsvl __builtin_ve_vl_pvfmax_vsvl
+#define _vel_pvfmax_vsvvl __builtin_ve_vl_pvfmax_vsvvl
+#define _vel_pvfmax_vvvMvl __builtin_ve_vl_pvfmax_vvvMvl
+#define _vel_pvfmax_vsvMvl __builtin_ve_vl_pvfmax_vsvMvl
+#define _vel_vfmind_vvvl __builtin_ve_vl_vfmind_vvvl
+#define _vel_vfmind_vvvvl __builtin_ve_vl_vfmind_vvvvl
+#define _vel_vfmind_vsvl __builtin_ve_vl_vfmind_vsvl
+#define _vel_vfmind_vsvvl __builtin_ve_vl_vfmind_vsvvl
+#define _vel_vfmind_vvvmvl __builtin_ve_vl_vfmind_vvvmvl
+#define _vel_vfmind_vsvmvl __builtin_ve_vl_vfmind_vsvmvl
+#define _vel_vfmins_vvvl __builtin_ve_vl_vfmins_vvvl
+#define _vel_vfmins_vvvvl __builtin_ve_vl_vfmins_vvvvl
+#define _vel_vfmins_vsvl __builtin_ve_vl_vfmins_vsvl
+#define _vel_vfmins_vsvvl __builtin_ve_vl_vfmins_vsvvl
+#define _vel_vfmins_vvvmvl __builtin_ve_vl_vfmins_vvvmvl
+#define _vel_vfmins_vsvmvl __builtin_ve_vl_vfmins_vsvmvl
+#define _vel_pvfmin_vvvl __builtin_ve_vl_pvfmin_vvvl
+#define _vel_pvfmin_vvvvl __builtin_ve_vl_pvfmin_vvvvl
+#define _vel_pvfmin_vsvl __builtin_ve_vl_pvfmin_vsvl
+#define _vel_pvfmin_vsvvl __builtin_ve_vl_pvfmin_vsvvl
+#define _vel_pvfmin_vvvMvl __builtin_ve_vl_pvfmin_vvvMvl
+#define _vel_pvfmin_vsvMvl __builtin_ve_vl_pvfmin_vsvMvl
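+/* Packed single-precision sketch (illustrative, argument order assumed from
+ * the _vvvl suffix): each 64-bit element of a __vr holds two floats, so a
+ * packed op with vector length vl touches 2*vl float values.
+ *
+ *   // c[0..2*vl) = a[0..2*vl) + b[0..2*vl), floats packed pairwise
+ *   __vr va = _vel_vld_vssl(8, a, vl);       // loads 2*vl packed floats
+ *   __vr vb = _vel_vld_vssl(8, b, vl);
+ *   __vr vc = _vel_pvfadd_vvvl(va, vb, vl);
+ *   _vel_vst_vssl(vc, 8, c, vl);
+ */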
+#define _vel_vfmadd_vvvvl __builtin_ve_vl_vfmadd_vvvvl
+#define _vel_vfmadd_vvvvvl __builtin_ve_vl_vfmadd_vvvvvl
+#define _vel_vfmadd_vsvvl __builtin_ve_vl_vfmadd_vsvvl
+#define _vel_vfmadd_vsvvvl __builtin_ve_vl_vfmadd_vsvvvl
+#define _vel_vfmadd_vvsvl __builtin_ve_vl_vfmadd_vvsvl
+#define _vel_vfmadd_vvsvvl __builtin_ve_vl_vfmadd_vvsvvl
+#define _vel_vfmadd_vvvvmvl __builtin_ve_vl_vfmadd_vvvvmvl
+#define _vel_vfmadd_vsvvmvl __builtin_ve_vl_vfmadd_vsvvmvl
+#define _vel_vfmadd_vvsvmvl __builtin_ve_vl_vfmadd_vvsvmvl
+#define _vel_vfmads_vvvvl __builtin_ve_vl_vfmads_vvvvl
+#define _vel_vfmads_vvvvvl __builtin_ve_vl_vfmads_vvvvvl
+#define _vel_vfmads_vsvvl __builtin_ve_vl_vfmads_vsvvl
+#define _vel_vfmads_vsvvvl __builtin_ve_vl_vfmads_vsvvvl
+#define _vel_vfmads_vvsvl __builtin_ve_vl_vfmads_vvsvl
+#define _vel_vfmads_vvsvvl __builtin_ve_vl_vfmads_vvsvvl
+#define _vel_vfmads_vvvvmvl __builtin_ve_vl_vfmads_vvvvmvl
+#define _vel_vfmads_vsvvmvl __builtin_ve_vl_vfmads_vsvvmvl
+#define _vel_vfmads_vvsvmvl __builtin_ve_vl_vfmads_vvsvmvl
+#define _vel_pvfmad_vvvvl __builtin_ve_vl_pvfmad_vvvvl
+#define _vel_pvfmad_vvvvvl __builtin_ve_vl_pvfmad_vvvvvl
+#define _vel_pvfmad_vsvvl __builtin_ve_vl_pvfmad_vsvvl
+#define _vel_pvfmad_vsvvvl __builtin_ve_vl_pvfmad_vsvvvl
+#define _vel_pvfmad_vvsvl __builtin_ve_vl_pvfmad_vvsvl
+#define _vel_pvfmad_vvsvvl __builtin_ve_vl_pvfmad_vvsvvl
+#define _vel_pvfmad_vvvvMvl __builtin_ve_vl_pvfmad_vvvvMvl
+#define _vel_pvfmad_vsvvMvl __builtin_ve_vl_pvfmad_vsvvMvl
+#define _vel_pvfmad_vvsvMvl __builtin_ve_vl_pvfmad_vvsvMvl
+#define _vel_vfmsbd_vvvvl __builtin_ve_vl_vfmsbd_vvvvl
+#define _vel_vfmsbd_vvvvvl __builtin_ve_vl_vfmsbd_vvvvvl
+#define _vel_vfmsbd_vsvvl __builtin_ve_vl_vfmsbd_vsvvl
+#define _vel_vfmsbd_vsvvvl __builtin_ve_vl_vfmsbd_vsvvvl
+#define _vel_vfmsbd_vvsvl __builtin_ve_vl_vfmsbd_vvsvl
+#define _vel_vfmsbd_vvsvvl __builtin_ve_vl_vfmsbd_vvsvvl
+#define _vel_vfmsbd_vvvvmvl __builtin_ve_vl_vfmsbd_vvvvmvl
+#define _vel_vfmsbd_vsvvmvl __builtin_ve_vl_vfmsbd_vsvvmvl
+#define _vel_vfmsbd_vvsvmvl __builtin_ve_vl_vfmsbd_vvsvmvl
+#define _vel_vfmsbs_vvvvl __builtin_ve_vl_vfmsbs_vvvvl
+#define _vel_vfmsbs_vvvvvl __builtin_ve_vl_vfmsbs_vvvvvl
+#define _vel_vfmsbs_vsvvl __builtin_ve_vl_vfmsbs_vsvvl
+#define _vel_vfmsbs_vsvvvl __builtin_ve_vl_vfmsbs_vsvvvl
+#define _vel_vfmsbs_vvsvl __builtin_ve_vl_vfmsbs_vvsvl
+#define _vel_vfmsbs_vvsvvl __builtin_ve_vl_vfmsbs_vvsvvl
+#define _vel_vfmsbs_vvvvmvl __builtin_ve_vl_vfmsbs_vvvvmvl
+#define _vel_vfmsbs_vsvvmvl __builtin_ve_vl_vfmsbs_vsvvmvl
+#define _vel_vfmsbs_vvsvmvl __builtin_ve_vl_vfmsbs_vvsvmvl
+#define _vel_pvfmsb_vvvvl __builtin_ve_vl_pvfmsb_vvvvl
+#define _vel_pvfmsb_vvvvvl __builtin_ve_vl_pvfmsb_vvvvvl
+#define _vel_pvfmsb_vsvvl __builtin_ve_vl_pvfmsb_vsvvl
+#define _vel_pvfmsb_vsvvvl __builtin_ve_vl_pvfmsb_vsvvvl
+#define _vel_pvfmsb_vvsvl __builtin_ve_vl_pvfmsb_vvsvl
+#define _vel_pvfmsb_vvsvvl __builtin_ve_vl_pvfmsb_vvsvvl
+#define _vel_pvfmsb_vvvvMvl __builtin_ve_vl_pvfmsb_vvvvMvl
+#define _vel_pvfmsb_vsvvMvl __builtin_ve_vl_pvfmsb_vsvvMvl
+#define _vel_pvfmsb_vvsvMvl __builtin_ve_vl_pvfmsb_vvsvMvl
+#define _vel_vfnmadd_vvvvl __builtin_ve_vl_vfnmadd_vvvvl
+#define _vel_vfnmadd_vvvvvl __builtin_ve_vl_vfnmadd_vvvvvl
+#define _vel_vfnmadd_vsvvl __builtin_ve_vl_vfnmadd_vsvvl
+#define _vel_vfnmadd_vsvvvl __builtin_ve_vl_vfnmadd_vsvvvl
+#define _vel_vfnmadd_vvsvl __builtin_ve_vl_vfnmadd_vvsvl
+#define _vel_vfnmadd_vvsvvl __builtin_ve_vl_vfnmadd_vvsvvl
+#define _vel_vfnmadd_vvvvmvl __builtin_ve_vl_vfnmadd_vvvvmvl
+#define _vel_vfnmadd_vsvvmvl __builtin_ve_vl_vfnmadd_vsvvmvl
+#define _vel_vfnmadd_vvsvmvl __builtin_ve_vl_vfnmadd_vvsvmvl
+#define _vel_vfnmads_vvvvl __builtin_ve_vl_vfnmads_vvvvl
+#define _vel_vfnmads_vvvvvl __builtin_ve_vl_vfnmads_vvvvvl
+#define _vel_vfnmads_vsvvl __builtin_ve_vl_vfnmads_vsvvl
+#define _vel_vfnmads_vsvvvl __builtin_ve_vl_vfnmads_vsvvvl
+#define _vel_vfnmads_vvsvl __builtin_ve_vl_vfnmads_vvsvl
+#define _vel_vfnmads_vvsvvl __builtin_ve_vl_vfnmads_vvsvvl
+#define _vel_vfnmads_vvvvmvl __builtin_ve_vl_vfnmads_vvvvmvl
+#define _vel_vfnmads_vsvvmvl __builtin_ve_vl_vfnmads_vsvvmvl
+#define _vel_vfnmads_vvsvmvl __builtin_ve_vl_vfnmads_vvsvmvl
+#define _vel_pvfnmad_vvvvl __builtin_ve_vl_pvfnmad_vvvvl
+#define _vel_pvfnmad_vvvvvl __builtin_ve_vl_pvfnmad_vvvvvl
+#define _vel_pvfnmad_vsvvl __builtin_ve_vl_pvfnmad_vsvvl
+#define _vel_pvfnmad_vsvvvl __builtin_ve_vl_pvfnmad_vsvvvl
+#define _vel_pvfnmad_vvsvl __builtin_ve_vl_pvfnmad_vvsvl
+#define _vel_pvfnmad_vvsvvl __builtin_ve_vl_pvfnmad_vvsvvl
+#define _vel_pvfnmad_vvvvMvl __builtin_ve_vl_pvfnmad_vvvvMvl
+#define _vel_pvfnmad_vsvvMvl __builtin_ve_vl_pvfnmad_vsvvMvl
+#define _vel_pvfnmad_vvsvMvl __builtin_ve_vl_pvfnmad_vvsvMvl
+#define _vel_vfnmsbd_vvvvl __builtin_ve_vl_vfnmsbd_vvvvl
+#define _vel_vfnmsbd_vvvvvl __builtin_ve_vl_vfnmsbd_vvvvvl
+#define _vel_vfnmsbd_vsvvl __builtin_ve_vl_vfnmsbd_vsvvl
+#define _vel_vfnmsbd_vsvvvl __builtin_ve_vl_vfnmsbd_vsvvvl
+#define _vel_vfnmsbd_vvsvl __builtin_ve_vl_vfnmsbd_vvsvl
+#define _vel_vfnmsbd_vvsvvl __builtin_ve_vl_vfnmsbd_vvsvvl
+#define _vel_vfnmsbd_vvvvmvl __builtin_ve_vl_vfnmsbd_vvvvmvl
+#define _vel_vfnmsbd_vsvvmvl __builtin_ve_vl_vfnmsbd_vsvvmvl
+#define _vel_vfnmsbd_vvsvmvl __builtin_ve_vl_vfnmsbd_vvsvmvl
+#define _vel_vfnmsbs_vvvvl __builtin_ve_vl_vfnmsbs_vvvvl
+#define _vel_vfnmsbs_vvvvvl __builtin_ve_vl_vfnmsbs_vvvvvl
+#define _vel_vfnmsbs_vsvvl __builtin_ve_vl_vfnmsbs_vsvvl
+#define _vel_vfnmsbs_vsvvvl __builtin_ve_vl_vfnmsbs_vsvvvl
+#define _vel_vfnmsbs_vvsvl __builtin_ve_vl_vfnmsbs_vvsvl
+#define _vel_vfnmsbs_vvsvvl __builtin_ve_vl_vfnmsbs_vvsvvl
+#define _vel_vfnmsbs_vvvvmvl __builtin_ve_vl_vfnmsbs_vvvvmvl
+#define _vel_vfnmsbs_vsvvmvl __builtin_ve_vl_vfnmsbs_vsvvmvl
+#define _vel_vfnmsbs_vvsvmvl __builtin_ve_vl_vfnmsbs_vvsvmvl
+#define _vel_pvfnmsb_vvvvl __builtin_ve_vl_pvfnmsb_vvvvl
+#define _vel_pvfnmsb_vvvvvl __builtin_ve_vl_pvfnmsb_vvvvvl
+#define _vel_pvfnmsb_vsvvl __builtin_ve_vl_pvfnmsb_vsvvl
+#define _vel_pvfnmsb_vsvvvl __builtin_ve_vl_pvfnmsb_vsvvvl
+#define _vel_pvfnmsb_vvsvl __builtin_ve_vl_pvfnmsb_vvsvl
+#define _vel_pvfnmsb_vvsvvl __builtin_ve_vl_pvfnmsb_vvsvvl
+#define _vel_pvfnmsb_vvvvMvl __builtin_ve_vl_pvfnmsb_vvvvMvl
+#define _vel_pvfnmsb_vsvvMvl __builtin_ve_vl_pvfnmsb_vsvvMvl
+#define _vel_pvfnmsb_vvsvMvl __builtin_ve_vl_pvfnmsb_vvsvMvl
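+/* The vfmad / vfmsb / vfnmad / vfnmsb groups are the fused multiply-add
+ * family (d = double, s = single, pv = packed single).  AXPY-style sketch --
+ * the operand order (multiply the first two operands, add the third) is an
+ * assumption to check against the VE intrinsics manual:
+ *
+ *   // y[i] = s * x[i] + y[i] for vl elements (operand order assumed)
+ *   __vr vx = _vel_vld_vssl(8, x, vl);
+ *   __vr vy = _vel_vld_vssl(8, y, vl);
+ *   vy = _vel_vfmadd_vsvvl(s, vx, vy, vl);
+ *   _vel_vst_vssl(vy, 8, y, vl);
+ */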
+#define _vel_vrcpd_vvl __builtin_ve_vl_vrcpd_vvl
+#define _vel_vrcpd_vvvl __builtin_ve_vl_vrcpd_vvvl
+#define _vel_vrcps_vvl __builtin_ve_vl_vrcps_vvl
+#define _vel_vrcps_vvvl __builtin_ve_vl_vrcps_vvvl
+#define _vel_pvrcp_vvl __builtin_ve_vl_pvrcp_vvl
+#define _vel_pvrcp_vvvl __builtin_ve_vl_pvrcp_vvvl
+#define _vel_vrsqrtd_vvl __builtin_ve_vl_vrsqrtd_vvl
+#define _vel_vrsqrtd_vvvl __builtin_ve_vl_vrsqrtd_vvvl
+#define _vel_vrsqrts_vvl __builtin_ve_vl_vrsqrts_vvl
+#define _vel_vrsqrts_vvvl __builtin_ve_vl_vrsqrts_vvvl
+#define _vel_pvrsqrt_vvl __builtin_ve_vl_pvrsqrt_vvl
+#define _vel_pvrsqrt_vvvl __builtin_ve_vl_pvrsqrt_vvvl
+#define _vel_vrsqrtdnex_vvl __builtin_ve_vl_vrsqrtdnex_vvl
+#define _vel_vrsqrtdnex_vvvl __builtin_ve_vl_vrsqrtdnex_vvvl
+#define _vel_vrsqrtsnex_vvl __builtin_ve_vl_vrsqrtsnex_vvl
+#define _vel_vrsqrtsnex_vvvl __builtin_ve_vl_vrsqrtsnex_vvvl
+#define _vel_pvrsqrtnex_vvl __builtin_ve_vl_pvrsqrtnex_vvl
+#define _vel_pvrsqrtnex_vvvl __builtin_ve_vl_pvrsqrtnex_vvvl
+#define _vel_vcvtwdsx_vvl __builtin_ve_vl_vcvtwdsx_vvl
+#define _vel_vcvtwdsx_vvvl __builtin_ve_vl_vcvtwdsx_vvvl
+#define _vel_vcvtwdsx_vvmvl __builtin_ve_vl_vcvtwdsx_vvmvl
+#define _vel_vcvtwdsxrz_vvl __builtin_ve_vl_vcvtwdsxrz_vvl
+#define _vel_vcvtwdsxrz_vvvl __builtin_ve_vl_vcvtwdsxrz_vvvl
+#define _vel_vcvtwdsxrz_vvmvl __builtin_ve_vl_vcvtwdsxrz_vvmvl
+#define _vel_vcvtwdzx_vvl __builtin_ve_vl_vcvtwdzx_vvl
+#define _vel_vcvtwdzx_vvvl __builtin_ve_vl_vcvtwdzx_vvvl
+#define _vel_vcvtwdzx_vvmvl __builtin_ve_vl_vcvtwdzx_vvmvl
+#define _vel_vcvtwdzxrz_vvl __builtin_ve_vl_vcvtwdzxrz_vvl
+#define _vel_vcvtwdzxrz_vvvl __builtin_ve_vl_vcvtwdzxrz_vvvl
+#define _vel_vcvtwdzxrz_vvmvl __builtin_ve_vl_vcvtwdzxrz_vvmvl
+#define _vel_vcvtwssx_vvl __builtin_ve_vl_vcvtwssx_vvl
+#define _vel_vcvtwssx_vvvl __builtin_ve_vl_vcvtwssx_vvvl
+#define _vel_vcvtwssx_vvmvl __builtin_ve_vl_vcvtwssx_vvmvl
+#define _vel_vcvtwssxrz_vvl __builtin_ve_vl_vcvtwssxrz_vvl
+#define _vel_vcvtwssxrz_vvvl __builtin_ve_vl_vcvtwssxrz_vvvl
+#define _vel_vcvtwssxrz_vvmvl __builtin_ve_vl_vcvtwssxrz_vvmvl
+#define _vel_vcvtwszx_vvl __builtin_ve_vl_vcvtwszx_vvl
+#define _vel_vcvtwszx_vvvl __builtin_ve_vl_vcvtwszx_vvvl
+#define _vel_vcvtwszx_vvmvl __builtin_ve_vl_vcvtwszx_vvmvl
+#define _vel_vcvtwszxrz_vvl __builtin_ve_vl_vcvtwszxrz_vvl
+#define _vel_vcvtwszxrz_vvvl __builtin_ve_vl_vcvtwszxrz_vvvl
+#define _vel_vcvtwszxrz_vvmvl __builtin_ve_vl_vcvtwszxrz_vvmvl
+#define _vel_pvcvtws_vvl __builtin_ve_vl_pvcvtws_vvl
+#define _vel_pvcvtws_vvvl __builtin_ve_vl_pvcvtws_vvvl
+#define _vel_pvcvtws_vvMvl __builtin_ve_vl_pvcvtws_vvMvl
+#define _vel_pvcvtwsrz_vvl __builtin_ve_vl_pvcvtwsrz_vvl
+#define _vel_pvcvtwsrz_vvvl __builtin_ve_vl_pvcvtwsrz_vvvl
+#define _vel_pvcvtwsrz_vvMvl __builtin_ve_vl_pvcvtwsrz_vvMvl
+#define _vel_vcvtld_vvl __builtin_ve_vl_vcvtld_vvl
+#define _vel_vcvtld_vvvl __builtin_ve_vl_vcvtld_vvvl
+#define _vel_vcvtld_vvmvl __builtin_ve_vl_vcvtld_vvmvl
+#define _vel_vcvtldrz_vvl __builtin_ve_vl_vcvtldrz_vvl
+#define _vel_vcvtldrz_vvvl __builtin_ve_vl_vcvtldrz_vvvl
+#define _vel_vcvtldrz_vvmvl __builtin_ve_vl_vcvtldrz_vvmvl
+#define _vel_vcvtdw_vvl __builtin_ve_vl_vcvtdw_vvl
+#define _vel_vcvtdw_vvvl __builtin_ve_vl_vcvtdw_vvvl
+#define _vel_vcvtsw_vvl __builtin_ve_vl_vcvtsw_vvl
+#define _vel_vcvtsw_vvvl __builtin_ve_vl_vcvtsw_vvvl
+#define _vel_pvcvtsw_vvl __builtin_ve_vl_pvcvtsw_vvl
+#define _vel_pvcvtsw_vvvl __builtin_ve_vl_pvcvtsw_vvvl
+#define _vel_vcvtdl_vvl __builtin_ve_vl_vcvtdl_vvl
+#define _vel_vcvtdl_vvvl __builtin_ve_vl_vcvtdl_vvvl
+#define _vel_vcvtds_vvl __builtin_ve_vl_vcvtds_vvl
+#define _vel_vcvtds_vvvl __builtin_ve_vl_vcvtds_vvvl
+#define _vel_vcvtsd_vvl __builtin_ve_vl_vcvtsd_vvl
+#define _vel_vcvtsd_vvvl __builtin_ve_vl_vcvtsd_vvvl
+#define _vel_vmrg_vvvml __builtin_ve_vl_vmrg_vvvml
+#define _vel_vmrg_vvvmvl __builtin_ve_vl_vmrg_vvvmvl
+#define _vel_vmrg_vsvml __builtin_ve_vl_vmrg_vsvml
+#define _vel_vmrg_vsvmvl __builtin_ve_vl_vmrg_vsvmvl
+#define _vel_vmrgw_vvvMl __builtin_ve_vl_vmrgw_vvvMl
+#define _vel_vmrgw_vvvMvl __builtin_ve_vl_vmrgw_vvvMvl
+#define _vel_vmrgw_vsvMl __builtin_ve_vl_vmrgw_vsvMl
+#define _vel_vmrgw_vsvMvl __builtin_ve_vl_vmrgw_vsvMvl
+#define _vel_vshf_vvvsl __builtin_ve_vl_vshf_vvvsl
+#define _vel_vshf_vvvsvl __builtin_ve_vl_vshf_vvvsvl
+#define _vel_vcp_vvmvl __builtin_ve_vl_vcp_vvmvl
+#define _vel_vex_vvmvl __builtin_ve_vl_vex_vvmvl
+#define _vel_vfmklat_ml __builtin_ve_vl_vfmklat_ml
+#define _vel_vfmklaf_ml __builtin_ve_vl_vfmklaf_ml
+#define _vel_pvfmkat_Ml __builtin_ve_vl_pvfmkat_Ml
+#define _vel_pvfmkaf_Ml __builtin_ve_vl_pvfmkaf_Ml
+#define _vel_vfmklgt_mvl __builtin_ve_vl_vfmklgt_mvl
+#define _vel_vfmklgt_mvml __builtin_ve_vl_vfmklgt_mvml
+#define _vel_vfmkllt_mvl __builtin_ve_vl_vfmkllt_mvl
+#define _vel_vfmkllt_mvml __builtin_ve_vl_vfmkllt_mvml
+#define _vel_vfmklne_mvl __builtin_ve_vl_vfmklne_mvl
+#define _vel_vfmklne_mvml __builtin_ve_vl_vfmklne_mvml
+#define _vel_vfmkleq_mvl __builtin_ve_vl_vfmkleq_mvl
+#define _vel_vfmkleq_mvml __builtin_ve_vl_vfmkleq_mvml
+#define _vel_vfmklge_mvl __builtin_ve_vl_vfmklge_mvl
+#define _vel_vfmklge_mvml __builtin_ve_vl_vfmklge_mvml
+#define _vel_vfmklle_mvl __builtin_ve_vl_vfmklle_mvl
+#define _vel_vfmklle_mvml __builtin_ve_vl_vfmklle_mvml
+#define _vel_vfmklnum_mvl __builtin_ve_vl_vfmklnum_mvl
+#define _vel_vfmklnum_mvml __builtin_ve_vl_vfmklnum_mvml
+#define _vel_vfmklnan_mvl __builtin_ve_vl_vfmklnan_mvl
+#define _vel_vfmklnan_mvml __builtin_ve_vl_vfmklnan_mvml
+#define _vel_vfmklgtnan_mvl __builtin_ve_vl_vfmklgtnan_mvl
+#define _vel_vfmklgtnan_mvml __builtin_ve_vl_vfmklgtnan_mvml
+#define _vel_vfmklltnan_mvl __builtin_ve_vl_vfmklltnan_mvl
+#define _vel_vfmklltnan_mvml __builtin_ve_vl_vfmklltnan_mvml
+#define _vel_vfmklnenan_mvl __builtin_ve_vl_vfmklnenan_mvl
+#define _vel_vfmklnenan_mvml __builtin_ve_vl_vfmklnenan_mvml
+#define _vel_vfmkleqnan_mvl __builtin_ve_vl_vfmkleqnan_mvl
+#define _vel_vfmkleqnan_mvml __builtin_ve_vl_vfmkleqnan_mvml
+#define _vel_vfmklgenan_mvl __builtin_ve_vl_vfmklgenan_mvl
+#define _vel_vfmklgenan_mvml __builtin_ve_vl_vfmklgenan_mvml
+#define _vel_vfmkllenan_mvl __builtin_ve_vl_vfmkllenan_mvl
+#define _vel_vfmkllenan_mvml __builtin_ve_vl_vfmkllenan_mvml
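+/* The vfmk / pvfmk intrinsics build masks from per-element tests (gt, lt,
+ * ne, eq, ge, le, plus num/nan for floating-point classification); l/w/d/s
+ * select the element type, and the at/af forms above yield all-true /
+ * all-false masks.  Sketch of predicated arithmetic, assuming the test
+ * compares each element of the input vector against zero:
+ *
+ *   __vr    d = _vel_vsubsl_vvvl(va, vb, vl);          // a - b
+ *   __vm256 m = _vel_vfmklgt_mvl(d, vl);               // mask: a > b
+ *   __vr    r = _vel_vaddsl_vvvmvl(va, vb, m, vd, vl); // add where a > b,
+ *                                                      // vd elsewhere
+ */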
+#define _vel_vfmkwgt_mvl __builtin_ve_vl_vfmkwgt_mvl
+#define _vel_vfmkwgt_mvml __builtin_ve_vl_vfmkwgt_mvml
+#define _vel_vfmkwlt_mvl __builtin_ve_vl_vfmkwlt_mvl
+#define _vel_vfmkwlt_mvml __builtin_ve_vl_vfmkwlt_mvml
+#define _vel_vfmkwne_mvl __builtin_ve_vl_vfmkwne_mvl
+#define _vel_vfmkwne_mvml __builtin_ve_vl_vfmkwne_mvml
+#define _vel_vfmkweq_mvl __builtin_ve_vl_vfmkweq_mvl
+#define _vel_vfmkweq_mvml __builtin_ve_vl_vfmkweq_mvml
+#define _vel_vfmkwge_mvl __builtin_ve_vl_vfmkwge_mvl
+#define _vel_vfmkwge_mvml __builtin_ve_vl_vfmkwge_mvml
+#define _vel_vfmkwle_mvl __builtin_ve_vl_vfmkwle_mvl
+#define _vel_vfmkwle_mvml __builtin_ve_vl_vfmkwle_mvml
+#define _vel_vfmkwnum_mvl __builtin_ve_vl_vfmkwnum_mvl
+#define _vel_vfmkwnum_mvml __builtin_ve_vl_vfmkwnum_mvml
+#define _vel_vfmkwnan_mvl __builtin_ve_vl_vfmkwnan_mvl
+#define _vel_vfmkwnan_mvml __builtin_ve_vl_vfmkwnan_mvml
+#define _vel_vfmkwgtnan_mvl __builtin_ve_vl_vfmkwgtnan_mvl
+#define _vel_vfmkwgtnan_mvml __builtin_ve_vl_vfmkwgtnan_mvml
+#define _vel_vfmkwltnan_mvl __builtin_ve_vl_vfmkwltnan_mvl
+#define _vel_vfmkwltnan_mvml __builtin_ve_vl_vfmkwltnan_mvml
+#define _vel_vfmkwnenan_mvl __builtin_ve_vl_vfmkwnenan_mvl
+#define _vel_vfmkwnenan_mvml __builtin_ve_vl_vfmkwnenan_mvml
+#define _vel_vfmkweqnan_mvl __builtin_ve_vl_vfmkweqnan_mvl
+#define _vel_vfmkweqnan_mvml __builtin_ve_vl_vfmkweqnan_mvml
+#define _vel_vfmkwgenan_mvl __builtin_ve_vl_vfmkwgenan_mvl
+#define _vel_vfmkwgenan_mvml __builtin_ve_vl_vfmkwgenan_mvml
+#define _vel_vfmkwlenan_mvl __builtin_ve_vl_vfmkwlenan_mvl
+#define _vel_vfmkwlenan_mvml __builtin_ve_vl_vfmkwlenan_mvml
+#define _vel_pvfmkwlogt_mvl __builtin_ve_vl_pvfmkwlogt_mvl
+#define _vel_pvfmkwupgt_mvl __builtin_ve_vl_pvfmkwupgt_mvl
+#define _vel_pvfmkwlogt_mvml __builtin_ve_vl_pvfmkwlogt_mvml
+#define _vel_pvfmkwupgt_mvml __builtin_ve_vl_pvfmkwupgt_mvml
+#define _vel_pvfmkwlolt_mvl __builtin_ve_vl_pvfmkwlolt_mvl
+#define _vel_pvfmkwuplt_mvl __builtin_ve_vl_pvfmkwuplt_mvl
+#define _vel_pvfmkwlolt_mvml __builtin_ve_vl_pvfmkwlolt_mvml
+#define _vel_pvfmkwuplt_mvml __builtin_ve_vl_pvfmkwuplt_mvml
+#define _vel_pvfmkwlone_mvl __builtin_ve_vl_pvfmkwlone_mvl
+#define _vel_pvfmkwupne_mvl __builtin_ve_vl_pvfmkwupne_mvl
+#define _vel_pvfmkwlone_mvml __builtin_ve_vl_pvfmkwlone_mvml
+#define _vel_pvfmkwupne_mvml __builtin_ve_vl_pvfmkwupne_mvml
+#define _vel_pvfmkwloeq_mvl __builtin_ve_vl_pvfmkwloeq_mvl
+#define _vel_pvfmkwupeq_mvl __builtin_ve_vl_pvfmkwupeq_mvl
+#define _vel_pvfmkwloeq_mvml __builtin_ve_vl_pvfmkwloeq_mvml
+#define _vel_pvfmkwupeq_mvml __builtin_ve_vl_pvfmkwupeq_mvml
+#define _vel_pvfmkwloge_mvl __builtin_ve_vl_pvfmkwloge_mvl
+#define _vel_pvfmkwupge_mvl __builtin_ve_vl_pvfmkwupge_mvl
+#define _vel_pvfmkwloge_mvml __builtin_ve_vl_pvfmkwloge_mvml
+#define _vel_pvfmkwupge_mvml __builtin_ve_vl_pvfmkwupge_mvml
+#define _vel_pvfmkwlole_mvl __builtin_ve_vl_pvfmkwlole_mvl
+#define _vel_pvfmkwuple_mvl __builtin_ve_vl_pvfmkwuple_mvl
+#define _vel_pvfmkwlole_mvml __builtin_ve_vl_pvfmkwlole_mvml
+#define _vel_pvfmkwuple_mvml __builtin_ve_vl_pvfmkwuple_mvml
+#define _vel_pvfmkwlonum_mvl __builtin_ve_vl_pvfmkwlonum_mvl
+#define _vel_pvfmkwupnum_mvl __builtin_ve_vl_pvfmkwupnum_mvl
+#define _vel_pvfmkwlonum_mvml __builtin_ve_vl_pvfmkwlonum_mvml
+#define _vel_pvfmkwupnum_mvml __builtin_ve_vl_pvfmkwupnum_mvml
+#define _vel_pvfmkwlonan_mvl __builtin_ve_vl_pvfmkwlonan_mvl
+#define _vel_pvfmkwupnan_mvl __builtin_ve_vl_pvfmkwupnan_mvl
+#define _vel_pvfmkwlonan_mvml __builtin_ve_vl_pvfmkwlonan_mvml
+#define _vel_pvfmkwupnan_mvml __builtin_ve_vl_pvfmkwupnan_mvml
+#define _vel_pvfmkwlogtnan_mvl __builtin_ve_vl_pvfmkwlogtnan_mvl
+#define _vel_pvfmkwupgtnan_mvl __builtin_ve_vl_pvfmkwupgtnan_mvl
+#define _vel_pvfmkwlogtnan_mvml __builtin_ve_vl_pvfmkwlogtnan_mvml
+#define _vel_pvfmkwupgtnan_mvml __builtin_ve_vl_pvfmkwupgtnan_mvml
+#define _vel_pvfmkwloltnan_mvl __builtin_ve_vl_pvfmkwloltnan_mvl
+#define _vel_pvfmkwupltnan_mvl __builtin_ve_vl_pvfmkwupltnan_mvl
+#define _vel_pvfmkwloltnan_mvml __builtin_ve_vl_pvfmkwloltnan_mvml
+#define _vel_pvfmkwupltnan_mvml __builtin_ve_vl_pvfmkwupltnan_mvml
+#define _vel_pvfmkwlonenan_mvl __builtin_ve_vl_pvfmkwlonenan_mvl
+#define _vel_pvfmkwupnenan_mvl __builtin_ve_vl_pvfmkwupnenan_mvl
+#define _vel_pvfmkwlonenan_mvml __builtin_ve_vl_pvfmkwlonenan_mvml
+#define _vel_pvfmkwupnenan_mvml __builtin_ve_vl_pvfmkwupnenan_mvml
+#define _vel_pvfmkwloeqnan_mvl __builtin_ve_vl_pvfmkwloeqnan_mvl
+#define _vel_pvfmkwupeqnan_mvl __builtin_ve_vl_pvfmkwupeqnan_mvl
+#define _vel_pvfmkwloeqnan_mvml __builtin_ve_vl_pvfmkwloeqnan_mvml
+#define _vel_pvfmkwupeqnan_mvml __builtin_ve_vl_pvfmkwupeqnan_mvml
+#define _vel_pvfmkwlogenan_mvl __builtin_ve_vl_pvfmkwlogenan_mvl
+#define _vel_pvfmkwupgenan_mvl __builtin_ve_vl_pvfmkwupgenan_mvl
+#define _vel_pvfmkwlogenan_mvml __builtin_ve_vl_pvfmkwlogenan_mvml
+#define _vel_pvfmkwupgenan_mvml __builtin_ve_vl_pvfmkwupgenan_mvml
+#define _vel_pvfmkwlolenan_mvl __builtin_ve_vl_pvfmkwlolenan_mvl
+#define _vel_pvfmkwuplenan_mvl __builtin_ve_vl_pvfmkwuplenan_mvl
+#define _vel_pvfmkwlolenan_mvml __builtin_ve_vl_pvfmkwlolenan_mvml
+#define _vel_pvfmkwuplenan_mvml __builtin_ve_vl_pvfmkwuplenan_mvml
+#define _vel_pvfmkwgt_Mvl __builtin_ve_vl_pvfmkwgt_Mvl
+#define _vel_pvfmkwgt_MvMl __builtin_ve_vl_pvfmkwgt_MvMl
+#define _vel_pvfmkwlt_Mvl __builtin_ve_vl_pvfmkwlt_Mvl
+#define _vel_pvfmkwlt_MvMl __builtin_ve_vl_pvfmkwlt_MvMl
+#define _vel_pvfmkwne_Mvl __builtin_ve_vl_pvfmkwne_Mvl
+#define _vel_pvfmkwne_MvMl __builtin_ve_vl_pvfmkwne_MvMl
+#define _vel_pvfmkweq_Mvl __builtin_ve_vl_pvfmkweq_Mvl
+#define _vel_pvfmkweq_MvMl __builtin_ve_vl_pvfmkweq_MvMl
+#define _vel_pvfmkwge_Mvl __builtin_ve_vl_pvfmkwge_Mvl
+#define _vel_pvfmkwge_MvMl __builtin_ve_vl_pvfmkwge_MvMl
+#define _vel_pvfmkwle_Mvl __builtin_ve_vl_pvfmkwle_Mvl
+#define _vel_pvfmkwle_MvMl __builtin_ve_vl_pvfmkwle_MvMl
+#define _vel_pvfmkwnum_Mvl __builtin_ve_vl_pvfmkwnum_Mvl
+#define _vel_pvfmkwnum_MvMl __builtin_ve_vl_pvfmkwnum_MvMl
+#define _vel_pvfmkwnan_Mvl __builtin_ve_vl_pvfmkwnan_Mvl
+#define _vel_pvfmkwnan_MvMl __builtin_ve_vl_pvfmkwnan_MvMl
+#define _vel_pvfmkwgtnan_Mvl __builtin_ve_vl_pvfmkwgtnan_Mvl
+#define _vel_pvfmkwgtnan_MvMl __builtin_ve_vl_pvfmkwgtnan_MvMl
+#define _vel_pvfmkwltnan_Mvl __builtin_ve_vl_pvfmkwltnan_Mvl
+#define _vel_pvfmkwltnan_MvMl __builtin_ve_vl_pvfmkwltnan_MvMl
+#define _vel_pvfmkwnenan_Mvl __builtin_ve_vl_pvfmkwnenan_Mvl
+#define _vel_pvfmkwnenan_MvMl __builtin_ve_vl_pvfmkwnenan_MvMl
+#define _vel_pvfmkweqnan_Mvl __builtin_ve_vl_pvfmkweqnan_Mvl
+#define _vel_pvfmkweqnan_MvMl __builtin_ve_vl_pvfmkweqnan_MvMl
+#define _vel_pvfmkwgenan_Mvl __builtin_ve_vl_pvfmkwgenan_Mvl
+#define _vel_pvfmkwgenan_MvMl __builtin_ve_vl_pvfmkwgenan_MvMl
+#define _vel_pvfmkwlenan_Mvl __builtin_ve_vl_pvfmkwlenan_Mvl
+#define _vel_pvfmkwlenan_MvMl __builtin_ve_vl_pvfmkwlenan_MvMl
+#define _vel_vfmkdgt_mvl __builtin_ve_vl_vfmkdgt_mvl
+#define _vel_vfmkdgt_mvml __builtin_ve_vl_vfmkdgt_mvml
+#define _vel_vfmkdlt_mvl __builtin_ve_vl_vfmkdlt_mvl
+#define _vel_vfmkdlt_mvml __builtin_ve_vl_vfmkdlt_mvml
+#define _vel_vfmkdne_mvl __builtin_ve_vl_vfmkdne_mvl
+#define _vel_vfmkdne_mvml __builtin_ve_vl_vfmkdne_mvml
+#define _vel_vfmkdeq_mvl __builtin_ve_vl_vfmkdeq_mvl
+#define _vel_vfmkdeq_mvml __builtin_ve_vl_vfmkdeq_mvml
+#define _vel_vfmkdge_mvl __builtin_ve_vl_vfmkdge_mvl
+#define _vel_vfmkdge_mvml __builtin_ve_vl_vfmkdge_mvml
+#define _vel_vfmkdle_mvl __builtin_ve_vl_vfmkdle_mvl
+#define _vel_vfmkdle_mvml __builtin_ve_vl_vfmkdle_mvml
+#define _vel_vfmkdnum_mvl __builtin_ve_vl_vfmkdnum_mvl
+#define _vel_vfmkdnum_mvml __builtin_ve_vl_vfmkdnum_mvml
+#define _vel_vfmkdnan_mvl __builtin_ve_vl_vfmkdnan_mvl
+#define _vel_vfmkdnan_mvml __builtin_ve_vl_vfmkdnan_mvml
+#define _vel_vfmkdgtnan_mvl __builtin_ve_vl_vfmkdgtnan_mvl
+#define _vel_vfmkdgtnan_mvml __builtin_ve_vl_vfmkdgtnan_mvml
+#define _vel_vfmkdltnan_mvl __builtin_ve_vl_vfmkdltnan_mvl
+#define _vel_vfmkdltnan_mvml __builtin_ve_vl_vfmkdltnan_mvml
+#define _vel_vfmkdnenan_mvl __builtin_ve_vl_vfmkdnenan_mvl
+#define _vel_vfmkdnenan_mvml __builtin_ve_vl_vfmkdnenan_mvml
+#define _vel_vfmkdeqnan_mvl __builtin_ve_vl_vfmkdeqnan_mvl
+#define _vel_vfmkdeqnan_mvml __builtin_ve_vl_vfmkdeqnan_mvml
+#define _vel_vfmkdgenan_mvl __builtin_ve_vl_vfmkdgenan_mvl
+#define _vel_vfmkdgenan_mvml __builtin_ve_vl_vfmkdgenan_mvml
+#define _vel_vfmkdlenan_mvl __builtin_ve_vl_vfmkdlenan_mvl
+#define _vel_vfmkdlenan_mvml __builtin_ve_vl_vfmkdlenan_mvml
+#define _vel_vfmksgt_mvl __builtin_ve_vl_vfmksgt_mvl
+#define _vel_vfmksgt_mvml __builtin_ve_vl_vfmksgt_mvml
+#define _vel_vfmkslt_mvl __builtin_ve_vl_vfmkslt_mvl
+#define _vel_vfmkslt_mvml __builtin_ve_vl_vfmkslt_mvml
+#define _vel_vfmksne_mvl __builtin_ve_vl_vfmksne_mvl
+#define _vel_vfmksne_mvml __builtin_ve_vl_vfmksne_mvml
+#define _vel_vfmkseq_mvl __builtin_ve_vl_vfmkseq_mvl
+#define _vel_vfmkseq_mvml __builtin_ve_vl_vfmkseq_mvml
+#define _vel_vfmksge_mvl __builtin_ve_vl_vfmksge_mvl
+#define _vel_vfmksge_mvml __builtin_ve_vl_vfmksge_mvml
+#define _vel_vfmksle_mvl __builtin_ve_vl_vfmksle_mvl
+#define _vel_vfmksle_mvml __builtin_ve_vl_vfmksle_mvml
+#define _vel_vfmksnum_mvl __builtin_ve_vl_vfmksnum_mvl
+#define _vel_vfmksnum_mvml __builtin_ve_vl_vfmksnum_mvml
+#define _vel_vfmksnan_mvl __builtin_ve_vl_vfmksnan_mvl
+#define _vel_vfmksnan_mvml __builtin_ve_vl_vfmksnan_mvml
+#define _vel_vfmksgtnan_mvl __builtin_ve_vl_vfmksgtnan_mvl
+#define _vel_vfmksgtnan_mvml __builtin_ve_vl_vfmksgtnan_mvml
+#define _vel_vfmksltnan_mvl __builtin_ve_vl_vfmksltnan_mvl
+#define _vel_vfmksltnan_mvml __builtin_ve_vl_vfmksltnan_mvml
+#define _vel_vfmksnenan_mvl __builtin_ve_vl_vfmksnenan_mvl
+#define _vel_vfmksnenan_mvml __builtin_ve_vl_vfmksnenan_mvml
+#define _vel_vfmkseqnan_mvl __builtin_ve_vl_vfmkseqnan_mvl
+#define _vel_vfmkseqnan_mvml __builtin_ve_vl_vfmkseqnan_mvml
+#define _vel_vfmksgenan_mvl __builtin_ve_vl_vfmksgenan_mvl
+#define _vel_vfmksgenan_mvml __builtin_ve_vl_vfmksgenan_mvml
+#define _vel_vfmkslenan_mvl __builtin_ve_vl_vfmkslenan_mvl
+#define _vel_vfmkslenan_mvml __builtin_ve_vl_vfmkslenan_mvml
+#define _vel_pvfmkslogt_mvl __builtin_ve_vl_pvfmkslogt_mvl
+#define _vel_pvfmksupgt_mvl __builtin_ve_vl_pvfmksupgt_mvl
+#define _vel_pvfmkslogt_mvml __builtin_ve_vl_pvfmkslogt_mvml
+#define _vel_pvfmksupgt_mvml __builtin_ve_vl_pvfmksupgt_mvml
+#define _vel_pvfmkslolt_mvl __builtin_ve_vl_pvfmkslolt_mvl
+#define _vel_pvfmksuplt_mvl __builtin_ve_vl_pvfmksuplt_mvl
+#define _vel_pvfmkslolt_mvml __builtin_ve_vl_pvfmkslolt_mvml
+#define _vel_pvfmksuplt_mvml __builtin_ve_vl_pvfmksuplt_mvml
+#define _vel_pvfmkslone_mvl __builtin_ve_vl_pvfmkslone_mvl
+#define _vel_pvfmksupne_mvl __builtin_ve_vl_pvfmksupne_mvl
+#define _vel_pvfmkslone_mvml __builtin_ve_vl_pvfmkslone_mvml
+#define _vel_pvfmksupne_mvml __builtin_ve_vl_pvfmksupne_mvml
+#define _vel_pvfmksloeq_mvl __builtin_ve_vl_pvfmksloeq_mvl
+#define _vel_pvfmksupeq_mvl __builtin_ve_vl_pvfmksupeq_mvl
+#define _vel_pvfmksloeq_mvml __builtin_ve_vl_pvfmksloeq_mvml
+#define _vel_pvfmksupeq_mvml __builtin_ve_vl_pvfmksupeq_mvml
+#define _vel_pvfmksloge_mvl __builtin_ve_vl_pvfmksloge_mvl
+#define _vel_pvfmksupge_mvl __builtin_ve_vl_pvfmksupge_mvl
+#define _vel_pvfmksloge_mvml __builtin_ve_vl_pvfmksloge_mvml
+#define _vel_pvfmksupge_mvml __builtin_ve_vl_pvfmksupge_mvml
+#define _vel_pvfmkslole_mvl __builtin_ve_vl_pvfmkslole_mvl
+#define _vel_pvfmksuple_mvl __builtin_ve_vl_pvfmksuple_mvl
+#define _vel_pvfmkslole_mvml __builtin_ve_vl_pvfmkslole_mvml
+#define _vel_pvfmksuple_mvml __builtin_ve_vl_pvfmksuple_mvml
+#define _vel_pvfmkslonum_mvl __builtin_ve_vl_pvfmkslonum_mvl
+#define _vel_pvfmksupnum_mvl __builtin_ve_vl_pvfmksupnum_mvl
+#define _vel_pvfmkslonum_mvml __builtin_ve_vl_pvfmkslonum_mvml
+#define _vel_pvfmksupnum_mvml __builtin_ve_vl_pvfmksupnum_mvml
+#define _vel_pvfmkslonan_mvl __builtin_ve_vl_pvfmkslonan_mvl
+#define _vel_pvfmksupnan_mvl __builtin_ve_vl_pvfmksupnan_mvl
+#define _vel_pvfmkslonan_mvml __builtin_ve_vl_pvfmkslonan_mvml
+#define _vel_pvfmksupnan_mvml __builtin_ve_vl_pvfmksupnan_mvml
+#define _vel_pvfmkslogtnan_mvl __builtin_ve_vl_pvfmkslogtnan_mvl
+#define _vel_pvfmksupgtnan_mvl __builtin_ve_vl_pvfmksupgtnan_mvl
+#define _vel_pvfmkslogtnan_mvml __builtin_ve_vl_pvfmkslogtnan_mvml
+#define _vel_pvfmksupgtnan_mvml __builtin_ve_vl_pvfmksupgtnan_mvml
+#define _vel_pvfmksloltnan_mvl __builtin_ve_vl_pvfmksloltnan_mvl
+#define _vel_pvfmksupltnan_mvl __builtin_ve_vl_pvfmksupltnan_mvl
+#define _vel_pvfmksloltnan_mvml __builtin_ve_vl_pvfmksloltnan_mvml
+#define _vel_pvfmksupltnan_mvml __builtin_ve_vl_pvfmksupltnan_mvml
+#define _vel_pvfmkslonenan_mvl __builtin_ve_vl_pvfmkslonenan_mvl
+#define _vel_pvfmksupnenan_mvl __builtin_ve_vl_pvfmksupnenan_mvl
+#define _vel_pvfmkslonenan_mvml __builtin_ve_vl_pvfmkslonenan_mvml
+#define _vel_pvfmksupnenan_mvml __builtin_ve_vl_pvfmksupnenan_mvml
+#define _vel_pvfmksloeqnan_mvl __builtin_ve_vl_pvfmksloeqnan_mvl
+#define _vel_pvfmksupeqnan_mvl __builtin_ve_vl_pvfmksupeqnan_mvl
+#define _vel_pvfmksloeqnan_mvml __builtin_ve_vl_pvfmksloeqnan_mvml
+#define _vel_pvfmksupeqnan_mvml __builtin_ve_vl_pvfmksupeqnan_mvml
+#define _vel_pvfmkslogenan_mvl __builtin_ve_vl_pvfmkslogenan_mvl
+#define _vel_pvfmksupgenan_mvl __builtin_ve_vl_pvfmksupgenan_mvl
+#define _vel_pvfmkslogenan_mvml __builtin_ve_vl_pvfmkslogenan_mvml
+#define _vel_pvfmksupgenan_mvml __builtin_ve_vl_pvfmksupgenan_mvml
+#define _vel_pvfmkslolenan_mvl __builtin_ve_vl_pvfmkslolenan_mvl
+#define _vel_pvfmksuplenan_mvl __builtin_ve_vl_pvfmksuplenan_mvl
+#define _vel_pvfmkslolenan_mvml __builtin_ve_vl_pvfmkslolenan_mvml
+#define _vel_pvfmksuplenan_mvml __builtin_ve_vl_pvfmksuplenan_mvml
+#define _vel_pvfmksgt_Mvl __builtin_ve_vl_pvfmksgt_Mvl
+#define _vel_pvfmksgt_MvMl __builtin_ve_vl_pvfmksgt_MvMl
+#define _vel_pvfmkslt_Mvl __builtin_ve_vl_pvfmkslt_Mvl
+#define _vel_pvfmkslt_MvMl __builtin_ve_vl_pvfmkslt_MvMl
+#define _vel_pvfmksne_Mvl __builtin_ve_vl_pvfmksne_Mvl
+#define _vel_pvfmksne_MvMl __builtin_ve_vl_pvfmksne_MvMl
+#define _vel_pvfmkseq_Mvl __builtin_ve_vl_pvfmkseq_Mvl
+#define _vel_pvfmkseq_MvMl __builtin_ve_vl_pvfmkseq_MvMl
+#define _vel_pvfmksge_Mvl __builtin_ve_vl_pvfmksge_Mvl
+#define _vel_pvfmksge_MvMl __builtin_ve_vl_pvfmksge_MvMl
+#define _vel_pvfmksle_Mvl __builtin_ve_vl_pvfmksle_Mvl
+#define _vel_pvfmksle_MvMl __builtin_ve_vl_pvfmksle_MvMl
+#define _vel_pvfmksnum_Mvl __builtin_ve_vl_pvfmksnum_Mvl
+#define _vel_pvfmksnum_MvMl __builtin_ve_vl_pvfmksnum_MvMl
+#define _vel_pvfmksnan_Mvl __builtin_ve_vl_pvfmksnan_Mvl
+#define _vel_pvfmksnan_MvMl __builtin_ve_vl_pvfmksnan_MvMl
+#define _vel_pvfmksgtnan_Mvl __builtin_ve_vl_pvfmksgtnan_Mvl
+#define _vel_pvfmksgtnan_MvMl __builtin_ve_vl_pvfmksgtnan_MvMl
+#define _vel_pvfmksltnan_Mvl __builtin_ve_vl_pvfmksltnan_Mvl
+#define _vel_pvfmksltnan_MvMl __builtin_ve_vl_pvfmksltnan_MvMl
+#define _vel_pvfmksnenan_Mvl __builtin_ve_vl_pvfmksnenan_Mvl
+#define _vel_pvfmksnenan_MvMl __builtin_ve_vl_pvfmksnenan_MvMl
+#define _vel_pvfmkseqnan_Mvl __builtin_ve_vl_pvfmkseqnan_Mvl
+#define _vel_pvfmkseqnan_MvMl __builtin_ve_vl_pvfmkseqnan_MvMl
+#define _vel_pvfmksgenan_Mvl __builtin_ve_vl_pvfmksgenan_Mvl
+#define _vel_pvfmksgenan_MvMl __builtin_ve_vl_pvfmksgenan_MvMl
+#define _vel_pvfmkslenan_Mvl __builtin_ve_vl_pvfmkslenan_Mvl
+#define _vel_pvfmkslenan_MvMl __builtin_ve_vl_pvfmkslenan_MvMl
+#define _vel_vsumwsx_vvl __builtin_ve_vl_vsumwsx_vvl
+#define _vel_vsumwsx_vvml __builtin_ve_vl_vsumwsx_vvml
+#define _vel_vsumwzx_vvl __builtin_ve_vl_vsumwzx_vvl
+#define _vel_vsumwzx_vvml __builtin_ve_vl_vsumwzx_vvml
+#define _vel_vsuml_vvl __builtin_ve_vl_vsuml_vvl
+#define _vel_vsuml_vvml __builtin_ve_vl_vsuml_vvml
+#define _vel_vfsumd_vvl __builtin_ve_vl_vfsumd_vvl
+#define _vel_vfsumd_vvml __builtin_ve_vl_vfsumd_vvml
+#define _vel_vfsums_vvl __builtin_ve_vl_vfsums_vvl
+#define _vel_vfsums_vvml __builtin_ve_vl_vfsums_vvml
+#define _vel_vrmaxswfstsx_vvl __builtin_ve_vl_vrmaxswfstsx_vvl
+#define _vel_vrmaxswfstsx_vvvl __builtin_ve_vl_vrmaxswfstsx_vvvl
+#define _vel_vrmaxswlstsx_vvl __builtin_ve_vl_vrmaxswlstsx_vvl
+#define _vel_vrmaxswlstsx_vvvl __builtin_ve_vl_vrmaxswlstsx_vvvl
+#define _vel_vrmaxswfstzx_vvl __builtin_ve_vl_vrmaxswfstzx_vvl
+#define _vel_vrmaxswfstzx_vvvl __builtin_ve_vl_vrmaxswfstzx_vvvl
+#define _vel_vrmaxswlstzx_vvl __builtin_ve_vl_vrmaxswlstzx_vvl
+#define _vel_vrmaxswlstzx_vvvl __builtin_ve_vl_vrmaxswlstzx_vvvl
+#define _vel_vrminswfstsx_vvl __builtin_ve_vl_vrminswfstsx_vvl
+#define _vel_vrminswfstsx_vvvl __builtin_ve_vl_vrminswfstsx_vvvl
+#define _vel_vrminswlstsx_vvl __builtin_ve_vl_vrminswlstsx_vvl
+#define _vel_vrminswlstsx_vvvl __builtin_ve_vl_vrminswlstsx_vvvl
+#define _vel_vrminswfstzx_vvl __builtin_ve_vl_vrminswfstzx_vvl
+#define _vel_vrminswfstzx_vvvl __builtin_ve_vl_vrminswfstzx_vvvl
+#define _vel_vrminswlstzx_vvl __builtin_ve_vl_vrminswlstzx_vvl
+#define _vel_vrminswlstzx_vvvl __builtin_ve_vl_vrminswlstzx_vvvl
+#define _vel_vrmaxslfst_vvl __builtin_ve_vl_vrmaxslfst_vvl
+#define _vel_vrmaxslfst_vvvl __builtin_ve_vl_vrmaxslfst_vvvl
+#define _vel_vrmaxsllst_vvl __builtin_ve_vl_vrmaxsllst_vvl
+#define _vel_vrmaxsllst_vvvl __builtin_ve_vl_vrmaxsllst_vvvl
+#define _vel_vrminslfst_vvl __builtin_ve_vl_vrminslfst_vvl
+#define _vel_vrminslfst_vvvl __builtin_ve_vl_vrminslfst_vvvl
+#define _vel_vrminsllst_vvl __builtin_ve_vl_vrminsllst_vvl
+#define _vel_vrminsllst_vvvl __builtin_ve_vl_vrminsllst_vvvl
+#define _vel_vfrmaxdfst_vvl __builtin_ve_vl_vfrmaxdfst_vvl
+#define _vel_vfrmaxdfst_vvvl __builtin_ve_vl_vfrmaxdfst_vvvl
+#define _vel_vfrmaxdlst_vvl __builtin_ve_vl_vfrmaxdlst_vvl
+#define _vel_vfrmaxdlst_vvvl __builtin_ve_vl_vfrmaxdlst_vvvl
+#define _vel_vfrmaxsfst_vvl __builtin_ve_vl_vfrmaxsfst_vvl
+#define _vel_vfrmaxsfst_vvvl __builtin_ve_vl_vfrmaxsfst_vvvl
+#define _vel_vfrmaxslst_vvl __builtin_ve_vl_vfrmaxslst_vvl
+#define _vel_vfrmaxslst_vvvl __builtin_ve_vl_vfrmaxslst_vvvl
+#define _vel_vfrmindfst_vvl __builtin_ve_vl_vfrmindfst_vvl
+#define _vel_vfrmindfst_vvvl __builtin_ve_vl_vfrmindfst_vvvl
+#define _vel_vfrmindlst_vvl __builtin_ve_vl_vfrmindlst_vvl
+#define _vel_vfrmindlst_vvvl __builtin_ve_vl_vfrmindlst_vvvl
+#define _vel_vfrminsfst_vvl __builtin_ve_vl_vfrminsfst_vvl
+#define _vel_vfrminsfst_vvvl __builtin_ve_vl_vfrminsfst_vvvl
+#define _vel_vfrminslst_vvl __builtin_ve_vl_vfrminslst_vvl
+#define _vel_vfrminslst_vvvl __builtin_ve_vl_vfrminslst_vvvl
+#define _vel_vrand_vvl __builtin_ve_vl_vrand_vvl
+#define _vel_vrand_vvml __builtin_ve_vl_vrand_vvml
+#define _vel_vror_vvl __builtin_ve_vl_vror_vvl
+#define _vel_vror_vvml __builtin_ve_vl_vror_vvml
+#define _vel_vrxor_vvl __builtin_ve_vl_vrxor_vvl
+#define _vel_vrxor_vvml __builtin_ve_vl_vrxor_vvml
+#define _vel_vgt_vvssl __builtin_ve_vl_vgt_vvssl
+#define _vel_vgt_vvssvl __builtin_ve_vl_vgt_vvssvl
+#define _vel_vgt_vvssml __builtin_ve_vl_vgt_vvssml
+#define _vel_vgt_vvssmvl __builtin_ve_vl_vgt_vvssmvl
+#define _vel_vgtnc_vvssl __builtin_ve_vl_vgtnc_vvssl
+#define _vel_vgtnc_vvssvl __builtin_ve_vl_vgtnc_vvssvl
+#define _vel_vgtnc_vvssml __builtin_ve_vl_vgtnc_vvssml
+#define _vel_vgtnc_vvssmvl __builtin_ve_vl_vgtnc_vvssmvl
+#define _vel_vgtu_vvssl __builtin_ve_vl_vgtu_vvssl
+#define _vel_vgtu_vvssvl __builtin_ve_vl_vgtu_vvssvl
+#define _vel_vgtu_vvssml __builtin_ve_vl_vgtu_vvssml
+#define _vel_vgtu_vvssmvl __builtin_ve_vl_vgtu_vvssmvl
+#define _vel_vgtunc_vvssl __builtin_ve_vl_vgtunc_vvssl
+#define _vel_vgtunc_vvssvl __builtin_ve_vl_vgtunc_vvssvl
+#define _vel_vgtunc_vvssml __builtin_ve_vl_vgtunc_vvssml
+#define _vel_vgtunc_vvssmvl __builtin_ve_vl_vgtunc_vvssmvl
+#define _vel_vgtlsx_vvssl __builtin_ve_vl_vgtlsx_vvssl
+#define _vel_vgtlsx_vvssvl __builtin_ve_vl_vgtlsx_vvssvl
+#define _vel_vgtlsx_vvssml __builtin_ve_vl_vgtlsx_vvssml
+#define _vel_vgtlsx_vvssmvl __builtin_ve_vl_vgtlsx_vvssmvl
+#define _vel_vgtlsxnc_vvssl __builtin_ve_vl_vgtlsxnc_vvssl
+#define _vel_vgtlsxnc_vvssvl __builtin_ve_vl_vgtlsxnc_vvssvl
+#define _vel_vgtlsxnc_vvssml __builtin_ve_vl_vgtlsxnc_vvssml
+#define _vel_vgtlsxnc_vvssmvl __builtin_ve_vl_vgtlsxnc_vvssmvl
+#define _vel_vgtlzx_vvssl __builtin_ve_vl_vgtlzx_vvssl
+#define _vel_vgtlzx_vvssvl __builtin_ve_vl_vgtlzx_vvssvl
+#define _vel_vgtlzx_vvssml __builtin_ve_vl_vgtlzx_vvssml
+#define _vel_vgtlzx_vvssmvl __builtin_ve_vl_vgtlzx_vvssmvl
+#define _vel_vgtlzxnc_vvssl __builtin_ve_vl_vgtlzxnc_vvssl
+#define _vel_vgtlzxnc_vvssvl __builtin_ve_vl_vgtlzxnc_vvssvl
+#define _vel_vgtlzxnc_vvssml __builtin_ve_vl_vgtlzxnc_vvssml
+#define _vel_vgtlzxnc_vvssmvl __builtin_ve_vl_vgtlzxnc_vvssmvl
+#define _vel_vsc_vvssl __builtin_ve_vl_vsc_vvssl
+#define _vel_vsc_vvssml __builtin_ve_vl_vsc_vvssml
+#define _vel_vscnc_vvssl __builtin_ve_vl_vscnc_vvssl
+#define _vel_vscnc_vvssml __builtin_ve_vl_vscnc_vvssml
+#define _vel_vscot_vvssl __builtin_ve_vl_vscot_vvssl
+#define _vel_vscot_vvssml __builtin_ve_vl_vscot_vvssml
+#define _vel_vscncot_vvssl __builtin_ve_vl_vscncot_vvssl
+#define _vel_vscncot_vvssml __builtin_ve_vl_vscncot_vvssml
+#define _vel_vscu_vvssl __builtin_ve_vl_vscu_vvssl
+#define _vel_vscu_vvssml __builtin_ve_vl_vscu_vvssml
+#define _vel_vscunc_vvssl __builtin_ve_vl_vscunc_vvssl
+#define _vel_vscunc_vvssml __builtin_ve_vl_vscunc_vvssml
+#define _vel_vscuot_vvssl __builtin_ve_vl_vscuot_vvssl
+#define _vel_vscuot_vvssml __builtin_ve_vl_vscuot_vvssml
+#define _vel_vscuncot_vvssl __builtin_ve_vl_vscuncot_vvssl
+#define _vel_vscuncot_vvssml __builtin_ve_vl_vscuncot_vvssml
+#define _vel_vscl_vvssl __builtin_ve_vl_vscl_vvssl
+#define _vel_vscl_vvssml __builtin_ve_vl_vscl_vvssml
+#define _vel_vsclnc_vvssl __builtin_ve_vl_vsclnc_vvssl
+#define _vel_vsclnc_vvssml __builtin_ve_vl_vsclnc_vvssml
+#define _vel_vsclot_vvssl __builtin_ve_vl_vsclot_vvssl
+#define _vel_vsclot_vvssml __builtin_ve_vl_vsclot_vvssml
+#define _vel_vsclncot_vvssl __builtin_ve_vl_vsclncot_vvssl
+#define _vel_vsclncot_vvssml __builtin_ve_vl_vsclncot_vvssml
+#define _vel_andm_mmm __builtin_ve_vl_andm_mmm
+#define _vel_andm_MMM __builtin_ve_vl_andm_MMM
+#define _vel_orm_mmm __builtin_ve_vl_orm_mmm
+#define _vel_orm_MMM __builtin_ve_vl_orm_MMM
+#define _vel_xorm_mmm __builtin_ve_vl_xorm_mmm
+#define _vel_xorm_MMM __builtin_ve_vl_xorm_MMM
+#define _vel_eqvm_mmm __builtin_ve_vl_eqvm_mmm
+#define _vel_eqvm_MMM __builtin_ve_vl_eqvm_MMM
+#define _vel_nndm_mmm __builtin_ve_vl_nndm_mmm
+#define _vel_nndm_MMM __builtin_ve_vl_nndm_MMM
+#define _vel_negm_mm __builtin_ve_vl_negm_mm
+#define _vel_negm_MM __builtin_ve_vl_negm_MM
+#define _vel_pcvm_sml __builtin_ve_vl_pcvm_sml
+#define _vel_lzvm_sml __builtin_ve_vl_lzvm_sml
+#define _vel_tovm_sml __builtin_ve_vl_tovm_sml
+#define _vel_lcr_sss __builtin_ve_vl_lcr_sss
+#define _vel_scr_sss __builtin_ve_vl_scr_sss
+#define _vel_tscr_ssss __builtin_ve_vl_tscr_ssss
+#define _vel_fidcr_sss __builtin_ve_vl_fidcr_sss
+#define _vel_fencei __builtin_ve_vl_fencei
+#define _vel_fencem_s __builtin_ve_vl_fencem_s
+#define _vel_fencec_s __builtin_ve_vl_fencec_s
+#define _vel_svob __builtin_ve_vl_svob
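The `_vel_*` names above are thin macro aliases for the corresponding VE builtins; by the usual NEC VE naming convention the suffix letters encode the signature (v = vector, s = scalar, m = 256-bit mask, M = 512-bit mask pair, l = vector length). A minimal sketch, assuming an SX-Aurora (VE) target and the intrinsics header this hunk belongs to (velintrin.h in upstream clang):

    #include <velintrin.h>

    void vector_store_barrier(void) {
      // Expands to __builtin_ve_vl_svob(); as we read the mnemonic, svob sets
      // a boundary that orders out-of-order vector memory accesses.
      _vel_svob();
    }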
diff --git a/contrib/llvm-project/clang/lib/Headers/wasm_simd128.h b/contrib/llvm-project/clang/lib/Headers/wasm_simd128.h
index 3889a2769faf..f93de129f957 100644
--- a/contrib/llvm-project/clang/lib/Headers/wasm_simd128.h
+++ b/contrib/llvm-project/clang/lib/Headers/wasm_simd128.h
@@ -1405,12 +1405,12 @@ wasm_f64x2_convert_low_u32x4(v128_t __a) {
static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_i32x4_trunc_sat_f64x2_zero(v128_t __a) {
- return (v128_t)__builtin_wasm_trunc_sat_zero_s_f64x2_i32x4((__f64x2)__a);
+ return (v128_t)__builtin_wasm_trunc_sat_s_zero_f64x2_i32x4((__f64x2)__a);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
wasm_u32x4_trunc_sat_f64x2_zero(v128_t __a) {
- return (v128_t)__builtin_wasm_trunc_sat_zero_u_f64x2_i32x4((__f64x2)__a);
+ return (v128_t)__builtin_wasm_trunc_sat_u_zero_f64x2_i32x4((__f64x2)__a);
}
static __inline__ v128_t __DEFAULT_FN_ATTRS
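The two replacements above only track a rename of the underlying WebAssembly builtins; the public intrinsics keep their names and semantics. A small usage sketch (any clang targeting wasm with -msimd128):

    #include <wasm_simd128.h>

    v128_t trunc_low(v128_t x) {
      // Saturating f64x2 -> i32x4 truncation; the two upper result lanes
      // are zeroed.
      return wasm_i32x4_trunc_sat_f64x2_zero(x);
    }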
diff --git a/contrib/llvm-project/clang/lib/Headers/xmmintrin.h b/contrib/llvm-project/clang/lib/Headers/xmmintrin.h
index 1612d3d2773d..4aa70d6e55a6 100644
--- a/contrib/llvm-project/clang/lib/Headers/xmmintrin.h
+++ b/contrib/llvm-project/clang/lib/Headers/xmmintrin.h
@@ -2086,7 +2086,7 @@ _mm_storer_ps(float *__p, __m128 __a)
/// \headerfile <x86intrin.h>
///
/// \code
-/// void _mm_prefetch(const void * a, const int sel);
+/// void _mm_prefetch(const void *a, const int sel);
/// \endcode
///
/// This intrinsic corresponds to the <c> PREFETCHNTA </c> instruction.
@@ -2360,7 +2360,10 @@ _mm_mulhi_pu16(__m64 __a, __m64 __b)
/// 00: assigned from bits [15:0] of \a a. \n
/// 01: assigned from bits [31:16] of \a a. \n
/// 10: assigned from bits [47:32] of \a a. \n
-/// 11: assigned from bits [63:48] of \a a.
+/// 11: assigned from bits [63:48] of \a a. \n
+/// Note: To generate a mask, you can use the \c _MM_SHUFFLE macro.
+/// <c>_MM_SHUFFLE(b6, b4, b2, b0)</c> can create an 8-bit mask of the form
+/// <c>[b6, b4, b2, b0]</c>.
/// \returns A 64-bit integer vector containing the shuffled values.
#define _mm_shuffle_pi16(a, n) \
((__m64)__builtin_ia32_pshufw((__v4hi)(__m64)(a), (n)))
@@ -2602,7 +2605,10 @@ void _mm_setcsr(unsigned int __i);
/// 00: Bits [31:0] copied from the specified operand. \n
/// 01: Bits [63:32] copied from the specified operand. \n
/// 10: Bits [95:64] copied from the specified operand. \n
-/// 11: Bits [127:96] copied from the specified operand.
+/// 11: Bits [127:96] copied from the specified operand. \n
+/// Note: To generate a mask, you can use the \c _MM_SHUFFLE macro.
+/// <c>_MM_SHUFFLE(b6, b4, b2, b0)</c> can create an 8-bit mask of the form
+/// <c>[b6, b4, b2, b0]</c>.
/// \returns A 128-bit vector of [4 x float] containing the shuffled values.
#define _mm_shuffle_ps(a, b, mask) \
((__m128)__builtin_ia32_shufps((__v4sf)(__m128)(a), (__v4sf)(__m128)(b), \
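To make the added note concrete: _MM_SHUFFLE(z, y, x, w) packs four 2-bit lane selectors into the 8-bit immediate (z << 6) | (y << 4) | (x << 2) | w, which the shuffle reads low-to-high. A short sketch:

    #include <xmmintrin.h>

    __m128 reverse_lanes(__m128 a) {
      // Result lanes 0..3 are taken from a[3], a[2], a[1], a[0].
      return _mm_shuffle_ps(a, a, _MM_SHUFFLE(0, 1, 2, 3));
    }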
diff --git a/contrib/llvm-project/clang/lib/Index/IndexBody.cpp b/contrib/llvm-project/clang/lib/Index/IndexBody.cpp
index fa35f749d028..eb8905a7459c 100644
--- a/contrib/llvm-project/clang/lib/Index/IndexBody.cpp
+++ b/contrib/llvm-project/clang/lib/Index/IndexBody.cpp
@@ -7,8 +7,12 @@
//===----------------------------------------------------------------------===//
#include "IndexingContext.h"
-#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTLambda.h"
+#include "clang/AST/DeclCXX.h"
+#include "clang/AST/ExprConcepts.h"
+#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/Type.h"
using namespace clang;
using namespace clang::index;
@@ -455,10 +459,10 @@ public:
}
bool VisitParmVarDecl(ParmVarDecl* D) {
- // Index the parameters of lambda expression.
+    // Index the parameters of lambda expressions and requires expressions.
if (IndexCtx.shouldIndexFunctionLocalSymbols()) {
const auto *DC = D->getDeclContext();
- if (DC && isLambdaCallOperator(DC))
+ if (DC && (isLambdaCallOperator(DC) || isa<RequiresExprBodyDecl>(DC)))
IndexCtx.handleDecl(D);
}
return true;
@@ -472,6 +476,18 @@ public:
Relations, E);
return true;
}
+
+ bool VisitConceptSpecializationExpr(ConceptSpecializationExpr *R) {
+ IndexCtx.handleReference(R->getNamedConcept(), R->getConceptNameLoc(),
+ Parent, ParentDC);
+ return true;
+ }
+
+ bool TraverseTypeConstraint(const TypeConstraint *C) {
+ IndexCtx.handleReference(C->getNamedConcept(), C->getConceptNameLoc(),
+ Parent, ParentDC);
+ return RecursiveASTVisitor::TraverseTypeConstraint(C);
+ }
};
} // anonymous namespace
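Taken together, the new visitors make the indexer report concept names and requires-expression parameters. An illustrative C++20 snippet (not part of the patch) showing what each hook picks up:

    struct S { void foo(); };

    template <typename T>
    concept Fooable = requires(T t) { t.foo(); };  // 't' is a ParmVarDecl in a
                                                   // RequiresExprBodyDecl, now indexed

    static_assert(Fooable<S>);            // VisitConceptSpecializationExpr reference

    template <Fooable T> void use(T) {}   // TraverseTypeConstraint reference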
diff --git a/contrib/llvm-project/clang/lib/Index/IndexDecl.cpp b/contrib/llvm-project/clang/lib/Index/IndexDecl.cpp
index 3139aedaf01d..882e02836d4f 100644
--- a/contrib/llvm-project/clang/lib/Index/IndexDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Index/IndexDecl.cpp
@@ -7,10 +7,13 @@
//===----------------------------------------------------------------------===//
#include "IndexingContext.h"
+#include "clang/AST/ASTConcept.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
+#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclVisitor.h"
#include "clang/Index/IndexDataConsumer.h"
+#include "clang/Index/IndexSymbol.h"
using namespace clang;
using namespace index;
@@ -129,6 +132,8 @@ public:
}
}
}
+ if (auto *C = D->getTrailingRequiresClause())
+ IndexCtx.indexBody(C, Parent);
}
bool handleObjCMethod(const ObjCMethodDecl *D,
@@ -605,9 +610,16 @@ public:
const NamedDecl *Parent = dyn_cast<NamedDecl>(DC);
IndexCtx.indexNestedNameSpecifierLoc(D->getQualifierLoc(), Parent,
D->getLexicalDeclContext());
- for (const auto *I : D->shadows())
+ for (const auto *I : D->shadows()) {
+      // Skip unresolved using decls - we already have a decl for the using
+      // itself, so there is little point in adding another decl or reference
+      // for the same location.
+ if (isa<UnresolvedUsingIfExistsDecl>(I->getUnderlyingDecl()))
+ continue;
+
IndexCtx.handleReference(I->getUnderlyingDecl(), D->getLocation(), Parent,
D->getLexicalDeclContext(), SymbolRoleSet());
+ }
return true;
}
@@ -681,36 +693,52 @@ public:
return true;
}
- bool VisitTemplateDecl(const TemplateDecl *D) {
+ void indexTemplateParameters(TemplateParameterList *Params,
+ const NamedDecl *Parent) {
+ for (const NamedDecl *TP : *Params) {
+ if (IndexCtx.shouldIndexTemplateParameters())
+ IndexCtx.handleDecl(TP);
+ if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(TP)) {
+ if (TTP->hasDefaultArgument())
+ IndexCtx.indexTypeSourceInfo(TTP->getDefaultArgumentInfo(), Parent);
+ if (auto *C = TTP->getTypeConstraint())
+ IndexCtx.handleReference(C->getNamedConcept(), C->getConceptNameLoc(),
+ Parent, TTP->getLexicalDeclContext());
+ } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(TP)) {
+ if (NTTP->hasDefaultArgument())
+ IndexCtx.indexBody(NTTP->getDefaultArgument(), Parent);
+ } else if (const auto *TTPD = dyn_cast<TemplateTemplateParmDecl>(TP)) {
+ if (TTPD->hasDefaultArgument())
+ handleTemplateArgumentLoc(TTPD->getDefaultArgument(), Parent,
+ TP->getLexicalDeclContext());
+ }
+ }
+ if (auto *R = Params->getRequiresClause())
+ IndexCtx.indexBody(R, Parent);
+ }
+ bool VisitTemplateDecl(const TemplateDecl *D) {
const NamedDecl *Parent = D->getTemplatedDecl();
if (!Parent)
return true;
// Index the default values for the template parameters.
- if (D->getTemplateParameters() &&
- shouldIndexTemplateParameterDefaultValue(Parent)) {
- const TemplateParameterList *Params = D->getTemplateParameters();
- for (const NamedDecl *TP : *Params) {
- if (IndexCtx.shouldIndexTemplateParameters())
- IndexCtx.handleDecl(TP);
- if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(TP)) {
- if (TTP->hasDefaultArgument())
- IndexCtx.indexTypeSourceInfo(TTP->getDefaultArgumentInfo(), Parent);
- } else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(TP)) {
- if (NTTP->hasDefaultArgument())
- IndexCtx.indexBody(NTTP->getDefaultArgument(), Parent);
- } else if (const auto *TTPD = dyn_cast<TemplateTemplateParmDecl>(TP)) {
- if (TTPD->hasDefaultArgument())
- handleTemplateArgumentLoc(TTPD->getDefaultArgument(), Parent,
- TP->getLexicalDeclContext());
- }
- }
+ auto *Params = D->getTemplateParameters();
+ if (Params && shouldIndexTemplateParameterDefaultValue(Parent)) {
+ indexTemplateParameters(Params, Parent);
}
return Visit(Parent);
}
+ bool VisitConceptDecl(const ConceptDecl *D) {
+ if (auto *Params = D->getTemplateParameters())
+ indexTemplateParameters(Params, D);
+ if (auto *E = D->getConstraintExpr())
+ IndexCtx.indexBody(E, D);
+ return IndexCtx.handleDecl(D);
+ }
+
bool VisitFriendDecl(const FriendDecl *D) {
if (auto ND = D->getFriendDecl()) {
// FIXME: Ignore a class template in a dependent context, these are not
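The refactoring hoists the parameter walk into indexTemplateParameters, extends it to type-constraints and requires clauses, and adds VisitConceptDecl for concept definitions themselves. Illustrative declarations that are now fully indexed:

    template <typename T>
    concept Small = sizeof(T) <= 4;   // VisitConceptDecl: parameters + constraint expr

    template <typename T = int>       // default argument
      requires (sizeof(T) <= 8)       // requires clause via indexBody
    struct Box { T value; };

    template <Small T = int>          // type-constraint reference + default argument
    struct Tiny {};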
diff --git a/contrib/llvm-project/clang/lib/Index/IndexSymbol.cpp b/contrib/llvm-project/clang/lib/Index/IndexSymbol.cpp
index 68e457de5265..a40c218a3c43 100644
--- a/contrib/llvm-project/clang/lib/Index/IndexSymbol.cpp
+++ b/contrib/llvm-project/clang/lib/Index/IndexSymbol.cpp
@@ -371,6 +371,9 @@ SymbolInfo index::getSymbolInfo(const Decl *D) {
case Decl::NonTypeTemplateParm:
Info.Kind = SymbolKind::NonTypeTemplateParm;
break;
+ case Decl::Concept:
+ Info.Kind = SymbolKind::Concept;
+ break;
// Other decls get the 'unknown' kind.
default:
break;
@@ -534,6 +537,8 @@ StringRef index::getSymbolKindString(SymbolKind K) {
case SymbolKind::TemplateTypeParm: return "template-type-param";
case SymbolKind::TemplateTemplateParm: return "template-template-param";
case SymbolKind::NonTypeTemplateParm: return "non-type-template-param";
+ case SymbolKind::Concept:
+ return "concept";
}
llvm_unreachable("invalid symbol kind");
}
diff --git a/contrib/llvm-project/clang/lib/Index/IndexTypeSourceInfo.cpp b/contrib/llvm-project/clang/lib/Index/IndexTypeSourceInfo.cpp
index ec4ca23942ca..b986ccde5745 100644
--- a/contrib/llvm-project/clang/lib/Index/IndexTypeSourceInfo.cpp
+++ b/contrib/llvm-project/clang/lib/Index/IndexTypeSourceInfo.cpp
@@ -7,7 +7,10 @@
//===----------------------------------------------------------------------===//
#include "IndexingContext.h"
+#include "clang/AST/ASTConcept.h"
+#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/TypeLoc.h"
#include "llvm/ADT/ScopeExit.h"
using namespace clang;
@@ -77,6 +80,13 @@ public:
return true;
}
+ bool VisitAutoTypeLoc(AutoTypeLoc TL) {
+ if (auto *C = TL.getNamedConcept())
+ return IndexCtx.handleReference(C, TL.getConceptNameLoc(), Parent,
+ ParentDC);
+ return true;
+ }
+
bool traverseParamVarHelper(ParmVarDecl *D) {
TRY_TO(TraverseNestedNameSpecifierLoc(D->getQualifierLoc()));
if (D->getTypeSourceInfo())
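VisitAutoTypeLoc covers constrained placeholder types, so the concept name in a declaration like the following is now reported as a reference (illustrative):

    #include <concepts>

    std::integral auto next(int n) { return n + 1; }   // reference to 'std::integral'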
diff --git a/contrib/llvm-project/clang/lib/Index/IndexingContext.cpp b/contrib/llvm-project/clang/lib/Index/IndexingContext.cpp
index 8a962a055bac..ca8b144a431c 100644
--- a/contrib/llvm-project/clang/lib/Index/IndexingContext.cpp
+++ b/contrib/llvm-project/clang/lib/Index/IndexingContext.cpp
@@ -86,7 +86,6 @@ bool IndexingContext::handleReference(const NamedDecl *D, SourceLocation Loc,
isa<TemplateTemplateParmDecl>(D))) {
return true;
}
-
return handleDeclOccurrence(D, Loc, /*IsRef=*/true, Parent, Roles, Relations,
RefE, RefD, DC);
}
@@ -259,12 +258,9 @@ static bool isDeclADefinition(const Decl *D, const DeclContext *ContainerDC, AST
if (auto MD = dyn_cast<ObjCMethodDecl>(D))
return MD->isThisDeclarationADefinition() || isa<ObjCImplDecl>(ContainerDC);
- if (isa<TypedefNameDecl>(D) ||
- isa<EnumConstantDecl>(D) ||
- isa<FieldDecl>(D) ||
- isa<MSPropertyDecl>(D) ||
- isa<ObjCImplDecl>(D) ||
- isa<ObjCPropertyImplDecl>(D))
+ if (isa<TypedefNameDecl>(D) || isa<EnumConstantDecl>(D) ||
+ isa<FieldDecl>(D) || isa<MSPropertyDecl>(D) || isa<ObjCImplDecl>(D) ||
+ isa<ObjCPropertyImplDecl>(D) || isa<ConceptDecl>(D))
return true;
return false;
diff --git a/contrib/llvm-project/clang/lib/Index/USRGeneration.cpp b/contrib/llvm-project/clang/lib/Index/USRGeneration.cpp
index 41edd431dd5b..c606d8521bee 100644
--- a/contrib/llvm-project/clang/lib/Index/USRGeneration.cpp
+++ b/contrib/llvm-project/clang/lib/Index/USRGeneration.cpp
@@ -103,6 +103,7 @@ public:
void VisitTemplateTemplateParmDecl(const TemplateTemplateParmDecl *D);
void VisitUnresolvedUsingValueDecl(const UnresolvedUsingValueDecl *D);
void VisitUnresolvedUsingTypenameDecl(const UnresolvedUsingTypenameDecl *D);
+ void VisitConceptDecl(const ConceptDecl *D);
void VisitLinkageSpecDecl(const LinkageSpecDecl *D) {
IgnoreResults = true; // No USRs for linkage specs themselves.
@@ -549,22 +550,22 @@ void USRGenerator::VisitTagDecl(const TagDecl *D) {
if (const TypedefNameDecl *TD = D->getTypedefNameForAnonDecl()) {
Buf[off] = 'A';
Out << '@' << *TD;
- }
- else {
- if (D->isEmbeddedInDeclarator() && !D->isFreeStanding()) {
- printLoc(Out, D->getLocation(), Context->getSourceManager(), true);
} else {
- Buf[off] = 'a';
- if (auto *ED = dyn_cast<EnumDecl>(D)) {
- // Distinguish USRs of anonymous enums by using their first enumerator.
- auto enum_range = ED->enumerators();
- if (enum_range.begin() != enum_range.end()) {
- Out << '@' << **enum_range.begin();
+ if (D->isEmbeddedInDeclarator() && !D->isFreeStanding()) {
+ printLoc(Out, D->getLocation(), Context->getSourceManager(), true);
+ } else {
+ Buf[off] = 'a';
+ if (auto *ED = dyn_cast<EnumDecl>(D)) {
+ // Distinguish USRs of anonymous enums by using their first
+ // enumerator.
+ auto enum_range = ED->enumerators();
+ if (enum_range.begin() != enum_range.end()) {
+ Out << '@' << **enum_range.begin();
+ }
}
}
}
}
- }
// For a class template specialization, mangle the template arguments.
if (const ClassTemplateSpecializationDecl *Spec
@@ -1007,7 +1008,13 @@ void USRGenerator::VisitUnresolvedUsingTypenameDecl(const UnresolvedUsingTypenam
Out << D->getName(); // Simple name.
}
-
+void USRGenerator::VisitConceptDecl(const ConceptDecl *D) {
+ if (ShouldGenerateLocation(D) && GenLoc(D, /*IncludeOffset=*/isLocal(D)))
+ return;
+ VisitDeclContext(D->getDeclContext());
+ Out << "@CT@";
+ EmitDeclName(D);
+}
//===----------------------------------------------------------------------===//
// USR generation functions.
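Based on VisitConceptDecl above, a file-scope concept now gets a stable USR built from its decl context, the "@CT@" marker, and its name. Illustrative (the 'c:' prefix is the usual USR scheme):

    template <typename T>
    concept Small = sizeof(T) <= 4;   // USR: c:@CT@Small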
diff --git a/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.cpp b/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.cpp
index 705235aafa07..227ab9703dc7 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.cpp
+++ b/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.cpp
@@ -12,6 +12,9 @@
#include "IncrementalExecutor.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TargetOptions.h"
+#include "clang/Interpreter/PartialTranslationUnit.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
@@ -27,12 +30,13 @@ namespace clang {
IncrementalExecutor::IncrementalExecutor(llvm::orc::ThreadSafeContext &TSC,
llvm::Error &Err,
- const llvm::Triple &Triple)
+ const clang::TargetInfo &TI)
: TSCtx(TSC) {
using namespace llvm::orc;
llvm::ErrorAsOutParameter EAO(&Err);
- auto JTMB = JITTargetMachineBuilder(Triple);
+ auto JTMB = JITTargetMachineBuilder(TI.getTriple());
+ JTMB.addFeatures(TI.getTargetOpts().Features);
if (auto JitOrErr = LLJITBuilder().setJITTargetMachineBuilder(JTMB).create())
Jit = std::move(*JitOrErr);
else {
@@ -52,8 +56,24 @@ IncrementalExecutor::IncrementalExecutor(llvm::orc::ThreadSafeContext &TSC,
IncrementalExecutor::~IncrementalExecutor() {}
-llvm::Error IncrementalExecutor::addModule(std::unique_ptr<llvm::Module> M) {
- return Jit->addIRModule(llvm::orc::ThreadSafeModule(std::move(M), TSCtx));
+llvm::Error IncrementalExecutor::addModule(PartialTranslationUnit &PTU) {
+ llvm::orc::ResourceTrackerSP RT =
+ Jit->getMainJITDylib().createResourceTracker();
+ ResourceTrackers[&PTU] = RT;
+
+ return Jit->addIRModule(RT, {std::move(PTU.TheModule), TSCtx});
+}
+
+llvm::Error IncrementalExecutor::removeModule(PartialTranslationUnit &PTU) {
+
+ llvm::orc::ResourceTrackerSP RT = std::move(ResourceTrackers[&PTU]);
+ if (!RT)
+ return llvm::Error::success();
+
+ ResourceTrackers.erase(&PTU);
+ if (llvm::Error Err = RT->remove())
+ return Err;
+ return llvm::Error::success();
}
llvm::Error IncrementalExecutor::runCtors() const {
@@ -68,7 +88,7 @@ IncrementalExecutor::getSymbolAddress(llvm::StringRef Name,
if (!Sym)
return Sym.takeError();
- return Sym->getAddress();
+ return Sym->getValue();
}
} // end namespace clang
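The ResourceTracker map is what makes removeModule possible: everything a module materializes under its tracker can later be un-JITed as a unit. A minimal sketch of the underlying ORC pattern, independent of this class:

    #include "llvm/ExecutionEngine/Orc/LLJIT.h"

    llvm::Error addAndRemove(llvm::orc::LLJIT &J, llvm::orc::ThreadSafeModule TSM) {
      llvm::orc::JITDylib &JD = J.getMainJITDylib();
      llvm::orc::ResourceTrackerSP RT = JD.createResourceTracker();
      if (llvm::Error Err = J.addIRModule(RT, std::move(TSM)))
        return Err;
      // ... run code ... then drop the module's symbols and resources as a unit:
      return RT->remove();
    }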
diff --git a/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.h b/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.h
index 51b4d83d10b1..f11ec0aa9e75 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.h
+++ b/contrib/llvm-project/clang/lib/Interpreter/IncrementalExecutor.h
@@ -13,8 +13,8 @@
#ifndef LLVM_CLANG_LIB_INTERPRETER_INCREMENTALEXECUTOR_H
#define LLVM_CLANG_LIB_INTERPRETER_INCREMENTALEXECUTOR_H
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/Triple.h"
#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
#include <memory>
@@ -29,19 +29,27 @@ class ThreadSafeContext;
} // namespace llvm
namespace clang {
+
+struct PartialTranslationUnit;
+class TargetInfo;
+
class IncrementalExecutor {
using CtorDtorIterator = llvm::orc::CtorDtorIterator;
std::unique_ptr<llvm::orc::LLJIT> Jit;
llvm::orc::ThreadSafeContext &TSCtx;
+ llvm::DenseMap<const PartialTranslationUnit *, llvm::orc::ResourceTrackerSP>
+ ResourceTrackers;
+
public:
enum SymbolNameKind { IRName, LinkerName };
IncrementalExecutor(llvm::orc::ThreadSafeContext &TSC, llvm::Error &Err,
- const llvm::Triple &Triple);
+ const clang::TargetInfo &TI);
~IncrementalExecutor();
- llvm::Error addModule(std::unique_ptr<llvm::Module> M);
+ llvm::Error addModule(PartialTranslationUnit &PTU);
+ llvm::Error removeModule(PartialTranslationUnit &PTU);
llvm::Error runCtors() const;
llvm::Expected<llvm::JITTargetAddress>
getSymbolAddress(llvm::StringRef Name, SymbolNameKind NameKind) const;
diff --git a/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.cpp b/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.cpp
index 4ade8b8bb074..db854c4161b4 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.cpp
+++ b/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.cpp
@@ -134,7 +134,10 @@ IncrementalParser::IncrementalParser(std::unique_ptr<CompilerInstance> Instance,
P->Initialize();
}
-IncrementalParser::~IncrementalParser() { Act->FinalizeAction(); }
+IncrementalParser::~IncrementalParser() {
+ P.reset();
+ Act->FinalizeAction();
+}
llvm::Expected<PartialTranslationUnit &>
IncrementalParser::ParseOrWrapTopLevelDecl() {
@@ -164,8 +167,9 @@ IncrementalParser::ParseOrWrapTopLevelDecl() {
}
Parser::DeclGroupPtrTy ADecl;
- for (bool AtEOF = P->ParseFirstTopLevelDecl(ADecl); !AtEOF;
- AtEOF = P->ParseTopLevelDecl(ADecl)) {
+ Sema::ModuleImportState ImportState;
+ for (bool AtEOF = P->ParseFirstTopLevelDecl(ADecl, ImportState); !AtEOF;
+ AtEOF = P->ParseTopLevelDecl(ADecl, ImportState)) {
// If we got a null return and something *was* parsed, ignore it. This
// is due to a top-level semicolon, an action override, or a parse error
// skipping something.
@@ -177,30 +181,12 @@ IncrementalParser::ParseOrWrapTopLevelDecl() {
DiagnosticsEngine &Diags = getCI()->getDiagnostics();
if (Diags.hasErrorOccurred()) {
- TranslationUnitDecl *MostRecentTU = C.getTranslationUnitDecl();
- TranslationUnitDecl *PreviousTU = MostRecentTU->getPreviousDecl();
- assert(PreviousTU && "Must have a TU from the ASTContext initialization!");
- TranslationUnitDecl *FirstTU = MostRecentTU->getFirstDecl();
- assert(FirstTU);
- FirstTU->RedeclLink.setLatest(PreviousTU);
- C.TUDecl = PreviousTU;
- S.TUScope->setEntity(PreviousTU);
-
- // Clean up the lookup table
- if (StoredDeclsMap *Map = PreviousTU->getLookupPtr()) {
- for (auto I = Map->begin(); I != Map->end(); ++I) {
- StoredDeclsList &List = I->second;
- DeclContextLookupResult R = List.getLookupResult();
- for (NamedDecl *D : R)
- if (D->getTranslationUnitDecl() == MostRecentTU)
- List.remove(D);
- if (List.isNull())
- Map->erase(I);
- }
- }
+ PartialTranslationUnit MostRecentPTU = {C.getTranslationUnitDecl(),
+ nullptr};
+ CleanUpPTU(MostRecentPTU);
- // FIXME: Do not reset the pragma handlers.
- Diags.Reset();
+ Diags.Reset(/*soft=*/true);
+ Diags.getClient()->clear();
return llvm::make_error<llvm::StringError>("Parsing failed.",
std::error_code());
}
@@ -292,6 +278,24 @@ IncrementalParser::Parse(llvm::StringRef input) {
return PTU;
}
+void IncrementalParser::CleanUpPTU(PartialTranslationUnit &PTU) {
+ TranslationUnitDecl *MostRecentTU = PTU.TUPart;
+ TranslationUnitDecl *FirstTU = MostRecentTU->getFirstDecl();
+ if (StoredDeclsMap *Map = FirstTU->getPrimaryContext()->getLookupPtr()) {
+ for (auto I = Map->begin(); I != Map->end(); ++I) {
+ StoredDeclsList &List = I->second;
+ DeclContextLookupResult R = List.getLookupResult();
+ for (NamedDecl *D : R) {
+ if (D->getTranslationUnitDecl() == MostRecentTU) {
+ List.remove(D);
+ }
+ }
+ if (List.isNull())
+ Map->erase(I);
+ }
+ }
+}
+
llvm::StringRef IncrementalParser::GetMangledName(GlobalDecl GD) const {
CodeGenerator *CG = getCodeGen(Act.get());
assert(CG);
diff --git a/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.h b/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.h
index d1f454f21239..8e45d6b5931b 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.h
+++ b/contrib/llvm-project/clang/lib/Interpreter/IncrementalParser.h
@@ -72,6 +72,10 @@ public:
///\returns the mangled name of a \c GD.
llvm::StringRef GetMangledName(GlobalDecl GD) const;
+ void CleanUpPTU(PartialTranslationUnit &PTU);
+
+ std::list<PartialTranslationUnit> &getPTUs() { return PTUs; }
+
private:
llvm::Expected<PartialTranslationUnit &> ParseOrWrapTopLevelDecl();
};
diff --git a/contrib/llvm-project/clang/lib/Interpreter/Interpreter.cpp b/contrib/llvm-project/clang/lib/Interpreter/Interpreter.cpp
index 470c9c289a74..0191ad78581d 100644
--- a/contrib/llvm-project/clang/lib/Interpreter/Interpreter.cpp
+++ b/contrib/llvm-project/clang/lib/Interpreter/Interpreter.cpp
@@ -116,6 +116,9 @@ CreateCI(const llvm::opt::ArgStringList &Argv) {
// times, reusing the same AST.
Clang->getCodeGenOpts().ClearASTBeforeBackend = false;
+ Clang->getFrontendOpts().DisableFree = false;
+ Clang->getCodeGenOpts().DisableFree = false;
+
return std::move(Clang);
}
@@ -210,16 +213,16 @@ Interpreter::Parse(llvm::StringRef Code) {
llvm::Error Interpreter::Execute(PartialTranslationUnit &T) {
assert(T.TheModule);
if (!IncrExecutor) {
- const llvm::Triple &Triple =
- getCompilerInstance()->getASTContext().getTargetInfo().getTriple();
+ const clang::TargetInfo &TI =
+ getCompilerInstance()->getASTContext().getTargetInfo();
llvm::Error Err = llvm::Error::success();
- IncrExecutor = std::make_unique<IncrementalExecutor>(*TSCtx, Err, Triple);
+ IncrExecutor = std::make_unique<IncrementalExecutor>(*TSCtx, Err, TI);
if (Err)
return Err;
}
// FIXME: Add a callback to retain the llvm::Module once the JIT is done.
- if (auto Err = IncrExecutor->addModule(std::move(T.TheModule)))
+ if (auto Err = IncrExecutor->addModule(T))
return Err;
if (auto Err = IncrExecutor->runCtors())
@@ -257,3 +260,22 @@ Interpreter::getSymbolAddressFromLinkerName(llvm::StringRef Name) const {
return IncrExecutor->getSymbolAddress(Name, IncrementalExecutor::LinkerName);
}
+
+llvm::Error Interpreter::Undo(unsigned N) {
+
+ std::list<PartialTranslationUnit> &PTUs = IncrParser->getPTUs();
+ if (N > PTUs.size())
+ return llvm::make_error<llvm::StringError>("Operation failed. "
+ "Too many undos",
+ std::error_code());
+ for (unsigned I = 0; I < N; I++) {
+ if (IncrExecutor) {
+ if (llvm::Error Err = IncrExecutor->removeModule(PTUs.back()))
+ return Err;
+ }
+
+ IncrParser->CleanUpPTU(PTUs.back());
+ PTUs.pop_back();
+ }
+ return llvm::Error::success();
+}
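A hedged usage sketch of the new Undo entry point, assuming an already-configured CompilerInstance 'CI' (this mirrors what an interactive driver such as clang-repl would do):

    #include "clang/Interpreter/Interpreter.h"

    llvm::Error demoUndo(std::unique_ptr<clang::CompilerInstance> CI) {
      auto InterpOrErr = clang::Interpreter::create(std::move(CI));
      if (!InterpOrErr)
        return InterpOrErr.takeError();
      auto &Interp = **InterpOrErr;
      if (llvm::Error Err = Interp.ParseAndExecute("int x = 42;"))
        return Err;
      // Rolls back the most recent PTU: un-JITs its module (removeModule) and
      // drops its declarations from the lookup tables (CleanUpPTU).
      return Interp.Undo(1);
    }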
diff --git a/contrib/llvm-project/clang/lib/Lex/DependencyDirectivesScanner.cpp b/contrib/llvm-project/clang/lib/Lex/DependencyDirectivesScanner.cpp
new file mode 100644
index 000000000000..be7b7d6e17b2
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Lex/DependencyDirectivesScanner.cpp
@@ -0,0 +1,862 @@
+//===- DependencyDirectivesScanner.cpp ------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This is the interface for scanning header and source files to get the
+/// minimum necessary preprocessor directives for evaluating includes. It
+/// reduces the source down to #define, #include, #import, @import, and any
+/// conditional preprocessor logic that contains one of those.
+///
+//===----------------------------------------------------------------------===//
+
+#include "clang/Lex/DependencyDirectivesScanner.h"
+#include "clang/Basic/CharInfo.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Lex/LexDiagnostic.h"
+#include "clang/Lex/Lexer.h"
+#include "llvm/ADT/ScopeExit.h"
+#include "llvm/ADT/SmallString.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringSwitch.h"
+
+using namespace clang;
+using namespace clang::dependency_directives_scan;
+using namespace llvm;
+
+namespace {
+
+struct DirectiveWithTokens {
+ DirectiveKind Kind;
+ unsigned NumTokens;
+
+ DirectiveWithTokens(DirectiveKind Kind, unsigned NumTokens)
+ : Kind(Kind), NumTokens(NumTokens) {}
+};
+
+/// Does an efficient "scan" of the sources to detect the presence of
+/// preprocessor (or module import) directives and collects the raw lexed tokens
+/// for those directives so that the \p Lexer can "replay" them when the file is
+/// included.
+///
+/// Note that the behavior of the raw lexer is affected by the language mode,
+/// while at this point we want to do a scan and collect tokens once,
+/// irrespective of the language mode that the file will get included in. To
+/// compensate for that the \p Lexer, while "replaying", will adjust a token
+/// where appropriate, when it could affect the preprocessor's state.
+/// For example in a directive like
+///
+/// \code
+/// #if __has_cpp_attribute(clang::fallthrough)
+/// \endcode
+///
+/// The preprocessor needs to see '::' as 'tok::coloncolon' instead of two
+/// consecutive 'tok::colon' tokens. The \p Lexer will adjust accordingly if
+/// it sees consecutive 'tok::colon' tokens while in C++ mode.
+struct Scanner {
+ Scanner(StringRef Input,
+ SmallVectorImpl<dependency_directives_scan::Token> &Tokens,
+ DiagnosticsEngine *Diags, SourceLocation InputSourceLoc)
+ : Input(Input), Tokens(Tokens), Diags(Diags),
+ InputSourceLoc(InputSourceLoc), LangOpts(getLangOptsForDepScanning()),
+ TheLexer(InputSourceLoc, LangOpts, Input.begin(), Input.begin(),
+ Input.end()) {}
+
+ static LangOptions getLangOptsForDepScanning() {
+ LangOptions LangOpts;
+ // Set the lexer to use 'tok::at' for '@', instead of 'tok::unknown'.
+ LangOpts.ObjC = true;
+ LangOpts.LineComment = true;
+ return LangOpts;
+ }
+
+ /// Lex the provided source and emit the directive tokens.
+ ///
+ /// \returns True on error.
+ bool scan(SmallVectorImpl<Directive> &Directives);
+
+private:
+  /// Lexes the next token and advances \p First and the \p Lexer.
+ LLVM_NODISCARD dependency_directives_scan::Token &
+ lexToken(const char *&First, const char *const End);
+
+ dependency_directives_scan::Token &lexIncludeFilename(const char *&First,
+ const char *const End);
+
+  /// Lexes the next token and, if it is an identifier, returns its string;
+  /// otherwise it skips the current line and returns \p None.
+ ///
+ /// In any case (whatever the token kind) \p First and the \p Lexer will
+ /// advance beyond the token.
+ LLVM_NODISCARD Optional<StringRef>
+ tryLexIdentifierOrSkipLine(const char *&First, const char *const End);
+
+ /// Used when it is certain that next token is an identifier.
+ LLVM_NODISCARD StringRef lexIdentifier(const char *&First,
+ const char *const End);
+
+  /// Lexes the next token and returns true iff it is an identifier that
+  /// matches \p Id; otherwise it skips the current line and returns false.
+ ///
+ /// In any case (whatever the token kind) \p First and the \p Lexer will
+ /// advance beyond the token.
+ LLVM_NODISCARD bool isNextIdentifierOrSkipLine(StringRef Id,
+ const char *&First,
+ const char *const End);
+
+ LLVM_NODISCARD bool scanImpl(const char *First, const char *const End);
+ LLVM_NODISCARD bool lexPPLine(const char *&First, const char *const End);
+ LLVM_NODISCARD bool lexAt(const char *&First, const char *const End);
+ LLVM_NODISCARD bool lexModule(const char *&First, const char *const End);
+ LLVM_NODISCARD bool lexDefine(const char *HashLoc, const char *&First,
+ const char *const End);
+ LLVM_NODISCARD bool lexPragma(const char *&First, const char *const End);
+ LLVM_NODISCARD bool lexEndif(const char *&First, const char *const End);
+ LLVM_NODISCARD bool lexDefault(DirectiveKind Kind, const char *&First,
+ const char *const End);
+ LLVM_NODISCARD bool lexModuleDirectiveBody(DirectiveKind Kind,
+ const char *&First,
+ const char *const End);
+ void lexPPDirectiveBody(const char *&First, const char *const End);
+
+ DirectiveWithTokens &pushDirective(DirectiveKind Kind) {
+ Tokens.append(CurDirToks);
+ DirsWithToks.emplace_back(Kind, CurDirToks.size());
+ CurDirToks.clear();
+ return DirsWithToks.back();
+ }
+ void popDirective() {
+ Tokens.pop_back_n(DirsWithToks.pop_back_val().NumTokens);
+ }
+ DirectiveKind topDirective() const {
+ return DirsWithToks.empty() ? pp_none : DirsWithToks.back().Kind;
+ }
+
+ unsigned getOffsetAt(const char *CurPtr) const {
+ return CurPtr - Input.data();
+ }
+
+  /// Reports a diagnostic if a diagnostic engine is provided. Always returns
+  /// true, so callers can simply 'return reportError(...)'.
+ bool reportError(const char *CurPtr, unsigned Err);
+
+ StringMap<char> SplitIds;
+ StringRef Input;
+ SmallVectorImpl<dependency_directives_scan::Token> &Tokens;
+ DiagnosticsEngine *Diags;
+ SourceLocation InputSourceLoc;
+
+ /// Keeps track of the tokens for the currently lexed directive. Once a
+ /// directive is fully lexed and "committed" then the tokens get appended to
+ /// \p Tokens and \p CurDirToks is cleared for the next directive.
+ SmallVector<dependency_directives_scan::Token, 32> CurDirToks;
+ /// The directives that were lexed along with the number of tokens that each
+ /// directive contains. The tokens of all the directives are kept in \p Tokens
+ /// vector, in the same order as the directives order in \p DirsWithToks.
+ SmallVector<DirectiveWithTokens, 64> DirsWithToks;
+ LangOptions LangOpts;
+ Lexer TheLexer;
+};
+
+} // end anonymous namespace
+
+bool Scanner::reportError(const char *CurPtr, unsigned Err) {
+ if (!Diags)
+ return true;
+ assert(CurPtr >= Input.data() && "invalid buffer ptr");
+ Diags->Report(InputSourceLoc.getLocWithOffset(getOffsetAt(CurPtr)), Err);
+ return true;
+}
+
+static void skipOverSpaces(const char *&First, const char *const End) {
+ while (First != End && isHorizontalWhitespace(*First))
+ ++First;
+}
+
+LLVM_NODISCARD static bool isRawStringLiteral(const char *First,
+ const char *Current) {
+ assert(First <= Current);
+
+ // Check if we can even back up.
+ if (*Current != '"' || First == Current)
+ return false;
+
+ // Check for an "R".
+ --Current;
+ if (*Current != 'R')
+ return false;
+ if (First == Current || !isAsciiIdentifierContinue(*--Current))
+ return true;
+
+ // Check for a prefix of "u", "U", or "L".
+ if (*Current == 'u' || *Current == 'U' || *Current == 'L')
+ return First == Current || !isAsciiIdentifierContinue(*--Current);
+
+ // Check for a prefix of "u8".
+ if (*Current != '8' || First == Current || *Current-- != 'u')
+ return false;
+ return First == Current || !isAsciiIdentifierContinue(*--Current);
+}
+
+static void skipRawString(const char *&First, const char *const End) {
+ assert(First[0] == '"');
+ assert(First[-1] == 'R');
+
+ const char *Last = ++First;
+ while (Last != End && *Last != '(')
+ ++Last;
+ if (Last == End) {
+ First = Last; // Hit the end... just give up.
+ return;
+ }
+
+ StringRef Terminator(First, Last - First);
+ for (;;) {
+ // Move First to just past the next ")".
+ First = Last;
+ while (First != End && *First != ')')
+ ++First;
+ if (First == End)
+ return;
+ ++First;
+
+ // Look ahead for the terminator sequence.
+ Last = First;
+ while (Last != End && size_t(Last - First) < Terminator.size() &&
+ Terminator[Last - First] == *Last)
+ ++Last;
+
+ // Check if we hit it (or the end of the file).
+ if (Last == End) {
+ First = Last;
+ return;
+ }
+ if (size_t(Last - First) < Terminator.size())
+ continue;
+ if (*Last != '"')
+ continue;
+ First = Last + 1;
+ return;
+ }
+}
+
+// Returns the length of the end-of-line sequence: 0 (no end-of-line),
+// 1 ("\n" or "\r"), or 2 ("\r\n" or "\n\r").
+static unsigned isEOL(const char *First, const char *const End) {
+ if (First == End)
+ return 0;
+ if (End - First > 1 && isVerticalWhitespace(First[0]) &&
+ isVerticalWhitespace(First[1]) && First[0] != First[1])
+ return 2;
+ return !!isVerticalWhitespace(First[0]);
+}
+
+static void skipString(const char *&First, const char *const End) {
+ assert(*First == '\'' || *First == '"' || *First == '<');
+ const char Terminator = *First == '<' ? '>' : *First;
+ for (++First; First != End && *First != Terminator; ++First) {
+ // String and character literals don't extend past the end of the line.
+ if (isVerticalWhitespace(*First))
+ return;
+ if (*First != '\\')
+ continue;
+ // Skip past backslash to the next character. This ensures that the
+ // character right after it is skipped as well, which matters if it's
+ // the terminator.
+ if (++First == End)
+ return;
+ if (!isWhitespace(*First))
+ continue;
+ // Whitespace after the backslash might indicate a line continuation.
+ const char *FirstAfterBackslashPastSpace = First;
+ skipOverSpaces(FirstAfterBackslashPastSpace, End);
+ if (unsigned NLSize = isEOL(FirstAfterBackslashPastSpace, End)) {
+ // Advance the character pointer to the next line for the next
+ // iteration.
+ First = FirstAfterBackslashPastSpace + NLSize - 1;
+ }
+ }
+ if (First != End)
+ ++First; // Finish off the string.
+}
+
+// Returns the length of the skipped newline
+static unsigned skipNewline(const char *&First, const char *End) {
+ if (First == End)
+ return 0;
+ assert(isVerticalWhitespace(*First));
+ unsigned Len = isEOL(First, End);
+ assert(Len && "expected newline");
+ First += Len;
+ return Len;
+}
+
+static bool wasLineContinuation(const char *First, unsigned EOLLen) {
+ return *(First - (int)EOLLen - 1) == '\\';
+}
+
+static void skipToNewlineRaw(const char *&First, const char *const End) {
+ for (;;) {
+ if (First == End)
+ return;
+
+ unsigned Len = isEOL(First, End);
+ if (Len)
+ return;
+
+ do {
+ if (++First == End)
+ return;
+ Len = isEOL(First, End);
+ } while (!Len);
+
+ if (First[-1] != '\\')
+ return;
+
+ First += Len;
+ // Keep skipping lines...
+ }
+}
+
+static void skipLineComment(const char *&First, const char *const End) {
+ assert(First[0] == '/' && First[1] == '/');
+ First += 2;
+ skipToNewlineRaw(First, End);
+}
+
+static void skipBlockComment(const char *&First, const char *const End) {
+ assert(First[0] == '/' && First[1] == '*');
+ if (End - First < 4) {
+ First = End;
+ return;
+ }
+ for (First += 3; First != End; ++First)
+ if (First[-1] == '*' && First[0] == '/') {
+ ++First;
+ return;
+ }
+}
+
+/// \returns True if the current single quotation mark character is a C++14
+/// digit separator.
+static bool isQuoteCppDigitSeparator(const char *const Start,
+ const char *const Cur,
+ const char *const End) {
+ assert(*Cur == '\'' && "expected quotation character");
+  // skipLine is called in places where we don't expect a valid number body
+  // before `Start` on the same line, so always return false at the start.
+ if (Start == Cur)
+ return false;
+ // The previous character must be a valid PP number character.
+ // Make sure that the L, u, U, u8 prefixes don't get marked as a
+ // separator though.
+ char Prev = *(Cur - 1);
+ if (Prev == 'L' || Prev == 'U' || Prev == 'u')
+ return false;
+ if (Prev == '8' && (Cur - 1 != Start) && *(Cur - 2) == 'u')
+ return false;
+ if (!isPreprocessingNumberBody(Prev))
+ return false;
+ // The next character should be a valid identifier body character.
+ return (Cur + 1) < End && isAsciiIdentifierContinue(*(Cur + 1));
+}
+
+static void skipLine(const char *&First, const char *const End) {
+ for (;;) {
+ assert(First <= End);
+ if (First == End)
+ return;
+
+ if (isVerticalWhitespace(*First)) {
+ skipNewline(First, End);
+ return;
+ }
+ const char *Start = First;
+ while (First != End && !isVerticalWhitespace(*First)) {
+ // Iterate over strings correctly to avoid comments and newlines.
+ if (*First == '"' ||
+ (*First == '\'' && !isQuoteCppDigitSeparator(Start, First, End))) {
+ if (isRawStringLiteral(Start, First))
+ skipRawString(First, End);
+ else
+ skipString(First, End);
+ continue;
+ }
+
+ // Iterate over comments correctly.
+ if (*First != '/' || End - First < 2) {
+ ++First;
+ continue;
+ }
+
+ if (First[1] == '/') {
+ // "//...".
+ skipLineComment(First, End);
+ continue;
+ }
+
+ if (First[1] != '*') {
+ ++First;
+ continue;
+ }
+
+ // "/*...*/".
+ skipBlockComment(First, End);
+ }
+ if (First == End)
+ return;
+
+ // Skip over the newline.
+ unsigned Len = skipNewline(First, End);
+ if (!wasLineContinuation(First, Len)) // Continue past line-continuations.
+ break;
+ }
+}
+
+static void skipDirective(StringRef Name, const char *&First,
+ const char *const End) {
+ if (llvm::StringSwitch<bool>(Name)
+ .Case("warning", true)
+ .Case("error", true)
+ .Default(false))
+ // Do not process quotes or comments.
+ skipToNewlineRaw(First, End);
+ else
+ skipLine(First, End);
+}
+
+static void skipWhitespace(const char *&First, const char *const End) {
+ for (;;) {
+ assert(First <= End);
+ skipOverSpaces(First, End);
+
+ if (End - First < 2)
+ return;
+
+ if (First[0] == '\\' && isVerticalWhitespace(First[1])) {
+ skipNewline(++First, End);
+ continue;
+ }
+
+ // Check for a non-comment character.
+ if (First[0] != '/')
+ return;
+
+ // "// ...".
+ if (First[1] == '/') {
+ skipLineComment(First, End);
+ return;
+ }
+
+ // Cannot be a comment.
+ if (First[1] != '*')
+ return;
+
+ // "/*...*/".
+ skipBlockComment(First, End);
+ }
+}
+
+bool Scanner::lexModuleDirectiveBody(DirectiveKind Kind, const char *&First,
+ const char *const End) {
+ const char *DirectiveLoc = Input.data() + CurDirToks.front().Offset;
+ for (;;) {
+ const dependency_directives_scan::Token &Tok = lexToken(First, End);
+ if (Tok.is(tok::eof))
+ return reportError(
+ DirectiveLoc,
+ diag::err_dep_source_scanner_missing_semi_after_at_import);
+ if (Tok.is(tok::semi))
+ break;
+ }
+ pushDirective(Kind);
+ skipWhitespace(First, End);
+ if (First == End)
+ return false;
+ if (!isVerticalWhitespace(*First))
+ return reportError(
+ DirectiveLoc, diag::err_dep_source_scanner_unexpected_tokens_at_import);
+ skipNewline(First, End);
+ return false;
+}
+
+dependency_directives_scan::Token &Scanner::lexToken(const char *&First,
+ const char *const End) {
+ clang::Token Tok;
+ TheLexer.LexFromRawLexer(Tok);
+ First = Input.data() + TheLexer.getCurrentBufferOffset();
+ assert(First <= End);
+
+ unsigned Offset = TheLexer.getCurrentBufferOffset() - Tok.getLength();
+ CurDirToks.emplace_back(Offset, Tok.getLength(), Tok.getKind(),
+ Tok.getFlags());
+ return CurDirToks.back();
+}
+
+dependency_directives_scan::Token &
+Scanner::lexIncludeFilename(const char *&First, const char *const End) {
+ clang::Token Tok;
+ TheLexer.LexIncludeFilename(Tok);
+ First = Input.data() + TheLexer.getCurrentBufferOffset();
+ assert(First <= End);
+
+ unsigned Offset = TheLexer.getCurrentBufferOffset() - Tok.getLength();
+ CurDirToks.emplace_back(Offset, Tok.getLength(), Tok.getKind(),
+ Tok.getFlags());
+ return CurDirToks.back();
+}
+
+void Scanner::lexPPDirectiveBody(const char *&First, const char *const End) {
+ while (true) {
+ const dependency_directives_scan::Token &Tok = lexToken(First, End);
+ if (Tok.is(tok::eod))
+ break;
+ }
+}
+
+LLVM_NODISCARD Optional<StringRef>
+Scanner::tryLexIdentifierOrSkipLine(const char *&First, const char *const End) {
+ const dependency_directives_scan::Token &Tok = lexToken(First, End);
+ if (Tok.isNot(tok::raw_identifier)) {
+ if (!Tok.is(tok::eod))
+ skipLine(First, End);
+ return None;
+ }
+
+ bool NeedsCleaning = Tok.Flags & clang::Token::NeedsCleaning;
+ if (LLVM_LIKELY(!NeedsCleaning))
+ return Input.slice(Tok.Offset, Tok.getEnd());
+
+ SmallString<64> Spelling;
+ Spelling.resize(Tok.Length);
+
+ unsigned SpellingLength = 0;
+ const char *BufPtr = Input.begin() + Tok.Offset;
+ const char *AfterIdent = Input.begin() + Tok.getEnd();
+ while (BufPtr < AfterIdent) {
+ unsigned Size;
+ Spelling[SpellingLength++] =
+ Lexer::getCharAndSizeNoWarn(BufPtr, Size, LangOpts);
+ BufPtr += Size;
+ }
+
+ return SplitIds.try_emplace(StringRef(Spelling.begin(), SpellingLength), 0)
+ .first->first();
+}
+
+StringRef Scanner::lexIdentifier(const char *&First, const char *const End) {
+ Optional<StringRef> Id = tryLexIdentifierOrSkipLine(First, End);
+ assert(Id && "expected identifier token");
+ return Id.getValue();
+}
+
+bool Scanner::isNextIdentifierOrSkipLine(StringRef Id, const char *&First,
+ const char *const End) {
+ if (Optional<StringRef> FoundId = tryLexIdentifierOrSkipLine(First, End)) {
+ if (*FoundId == Id)
+ return true;
+ skipLine(First, End);
+ }
+ return false;
+}
+
+bool Scanner::lexAt(const char *&First, const char *const End) {
+ // Handle "@import".
+
+ // Lex '@'.
+ const dependency_directives_scan::Token &AtTok = lexToken(First, End);
+ assert(AtTok.is(tok::at));
+ (void)AtTok;
+
+ if (!isNextIdentifierOrSkipLine("import", First, End))
+ return false;
+ return lexModuleDirectiveBody(decl_at_import, First, End);
+}
+
+bool Scanner::lexModule(const char *&First, const char *const End) {
+ StringRef Id = lexIdentifier(First, End);
+ bool Export = false;
+ if (Id == "export") {
+ Export = true;
+ Optional<StringRef> NextId = tryLexIdentifierOrSkipLine(First, End);
+ if (!NextId)
+ return false;
+ Id = *NextId;
+ }
+
+ if (Id != "module" && Id != "import") {
+ skipLine(First, End);
+ return false;
+ }
+
+ skipWhitespace(First, End);
+
+ // Ignore this as a module directive if the next character can't be part of
+ // an import.
+
+ switch (*First) {
+ case ':':
+ case '<':
+ case '"':
+ break;
+ default:
+ if (!isAsciiIdentifierContinue(*First)) {
+ skipLine(First, End);
+ return false;
+ }
+ }
+
+ TheLexer.seek(getOffsetAt(First), /*IsAtStartOfLine*/ false);
+
+ DirectiveKind Kind;
+ if (Id == "module")
+ Kind = Export ? cxx_export_module_decl : cxx_module_decl;
+ else
+ Kind = Export ? cxx_export_import_decl : cxx_import_decl;
+
+ return lexModuleDirectiveBody(Kind, First, End);
+}
+
+bool Scanner::lexPragma(const char *&First, const char *const End) {
+ Optional<StringRef> FoundId = tryLexIdentifierOrSkipLine(First, End);
+ if (!FoundId)
+ return false;
+
+ StringRef Id = *FoundId;
+ auto Kind = llvm::StringSwitch<DirectiveKind>(Id)
+ .Case("once", pp_pragma_once)
+ .Case("push_macro", pp_pragma_push_macro)
+ .Case("pop_macro", pp_pragma_pop_macro)
+ .Case("include_alias", pp_pragma_include_alias)
+ .Default(pp_none);
+ if (Kind != pp_none) {
+ lexPPDirectiveBody(First, End);
+ pushDirective(Kind);
+ return false;
+ }
+
+ if (Id != "clang") {
+ skipLine(First, End);
+ return false;
+ }
+
+ // #pragma clang.
+ if (!isNextIdentifierOrSkipLine("module", First, End))
+ return false;
+
+ // #pragma clang module.
+ if (!isNextIdentifierOrSkipLine("import", First, End))
+ return false;
+
+ // #pragma clang module import.
+ lexPPDirectiveBody(First, End);
+ pushDirective(pp_pragma_import);
+ return false;
+}
+
+bool Scanner::lexEndif(const char *&First, const char *const End) {
+ // Strip out "#else" if it's empty.
+ if (topDirective() == pp_else)
+ popDirective();
+
+ // If "#ifdef" is empty, strip it and skip the "#endif".
+ //
+ // FIXME: Once/if Clang starts disallowing __has_include in macro expansions,
+ // we can skip empty `#if` and `#elif` blocks as well after scanning for a
+ // literal __has_include in the condition. Even without that rule we could
+ // drop the tokens if we scan for identifiers in the condition and find none.
+ if (topDirective() == pp_ifdef || topDirective() == pp_ifndef) {
+ popDirective();
+ skipLine(First, End);
+ return false;
+ }
+
+ return lexDefault(pp_endif, First, End);
+}
+
+bool Scanner::lexDefault(DirectiveKind Kind, const char *&First,
+ const char *const End) {
+ lexPPDirectiveBody(First, End);
+ pushDirective(Kind);
+ return false;
+}
+
+static bool isStartOfRelevantLine(char First) {
+ switch (First) {
+ case '#':
+ case '@':
+ case 'i':
+ case 'e':
+ case 'm':
+ return true;
+ }
+ return false;
+}
+
+bool Scanner::lexPPLine(const char *&First, const char *const End) {
+ assert(First != End);
+
+ skipWhitespace(First, End);
+ assert(First <= End);
+ if (First == End)
+ return false;
+
+ if (!isStartOfRelevantLine(*First)) {
+ skipLine(First, End);
+ assert(First <= End);
+ return false;
+ }
+
+ TheLexer.seek(getOffsetAt(First), /*IsAtStartOfLine*/ true);
+
+ auto ScEx1 = make_scope_exit([&]() {
+ /// Clear Scanner's CurDirToks before returning, in case we didn't push a
+ /// new directive.
+ CurDirToks.clear();
+ });
+
+ // Handle "@import".
+ if (*First == '@')
+ return lexAt(First, End);
+
+ if (*First == 'i' || *First == 'e' || *First == 'm')
+ return lexModule(First, End);
+
+ // Handle preprocessing directives.
+
+ TheLexer.setParsingPreprocessorDirective(true);
+ auto ScEx2 = make_scope_exit(
+ [&]() { TheLexer.setParsingPreprocessorDirective(false); });
+
+ // Lex '#'.
+ const dependency_directives_scan::Token &HashTok = lexToken(First, End);
+ assert(HashTok.is(tok::hash));
+ (void)HashTok;
+
+ Optional<StringRef> FoundId = tryLexIdentifierOrSkipLine(First, End);
+ if (!FoundId)
+ return false;
+
+ StringRef Id = *FoundId;
+
+ if (Id == "pragma")
+ return lexPragma(First, End);
+
+ auto Kind = llvm::StringSwitch<DirectiveKind>(Id)
+ .Case("include", pp_include)
+ .Case("__include_macros", pp___include_macros)
+ .Case("define", pp_define)
+ .Case("undef", pp_undef)
+ .Case("import", pp_import)
+ .Case("include_next", pp_include_next)
+ .Case("if", pp_if)
+ .Case("ifdef", pp_ifdef)
+ .Case("ifndef", pp_ifndef)
+ .Case("elif", pp_elif)
+ .Case("elifdef", pp_elifdef)
+ .Case("elifndef", pp_elifndef)
+ .Case("else", pp_else)
+ .Case("endif", pp_endif)
+ .Default(pp_none);
+ if (Kind == pp_none) {
+ skipDirective(Id, First, End);
+ return false;
+ }
+
+ if (Kind == pp_endif)
+ return lexEndif(First, End);
+
+ switch (Kind) {
+ case pp_include:
+ case pp___include_macros:
+ case pp_include_next:
+ case pp_import:
+ lexIncludeFilename(First, End);
+ break;
+ default:
+ break;
+ }
+
+ // Everything else.
+ return lexDefault(Kind, First, End);
+}
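+
+// Note that lexPPLine returns true only on a scanner error; a false return
+// means "continue scanning", whether or not a directive was recorded.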
+
+static void skipUTF8ByteOrderMark(const char *&First, const char *const End) {
+ if ((End - First) >= 3 && First[0] == '\xef' && First[1] == '\xbb' &&
+ First[2] == '\xbf')
+ First += 3;
+}
+
+bool Scanner::scanImpl(const char *First, const char *const End) {
+ skipUTF8ByteOrderMark(First, End);
+ while (First != End)
+ if (lexPPLine(First, End))
+ return true;
+ return false;
+}
+
+bool Scanner::scan(SmallVectorImpl<Directive> &Directives) {
+ bool Error = scanImpl(Input.begin(), Input.end());
+
+ if (!Error) {
+ // Add an EOF on success.
+ pushDirective(pp_eof);
+ }
+
+ ArrayRef<dependency_directives_scan::Token> RemainingTokens = Tokens;
+ for (const DirectiveWithTokens &DirWithToks : DirsWithToks) {
+ assert(RemainingTokens.size() >= DirWithToks.NumTokens);
+ Directives.emplace_back(DirWithToks.Kind,
+ RemainingTokens.take_front(DirWithToks.NumTokens));
+ RemainingTokens = RemainingTokens.drop_front(DirWithToks.NumTokens);
+ }
+ assert(RemainingTokens.empty());
+
+ return Error;
+}
+
+bool clang::scanSourceForDependencyDirectives(
+ StringRef Input, SmallVectorImpl<dependency_directives_scan::Token> &Tokens,
+ SmallVectorImpl<Directive> &Directives, DiagnosticsEngine *Diags,
+ SourceLocation InputSourceLoc) {
+ return Scanner(Input, Tokens, Diags, InputSourceLoc).scan(Directives);
+}
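+
+// A minimal usage sketch (hypothetical caller; variable names illustrative):
+//
+//   SmallVector<dependency_directives_scan::Token, 32> Toks;
+//   SmallVector<dependency_directives_scan::Directive, 16> Dirs;
+//   bool Failed = scanSourceForDependencyDirectives(
+//       "#include \"a.h\"\nint x;\n", Toks, Dirs, /*Diags=*/nullptr,
+//       SourceLocation());
+//   // On success, Dirs ends with a pp_eof directive.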
+
+void clang::printDependencyDirectivesAsSource(
+ StringRef Source,
+ ArrayRef<dependency_directives_scan::Directive> Directives,
+ llvm::raw_ostream &OS) {
+ // Add a space separator where it is convenient for testing purposes.
+ auto needsSpaceSeparator =
+ [](tok::TokenKind Prev,
+ const dependency_directives_scan::Token &Tok) -> bool {
+ if (Prev == Tok.Kind)
+ return !Tok.isOneOf(tok::l_paren, tok::r_paren, tok::l_square,
+ tok::r_square);
+ if (Prev == tok::raw_identifier &&
+ Tok.isOneOf(tok::hash, tok::numeric_constant, tok::string_literal,
+ tok::char_constant, tok::header_name))
+ return true;
+ if (Prev == tok::r_paren &&
+ Tok.isOneOf(tok::raw_identifier, tok::hash, tok::string_literal,
+ tok::char_constant, tok::unknown))
+ return true;
+ if (Prev == tok::comma &&
+ Tok.isOneOf(tok::l_paren, tok::string_literal, tok::less))
+ return true;
+ return false;
+ };
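+ // E.g. two adjacent raw identifiers ("ifdef" "FOO") get a space between
+ // them, while a '#' immediately followed by an identifier does not.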
+
+ for (const dependency_directives_scan::Directive &Directive : Directives) {
+ Optional<tok::TokenKind> PrevTokenKind;
+ for (const dependency_directives_scan::Token &Tok : Directive.Tokens) {
+ if (PrevTokenKind && needsSpaceSeparator(*PrevTokenKind, Tok))
+ OS << ' ';
+ PrevTokenKind = Tok.Kind;
+ OS << Source.slice(Tok.Offset, Tok.getEnd());
+ }
+ }
+}
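+
+// For illustration, scanning the (hypothetical) input
+//
+//   #include "a.h"
+//   int x = 1;
+//   #ifdef B
+//   #include <b.h>
+//   #endif
+//
+// records only the directive tokens, and printing them back with
+// printDependencyDirectivesAsSource yields roughly:
+//
+//   #include "a.h"#ifdef B#include <b.h>#endif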
diff --git a/contrib/llvm-project/clang/lib/Lex/DependencyDirectivesSourceMinimizer.cpp b/contrib/llvm-project/clang/lib/Lex/DependencyDirectivesSourceMinimizer.cpp
deleted file mode 100644
index f597c56837fb..000000000000
--- a/contrib/llvm-project/clang/lib/Lex/DependencyDirectivesSourceMinimizer.cpp
+++ /dev/null
@@ -1,982 +0,0 @@
-//===- DependencyDirectivesSourceMinimizer.cpp - -------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-///
-/// \file
-/// This is the implementation for minimizing header and source files to the
-/// minimum necessary preprocessor directives for evaluating includes. It
-/// reduces the source down to #define, #include, #import, @import, and any
-/// conditional preprocessor logic that contains one of those.
-///
-//===----------------------------------------------------------------------===//
-
-#include "clang/Lex/DependencyDirectivesSourceMinimizer.h"
-#include "clang/Basic/CharInfo.h"
-#include "clang/Basic/Diagnostic.h"
-#include "clang/Lex/LexDiagnostic.h"
-#include "llvm/ADT/StringMap.h"
-#include "llvm/ADT/StringSwitch.h"
-#include "llvm/Support/MemoryBuffer.h"
-
-using namespace llvm;
-using namespace clang;
-using namespace clang::minimize_source_to_dependency_directives;
-
-namespace {
-
-struct Minimizer {
- /// Minimized output.
- SmallVectorImpl<char> &Out;
- /// The known tokens encountered during the minimization.
- SmallVectorImpl<Token> &Tokens;
-
- Minimizer(SmallVectorImpl<char> &Out, SmallVectorImpl<Token> &Tokens,
- StringRef Input, DiagnosticsEngine *Diags,
- SourceLocation InputSourceLoc)
- : Out(Out), Tokens(Tokens), Input(Input), Diags(Diags),
- InputSourceLoc(InputSourceLoc) {}
-
- /// Lex the provided source and emit the minimized output.
- ///
- /// \returns True on error.
- bool minimize();
-
-private:
- struct IdInfo {
- const char *Last;
- StringRef Name;
- };
-
- /// Lex an identifier.
- ///
- /// \pre First points at a valid identifier head.
- LLVM_NODISCARD IdInfo lexIdentifier(const char *First, const char *const End);
- LLVM_NODISCARD bool isNextIdentifier(StringRef Id, const char *&First,
- const char *const End);
- LLVM_NODISCARD bool minimizeImpl(const char *First, const char *const End);
- LLVM_NODISCARD bool lexPPLine(const char *&First, const char *const End);
- LLVM_NODISCARD bool lexAt(const char *&First, const char *const End);
- LLVM_NODISCARD bool lexModule(const char *&First, const char *const End);
- LLVM_NODISCARD bool lexDefine(const char *&First, const char *const End);
- LLVM_NODISCARD bool lexPragma(const char *&First, const char *const End);
- LLVM_NODISCARD bool lexEndif(const char *&First, const char *const End);
- LLVM_NODISCARD bool lexDefault(TokenKind Kind, StringRef Directive,
- const char *&First, const char *const End);
- Token &makeToken(TokenKind K) {
- Tokens.emplace_back(K, Out.size());
- return Tokens.back();
- }
- void popToken() {
- Out.resize(Tokens.back().Offset);
- Tokens.pop_back();
- }
- TokenKind top() const { return Tokens.empty() ? pp_none : Tokens.back().K; }
-
- Minimizer &put(char Byte) {
- Out.push_back(Byte);
- return *this;
- }
- Minimizer &append(StringRef S) { return append(S.begin(), S.end()); }
- Minimizer &append(const char *First, const char *Last) {
- Out.append(First, Last);
- return *this;
- }
-
- void printToNewline(const char *&First, const char *const End);
- void printAdjacentModuleNameParts(const char *&First, const char *const End);
- LLVM_NODISCARD bool printAtImportBody(const char *&First,
- const char *const End);
- void printDirectiveBody(const char *&First, const char *const End);
- void printAdjacentMacroArgs(const char *&First, const char *const End);
- LLVM_NODISCARD bool printMacroArgs(const char *&First, const char *const End);
-
- /// Reports a diagnostic if the diagnostic engine is provided. Always returns
- /// true at the end.
- bool reportError(const char *CurPtr, unsigned Err);
-
- StringMap<char> SplitIds;
- StringRef Input;
- DiagnosticsEngine *Diags;
- SourceLocation InputSourceLoc;
-};
-
-} // end anonymous namespace
-
-bool Minimizer::reportError(const char *CurPtr, unsigned Err) {
- if (!Diags)
- return true;
- assert(CurPtr >= Input.data() && "invalid buffer ptr");
- Diags->Report(InputSourceLoc.getLocWithOffset(CurPtr - Input.data()), Err);
- return true;
-}
-
-static void skipOverSpaces(const char *&First, const char *const End) {
- while (First != End && isHorizontalWhitespace(*First))
- ++First;
-}
-
-LLVM_NODISCARD static bool isRawStringLiteral(const char *First,
- const char *Current) {
- assert(First <= Current);
-
- // Check if we can even back up.
- if (*Current != '"' || First == Current)
- return false;
-
- // Check for an "R".
- --Current;
- if (*Current != 'R')
- return false;
- if (First == Current || !isAsciiIdentifierContinue(*--Current))
- return true;
-
- // Check for a prefix of "u", "U", or "L".
- if (*Current == 'u' || *Current == 'U' || *Current == 'L')
- return First == Current || !isAsciiIdentifierContinue(*--Current);
-
- // Check for a prefix of "u8".
- if (*Current != '8' || First == Current || *Current-- != 'u')
- return false;
- return First == Current || !isAsciiIdentifierContinue(*--Current);
-}
-
-static void skipRawString(const char *&First, const char *const End) {
- assert(First[0] == '"');
- assert(First[-1] == 'R');
-
- const char *Last = ++First;
- while (Last != End && *Last != '(')
- ++Last;
- if (Last == End) {
- First = Last; // Hit the end... just give up.
- return;
- }
-
- StringRef Terminator(First, Last - First);
- for (;;) {
- // Move First to just past the next ")".
- First = Last;
- while (First != End && *First != ')')
- ++First;
- if (First == End)
- return;
- ++First;
-
- // Look ahead for the terminator sequence.
- Last = First;
- while (Last != End && size_t(Last - First) < Terminator.size() &&
- Terminator[Last - First] == *Last)
- ++Last;
-
- // Check if we hit it (or the end of the file).
- if (Last == End) {
- First = Last;
- return;
- }
- if (size_t(Last - First) < Terminator.size())
- continue;
- if (*Last != '"')
- continue;
- First = Last + 1;
- return;
- }
-}
-
-// Returns the length of EOL, either 0 (no end-of-line), 1 (\n) or 2 (\r\n)
-static unsigned isEOL(const char *First, const char *const End) {
- if (First == End)
- return 0;
- if (End - First > 1 && isVerticalWhitespace(First[0]) &&
- isVerticalWhitespace(First[1]) && First[0] != First[1])
- return 2;
- return !!isVerticalWhitespace(First[0]);
-}
-
-static void skipString(const char *&First, const char *const End) {
- assert(*First == '\'' || *First == '"' || *First == '<');
- const char Terminator = *First == '<' ? '>' : *First;
- for (++First; First != End && *First != Terminator; ++First) {
- // String and character literals don't extend past the end of the line.
- if (isVerticalWhitespace(*First))
- return;
- if (*First != '\\')
- continue;
- // Skip past backslash to the next character. This ensures that the
- // character right after it is skipped as well, which matters if it's
- // the terminator.
- if (++First == End)
- return;
- if (!isWhitespace(*First))
- continue;
- // Whitespace after the backslash might indicate a line continuation.
- const char *FirstAfterBackslashPastSpace = First;
- skipOverSpaces(FirstAfterBackslashPastSpace, End);
- if (unsigned NLSize = isEOL(FirstAfterBackslashPastSpace, End)) {
- // Advance the character pointer to the next line for the next
- // iteration.
- First = FirstAfterBackslashPastSpace + NLSize - 1;
- }
- }
- if (First != End)
- ++First; // Finish off the string.
-}
-
-// Returns the length of the skipped newline
-static unsigned skipNewline(const char *&First, const char *End) {
- if (First == End)
- return 0;
- assert(isVerticalWhitespace(*First));
- unsigned Len = isEOL(First, End);
- assert(Len && "expected newline");
- First += Len;
- return Len;
-}
-
-static bool wasLineContinuation(const char *First, unsigned EOLLen) {
- return *(First - (int)EOLLen - 1) == '\\';
-}
-
-static void skipToNewlineRaw(const char *&First, const char *const End) {
- for (;;) {
- if (First == End)
- return;
-
- unsigned Len = isEOL(First, End);
- if (Len)
- return;
-
- do {
- if (++First == End)
- return;
- Len = isEOL(First, End);
- } while (!Len);
-
- if (First[-1] != '\\')
- return;
-
- First += Len;
- // Keep skipping lines...
- }
-}
-
-static const char *findLastNonSpace(const char *First, const char *Last) {
- assert(First <= Last);
- while (First != Last && isHorizontalWhitespace(Last[-1]))
- --Last;
- return Last;
-}
-
-static const char *findFirstTrailingSpace(const char *First,
- const char *Last) {
- const char *LastNonSpace = findLastNonSpace(First, Last);
- if (Last == LastNonSpace)
- return Last;
- assert(isHorizontalWhitespace(LastNonSpace[0]));
- return LastNonSpace + 1;
-}
-
-static void skipLineComment(const char *&First, const char *const End) {
- assert(First[0] == '/' && First[1] == '/');
- First += 2;
- skipToNewlineRaw(First, End);
-}
-
-static void skipBlockComment(const char *&First, const char *const End) {
- assert(First[0] == '/' && First[1] == '*');
- if (End - First < 4) {
- First = End;
- return;
- }
- for (First += 3; First != End; ++First)
- if (First[-1] == '*' && First[0] == '/') {
- ++First;
- return;
- }
-}
-
-/// \returns True if the current single quotation mark character is a C++14
-/// digit separator.
-static bool isQuoteCppDigitSeparator(const char *const Start,
- const char *const Cur,
- const char *const End) {
- assert(*Cur == '\'' && "expected quotation character");
- // skipLine is called in places where we don't expect a valid number
- // body before `Start` on the same line, so always return false at the start.
- if (Start == Cur)
- return false;
- // The previous character must be a valid PP number character.
- // Make sure that the L, u, U, u8 prefixes don't get marked as a
- // separator though.
- char Prev = *(Cur - 1);
- if (Prev == 'L' || Prev == 'U' || Prev == 'u')
- return false;
- if (Prev == '8' && (Cur - 1 != Start) && *(Cur - 2) == 'u')
- return false;
- if (!isPreprocessingNumberBody(Prev))
- return false;
- // The next character should be a valid identifier body character.
- return (Cur + 1) < End && isAsciiIdentifierContinue(*(Cur + 1));
-}
-
-static void skipLine(const char *&First, const char *const End) {
- for (;;) {
- assert(First <= End);
- if (First == End)
- return;
-
- if (isVerticalWhitespace(*First)) {
- skipNewline(First, End);
- return;
- }
- const char *Start = First;
- while (First != End && !isVerticalWhitespace(*First)) {
- // Iterate over strings correctly to avoid comments and newlines.
- if (*First == '"' ||
- (*First == '\'' && !isQuoteCppDigitSeparator(Start, First, End))) {
- if (isRawStringLiteral(Start, First))
- skipRawString(First, End);
- else
- skipString(First, End);
- continue;
- }
-
- // Iterate over comments correctly.
- if (*First != '/' || End - First < 2) {
- ++First;
- continue;
- }
-
- if (First[1] == '/') {
- // "//...".
- skipLineComment(First, End);
- continue;
- }
-
- if (First[1] != '*') {
- ++First;
- continue;
- }
-
- // "/*...*/".
- skipBlockComment(First, End);
- }
- if (First == End)
- return;
-
- // Skip over the newline.
- unsigned Len = skipNewline(First, End);
- if (!wasLineContinuation(First, Len)) // Continue past line-continuations.
- break;
- }
-}
-
-static void skipDirective(StringRef Name, const char *&First,
- const char *const End) {
- if (llvm::StringSwitch<bool>(Name)
- .Case("warning", true)
- .Case("error", true)
- .Default(false))
- // Do not process quotes or comments.
- skipToNewlineRaw(First, End);
- else
- skipLine(First, End);
-}
-
-void Minimizer::printToNewline(const char *&First, const char *const End) {
- while (First != End && !isVerticalWhitespace(*First)) {
- const char *Last = First;
- do {
- // Iterate over strings correctly to avoid comments and newlines.
- if (*Last == '"' || *Last == '\'' ||
- (*Last == '<' && top() == pp_include)) {
- if (LLVM_UNLIKELY(isRawStringLiteral(First, Last)))
- skipRawString(Last, End);
- else
- skipString(Last, End);
- continue;
- }
- if (*Last != '/' || End - Last < 2) {
- ++Last;
- continue; // Gather the rest up to print verbatim.
- }
-
- if (Last[1] != '/' && Last[1] != '*') {
- ++Last;
- continue;
- }
-
- // Deal with "//..." and "/*...*/".
- append(First, findFirstTrailingSpace(First, Last));
- First = Last;
-
- if (Last[1] == '/') {
- skipLineComment(First, End);
- return;
- }
-
- put(' ');
- skipBlockComment(First, End);
- skipOverSpaces(First, End);
- Last = First;
- } while (Last != End && !isVerticalWhitespace(*Last));
-
- // Print out the string.
- const char *LastBeforeTrailingSpace = findLastNonSpace(First, Last);
- if (Last == End || LastBeforeTrailingSpace == First ||
- LastBeforeTrailingSpace[-1] != '\\') {
- append(First, LastBeforeTrailingSpace);
- First = Last;
- skipNewline(First, End);
- return;
- }
-
- // Print up to the backslash, backing up over spaces. Preserve at least one
- // space, as the space matters when tokens are separated by a line
- // continuation.
- append(First, findFirstTrailingSpace(
- First, LastBeforeTrailingSpace - 1));
-
- First = Last;
- skipNewline(First, End);
- skipOverSpaces(First, End);
- }
-}
-
-static void skipWhitespace(const char *&First, const char *const End) {
- for (;;) {
- assert(First <= End);
- skipOverSpaces(First, End);
-
- if (End - First < 2)
- return;
-
- if (First[0] == '\\' && isVerticalWhitespace(First[1])) {
- skipNewline(++First, End);
- continue;
- }
-
- // Check for a non-comment character.
- if (First[0] != '/')
- return;
-
- // "// ...".
- if (First[1] == '/') {
- skipLineComment(First, End);
- return;
- }
-
- // Cannot be a comment.
- if (First[1] != '*')
- return;
-
- // "/*...*/".
- skipBlockComment(First, End);
- }
-}
-
-void Minimizer::printAdjacentModuleNameParts(const char *&First,
- const char *const End) {
- // Skip over parts of the body.
- const char *Last = First;
- do
- ++Last;
- while (Last != End && (isAsciiIdentifierContinue(*Last) || *Last == '.'));
- append(First, Last);
- First = Last;
-}
-
-bool Minimizer::printAtImportBody(const char *&First, const char *const End) {
- for (;;) {
- skipWhitespace(First, End);
- if (First == End)
- return true;
-
- if (isVerticalWhitespace(*First)) {
- skipNewline(First, End);
- continue;
- }
-
- // Found a semicolon.
- if (*First == ';') {
- put(*First++).put('\n');
- return false;
- }
-
- // Don't handle macro expansions inside @import for now.
- if (!isAsciiIdentifierContinue(*First) && *First != '.')
- return true;
-
- printAdjacentModuleNameParts(First, End);
- }
-}
-
-void Minimizer::printDirectiveBody(const char *&First, const char *const End) {
- skipWhitespace(First, End); // Skip initial whitespace.
- printToNewline(First, End);
- while (Out.back() == ' ')
- Out.pop_back();
- put('\n');
-}
-
-LLVM_NODISCARD static const char *lexRawIdentifier(const char *First,
- const char *const End) {
- assert(isAsciiIdentifierContinue(*First) && "invalid identifier");
- const char *Last = First + 1;
- while (Last != End && isAsciiIdentifierContinue(*Last))
- ++Last;
- return Last;
-}
-
-LLVM_NODISCARD static const char *
-getIdentifierContinuation(const char *First, const char *const End) {
- if (End - First < 3 || First[0] != '\\' || !isVerticalWhitespace(First[1]))
- return nullptr;
-
- ++First;
- skipNewline(First, End);
- if (First == End)
- return nullptr;
- return isAsciiIdentifierContinue(First[0]) ? First : nullptr;
-}
-
-Minimizer::IdInfo Minimizer::lexIdentifier(const char *First,
- const char *const End) {
- const char *Last = lexRawIdentifier(First, End);
- const char *Next = getIdentifierContinuation(Last, End);
- if (LLVM_LIKELY(!Next))
- return IdInfo{Last, StringRef(First, Last - First)};
-
- // Slow path, where identifiers are split over lines.
- SmallVector<char, 64> Id(First, Last);
- while (Next) {
- Last = lexRawIdentifier(Next, End);
- Id.append(Next, Last);
- Next = getIdentifierContinuation(Last, End);
- }
- return IdInfo{
- Last,
- SplitIds.try_emplace(StringRef(Id.begin(), Id.size()), 0).first->first()};
-}
-
-void Minimizer::printAdjacentMacroArgs(const char *&First,
- const char *const End) {
- // Skip over parts of the body.
- const char *Last = First;
- do
- ++Last;
- while (Last != End &&
- (isAsciiIdentifierContinue(*Last) || *Last == '.' || *Last == ','));
- append(First, Last);
- First = Last;
-}
-
-bool Minimizer::printMacroArgs(const char *&First, const char *const End) {
- assert(*First == '(');
- put(*First++);
- for (;;) {
- skipWhitespace(First, End);
- if (First == End)
- return true;
-
- if (*First == ')') {
- put(*First++);
- return false;
- }
-
- // This is intentionally fairly liberal.
- if (!(isAsciiIdentifierContinue(*First) || *First == '.' || *First == ','))
- return true;
-
- printAdjacentMacroArgs(First, End);
- }
-}
-
-/// Looks for an identifier starting from First.
-///
-/// Updates "First" to just past the next identifier, if any. Returns true iff
-/// the identifier matches "Id".
-bool Minimizer::isNextIdentifier(StringRef Id, const char *&First,
- const char *const End) {
- skipWhitespace(First, End);
- if (First == End || !isAsciiIdentifierStart(*First))
- return false;
-
- IdInfo FoundId = lexIdentifier(First, End);
- First = FoundId.Last;
- return FoundId.Name == Id;
-}
-
-bool Minimizer::lexAt(const char *&First, const char *const End) {
- // Handle "@import".
- const char *ImportLoc = First++;
- if (!isNextIdentifier("import", First, End)) {
- skipLine(First, End);
- return false;
- }
- makeToken(decl_at_import);
- append("@import ");
- if (printAtImportBody(First, End))
- return reportError(
- ImportLoc, diag::err_dep_source_minimizer_missing_sema_after_at_import);
- skipWhitespace(First, End);
- if (First == End)
- return false;
- if (!isVerticalWhitespace(*First))
- return reportError(
- ImportLoc, diag::err_dep_source_minimizer_unexpected_tokens_at_import);
- skipNewline(First, End);
- return false;
-}
-
-bool Minimizer::lexModule(const char *&First, const char *const End) {
- IdInfo Id = lexIdentifier(First, End);
- First = Id.Last;
- bool Export = false;
- if (Id.Name == "export") {
- Export = true;
- skipWhitespace(First, End);
- if (!isAsciiIdentifierContinue(*First)) {
- skipLine(First, End);
- return false;
- }
- Id = lexIdentifier(First, End);
- First = Id.Last;
- }
-
- if (Id.Name != "module" && Id.Name != "import") {
- skipLine(First, End);
- return false;
- }
-
- skipWhitespace(First, End);
-
- // Ignore this as a module directive if the next character can't be part of
- // an import.
-
- switch (*First) {
- case ':':
- case '<':
- case '"':
- break;
- default:
- if (!isAsciiIdentifierContinue(*First)) {
- skipLine(First, End);
- return false;
- }
- }
-
- if (Export) {
- makeToken(cxx_export_decl);
- append("export ");
- }
-
- if (Id.Name == "module")
- makeToken(cxx_module_decl);
- else
- makeToken(cxx_import_decl);
- append(Id.Name);
- append(" ");
- printToNewline(First, End);
- append("\n");
- return false;
-}
-
-bool Minimizer::lexDefine(const char *&First, const char *const End) {
- makeToken(pp_define);
- append("#define ");
- skipWhitespace(First, End);
-
- if (!isAsciiIdentifierStart(*First))
- return reportError(First, diag::err_pp_macro_not_identifier);
-
- IdInfo Id = lexIdentifier(First, End);
- const char *Last = Id.Last;
- append(Id.Name);
- if (Last == End)
- return false;
- if (*Last == '(') {
- size_t Size = Out.size();
- if (printMacroArgs(Last, End)) {
- // Be robust to bad macro arguments, since they can show up in disabled
- // code.
- Out.resize(Size);
- append("(/* invalid */\n");
- skipLine(Last, End);
- return false;
- }
- }
- skipWhitespace(Last, End);
- if (Last == End)
- return false;
- if (!isVerticalWhitespace(*Last))
- put(' ');
- printDirectiveBody(Last, End);
- First = Last;
- return false;
-}
-
-bool Minimizer::lexPragma(const char *&First, const char *const End) {
- // #pragma.
- skipWhitespace(First, End);
- if (First == End || !isAsciiIdentifierStart(*First))
- return false;
-
- IdInfo FoundId = lexIdentifier(First, End);
- First = FoundId.Last;
- if (FoundId.Name == "once") {
- // #pragma once
- skipLine(First, End);
- makeToken(pp_pragma_once);
- append("#pragma once\n");
- return false;
- }
- if (FoundId.Name == "push_macro") {
- // #pragma push_macro
- makeToken(pp_pragma_push_macro);
- append("#pragma push_macro");
- printDirectiveBody(First, End);
- return false;
- }
- if (FoundId.Name == "pop_macro") {
- // #pragma pop_macro
- makeToken(pp_pragma_pop_macro);
- append("#pragma pop_macro");
- printDirectiveBody(First, End);
- return false;
- }
- if (FoundId.Name == "include_alias") {
- // #pragma include_alias
- makeToken(pp_pragma_include_alias);
- append("#pragma include_alias");
- printDirectiveBody(First, End);
- return false;
- }
-
- if (FoundId.Name != "clang") {
- skipLine(First, End);
- return false;
- }
-
- // #pragma clang.
- if (!isNextIdentifier("module", First, End)) {
- skipLine(First, End);
- return false;
- }
-
- // #pragma clang module.
- if (!isNextIdentifier("import", First, End)) {
- skipLine(First, End);
- return false;
- }
-
- // #pragma clang module import.
- makeToken(pp_pragma_import);
- append("#pragma clang module import ");
- printDirectiveBody(First, End);
- return false;
-}
-
-bool Minimizer::lexEndif(const char *&First, const char *const End) {
- // Strip out "#else" if it's empty.
- if (top() == pp_else)
- popToken();
-
- // If "#ifdef" is empty, strip it and skip the "#endif".
- //
- // FIXME: Once/if Clang starts disallowing __has_include in macro expansions,
- // we can skip empty `#if` and `#elif` blocks as well after scanning for a
- // literal __has_include in the condition. Even without that rule we could
- // drop the tokens if we scan for identifiers in the condition and find none.
- if (top() == pp_ifdef || top() == pp_ifndef) {
- popToken();
- skipLine(First, End);
- return false;
- }
-
- return lexDefault(pp_endif, "endif", First, End);
-}
-
-bool Minimizer::lexDefault(TokenKind Kind, StringRef Directive,
- const char *&First, const char *const End) {
- makeToken(Kind);
- put('#').append(Directive).put(' ');
- printDirectiveBody(First, End);
- return false;
-}
-
-static bool isStartOfRelevantLine(char First) {
- switch (First) {
- case '#':
- case '@':
- case 'i':
- case 'e':
- case 'm':
- return true;
- }
- return false;
-}
-
-bool Minimizer::lexPPLine(const char *&First, const char *const End) {
- assert(First != End);
-
- skipWhitespace(First, End);
- assert(First <= End);
- if (First == End)
- return false;
-
- if (!isStartOfRelevantLine(*First)) {
- skipLine(First, End);
- assert(First <= End);
- return false;
- }
-
- // Handle "@import".
- if (*First == '@')
- return lexAt(First, End);
-
- if (*First == 'i' || *First == 'e' || *First == 'm')
- return lexModule(First, End);
-
- // Handle preprocessing directives.
- ++First; // Skip over '#'.
- skipWhitespace(First, End);
-
- if (First == End)
- return reportError(First, diag::err_pp_expected_eol);
-
- if (!isAsciiIdentifierStart(*First)) {
- skipLine(First, End);
- return false;
- }
-
- // Figure out the token.
- IdInfo Id = lexIdentifier(First, End);
- First = Id.Last;
-
- if (Id.Name == "pragma")
- return lexPragma(First, End);
-
- auto Kind = llvm::StringSwitch<TokenKind>(Id.Name)
- .Case("include", pp_include)
- .Case("__include_macros", pp___include_macros)
- .Case("define", pp_define)
- .Case("undef", pp_undef)
- .Case("import", pp_import)
- .Case("include_next", pp_include_next)
- .Case("if", pp_if)
- .Case("ifdef", pp_ifdef)
- .Case("ifndef", pp_ifndef)
- .Case("elif", pp_elif)
- .Case("elifdef", pp_elifdef)
- .Case("elifndef", pp_elifndef)
- .Case("else", pp_else)
- .Case("endif", pp_endif)
- .Default(pp_none);
- if (Kind == pp_none) {
- skipDirective(Id.Name, First, End);
- return false;
- }
-
- if (Kind == pp_endif)
- return lexEndif(First, End);
-
- if (Kind == pp_define)
- return lexDefine(First, End);
-
- // Everything else.
- return lexDefault(Kind, Id.Name, First, End);
-}
-
-static void skipUTF8ByteOrderMark(const char *&First, const char *const End) {
- if ((End - First) >= 3 && First[0] == '\xef' && First[1] == '\xbb' &&
- First[2] == '\xbf')
- First += 3;
-}
-
-bool Minimizer::minimizeImpl(const char *First, const char *const End) {
- skipUTF8ByteOrderMark(First, End);
- while (First != End)
- if (lexPPLine(First, End))
- return true;
- return false;
-}
-
-bool Minimizer::minimize() {
- bool Error = minimizeImpl(Input.begin(), Input.end());
-
- if (!Error) {
- // Add a trailing newline and an EOF on success.
- if (!Out.empty() && Out.back() != '\n')
- Out.push_back('\n');
- makeToken(pp_eof);
- }
-
- // Null-terminate the output. This way the memory buffer that's passed to
- // Clang will not have to worry about the terminating '\0'.
- Out.push_back(0);
- Out.pop_back();
- return Error;
-}
-
-bool clang::minimize_source_to_dependency_directives::computeSkippedRanges(
- ArrayRef<Token> Input, llvm::SmallVectorImpl<SkippedRange> &Range) {
- struct Directive {
- enum DirectiveKind {
- If, // if/ifdef/ifndef
- Else // elif/elifdef/elifndef, else
- };
- int Offset;
- DirectiveKind Kind;
- };
- llvm::SmallVector<Directive, 32> Offsets;
- for (const Token &T : Input) {
- switch (T.K) {
- case pp_if:
- case pp_ifdef:
- case pp_ifndef:
- Offsets.push_back({T.Offset, Directive::If});
- break;
-
- case pp_elif:
- case pp_elifdef:
- case pp_elifndef:
- case pp_else: {
- if (Offsets.empty())
- return true;
- int PreviousOffset = Offsets.back().Offset;
- Range.push_back({PreviousOffset, T.Offset - PreviousOffset});
- Offsets.push_back({T.Offset, Directive::Else});
- break;
- }
-
- case pp_endif: {
- if (Offsets.empty())
- return true;
- int PreviousOffset = Offsets.back().Offset;
- Range.push_back({PreviousOffset, T.Offset - PreviousOffset});
- do {
- Directive::DirectiveKind Kind = Offsets.pop_back_val().Kind;
- if (Kind == Directive::If)
- break;
- } while (!Offsets.empty());
- break;
- }
- default:
- break;
- }
- }
- return false;
-}
-
-bool clang::minimizeSourceToDependencyDirectives(
- StringRef Input, SmallVectorImpl<char> &Output,
- SmallVectorImpl<Token> &Tokens, DiagnosticsEngine *Diags,
- SourceLocation InputSourceLoc) {
- Output.clear();
- Tokens.clear();
- return Minimizer(Output, Tokens, Input, Diags, InputSourceLoc).minimize();
-}
diff --git a/contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp b/contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp
index 39c125c395ef..60fd42bc1127 100644
--- a/contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/HeaderSearch.cpp
@@ -172,6 +172,10 @@ void HeaderSearch::getHeaderMapFileNames(
std::string HeaderSearch::getCachedModuleFileName(Module *Module) {
const FileEntry *ModuleMap =
getModuleMap().getModuleMapFileForUniquing(Module);
+ // The ModuleMap may be a nullptr when we load a cached C++ module without
+ // a *.modulemap file. In this case, just return an empty string.
+ if (ModuleMap == nullptr)
+ return {};
return getCachedModuleFileName(Module->Name, ModuleMap->getName());
}
@@ -190,10 +194,19 @@ std::string HeaderSearch::getPrebuiltModuleFileName(StringRef ModuleName,
for (const std::string &Dir : HSOpts->PrebuiltModulePaths) {
SmallString<256> Result(Dir);
llvm::sys::fs::make_absolute(Result);
- llvm::sys::path::append(Result, ModuleName + ".pcm");
+ if (ModuleName.contains(':'))
+ // The separator of C++20 module partitions (':') is not suitable for file
+ // systems, so clang and gcc choose '-' by default since it is not a
+ // valid character in C++ identifiers. This avoids conflicts.
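+ // For example, a (hypothetical) partition "M:Part" is looked up as
+ // "M-Part.pcm" in each prebuilt module path.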
+ llvm::sys::path::append(Result, ModuleName.split(':').first + "-" +
+ ModuleName.split(':').second +
+ ".pcm");
+ else
+ llvm::sys::path::append(Result, ModuleName + ".pcm");
if (getFileMgr().getFile(Result.str()))
return std::string(Result);
}
+
return {};
}
@@ -295,21 +308,20 @@ Module *HeaderSearch::lookupModule(StringRef ModuleName, StringRef SearchName,
SourceLocation ImportLoc,
bool AllowExtraModuleMapSearch) {
Module *Module = nullptr;
- unsigned Idx;
// Look through the various header search paths to load any available module
// maps, searching for a module map that describes this module.
- for (Idx = 0; Idx != SearchDirs.size(); ++Idx) {
- if (SearchDirs[Idx].isFramework()) {
+ for (DirectoryLookup Dir : search_dir_range()) {
+ if (Dir.isFramework()) {
// Search for or infer a module map for a framework. Here we use
// SearchName rather than ModuleName, to permit finding private modules
// named FooPrivate in buggy frameworks named Foo.
SmallString<128> FrameworkDirName;
- FrameworkDirName += SearchDirs[Idx].getFrameworkDir()->getName();
+ FrameworkDirName += Dir.getFrameworkDir()->getName();
llvm::sys::path::append(FrameworkDirName, SearchName + ".framework");
- if (auto FrameworkDir = FileMgr.getDirectory(FrameworkDirName)) {
- bool IsSystem
- = SearchDirs[Idx].getDirCharacteristic() != SrcMgr::C_User;
+ if (auto FrameworkDir =
+ FileMgr.getOptionalDirectoryRef(FrameworkDirName)) {
+ bool IsSystem = Dir.getDirCharacteristic() != SrcMgr::C_User;
Module = loadFrameworkModule(ModuleName, *FrameworkDir, IsSystem);
if (Module)
break;
@@ -319,12 +331,14 @@ Module *HeaderSearch::lookupModule(StringRef ModuleName, StringRef SearchName,
// FIXME: Figure out how header maps and module maps will work together.
// Only deal with normal search directories.
- if (!SearchDirs[Idx].isNormalDir())
+ if (!Dir.isNormalDir())
continue;
- bool IsSystem = SearchDirs[Idx].isSystemHeaderDirectory();
+ bool IsSystem = Dir.isSystemHeaderDirectory();
+ // getDirRef() only returns None if this is not a normal directory, which
+ // we just checked.
+ DirectoryEntryRef NormalDir = *Dir.getDirRef();
// Search for a module map file in this directory.
- if (loadModuleMapFile(SearchDirs[Idx].getDir(), IsSystem,
+ if (loadModuleMapFile(NormalDir, IsSystem,
/*IsFramework*/false) == LMM_NewlyLoaded) {
// We just loaded a module map file; check whether the module is
// available now.
@@ -336,7 +350,7 @@ Module *HeaderSearch::lookupModule(StringRef ModuleName, StringRef SearchName,
// Search for a module map in a subdirectory with the same name as the
// module.
SmallString<128> NestedModuleMapDirName;
- NestedModuleMapDirName = SearchDirs[Idx].getDir()->getName();
+ NestedModuleMapDirName = Dir.getDir()->getName();
llvm::sys::path::append(NestedModuleMapDirName, ModuleName);
if (loadModuleMapFile(NestedModuleMapDirName, IsSystem,
/*IsFramework*/false) == LMM_NewlyLoaded){
@@ -348,13 +362,13 @@ Module *HeaderSearch::lookupModule(StringRef ModuleName, StringRef SearchName,
// If we've already performed the exhaustive search for module maps in this
// search directory, don't do it again.
- if (SearchDirs[Idx].haveSearchedAllModuleMaps())
+ if (Dir.haveSearchedAllModuleMaps())
continue;
// Load all module maps in the immediate subdirectories of this search
// directory if ModuleName was from @import.
if (AllowExtraModuleMapSearch)
- loadSubdirectoryModuleMaps(SearchDirs[Idx]);
+ loadSubdirectoryModuleMaps(Dir);
// Look again for the module.
Module = ModMap.findModule(ModuleName);
@@ -362,9 +376,6 @@ Module *HeaderSearch::lookupModule(StringRef ModuleName, StringRef SearchName,
break;
}
- if (Module)
- noteLookupUsage(Idx, ImportLoc);
-
return Module;
}
@@ -428,10 +439,10 @@ Optional<FileEntryRef> DirectoryLookup::LookupFile(
SmallString<1024> TmpDir;
if (isNormalDir()) {
// Concatenate the requested file onto the directory.
- TmpDir = getDir()->getName();
+ TmpDir = getDirRef()->getName();
llvm::sys::path::append(TmpDir, Filename);
if (SearchPath) {
- StringRef SearchPathRef(getDir()->getName());
+ StringRef SearchPathRef(getDirRef()->getName());
SearchPath->clear();
SearchPath->append(SearchPathRef.begin(), SearchPathRef.end());
}
@@ -489,7 +500,7 @@ Optional<FileEntryRef> DirectoryLookup::LookupFile(
// The case where the target file **exists** is handled by callee of this
// function as part of the regular logic that applies to include search paths.
// The case where the target file **does not exist** is handled here:
- HS.noteLookupUsage(*HS.searchDirIdx(*this), IncludeLoc);
+ HS.noteLookupUsage(HS.searchDirIdx(*this), IncludeLoc);
return None;
}
@@ -499,7 +510,7 @@ Optional<FileEntryRef> DirectoryLookup::LookupFile(
/// \param DirName The name of the framework directory.
/// \param SubmodulePath Will be populated with the submodule path from the
/// returned top-level module to the originally named framework.
-static const DirectoryEntry *
+static Optional<DirectoryEntryRef>
getTopFrameworkDir(FileManager &FileMgr, StringRef DirName,
SmallVectorImpl<std::string> &SubmodulePath) {
assert(llvm::sys::path::extension(DirName) == ".framework" &&
@@ -519,12 +530,10 @@ getTopFrameworkDir(FileManager &FileMgr, StringRef DirName,
//
// Similar issues occur when a top-level framework has moved into an
// embedded framework.
- const DirectoryEntry *TopFrameworkDir = nullptr;
- if (auto TopFrameworkDirOrErr = FileMgr.getDirectory(DirName))
- TopFrameworkDir = *TopFrameworkDirOrErr;
+ auto TopFrameworkDir = FileMgr.getOptionalDirectoryRef(DirName);
if (TopFrameworkDir)
- DirName = FileMgr.getCanonicalName(TopFrameworkDir);
+ DirName = FileMgr.getCanonicalName(*TopFrameworkDir);
do {
// Get the parent directory name.
DirName = llvm::sys::path::parent_path(DirName);
@@ -532,7 +541,7 @@ getTopFrameworkDir(FileManager &FileMgr, StringRef DirName,
break;
// Determine whether this directory exists.
- auto Dir = FileMgr.getDirectory(DirName);
+ auto Dir = FileMgr.getOptionalDirectoryRef(DirName);
if (!Dir)
break;
@@ -573,7 +582,7 @@ Optional<FileEntryRef> DirectoryLookup::DoFrameworkLookup(
HS.LookupFrameworkCache(Filename.substr(0, SlashPos));
// If it is known and in some other directory, fail.
- if (CacheEntry.Directory && CacheEntry.Directory != getFrameworkDir())
+ if (CacheEntry.Directory && CacheEntry.Directory != getFrameworkDirRef())
return None;
// Otherwise, construct the path to this framework dir.
@@ -602,7 +611,7 @@ Optional<FileEntryRef> DirectoryLookup::DoFrameworkLookup(
// Otherwise, if it does, remember that this is the right direntry for this
// framework.
- CacheEntry.Directory = getFrameworkDir();
+ CacheEntry.Directory = getFrameworkDirRef();
// If this is a user search directory, check if the framework has been
// user-specified as a system framework.
@@ -617,7 +626,7 @@ Optional<FileEntryRef> DirectoryLookup::DoFrameworkLookup(
// Set out flags.
InUserSpecifiedSystemFramework = CacheEntry.IsUserSpecifiedSystemFramework;
- IsFrameworkFound = CacheEntry.Directory;
+ IsFrameworkFound = CacheEntry.Directory.has_value();
if (RelativePath) {
RelativePath->clear();
@@ -695,9 +704,10 @@ Optional<FileEntryRef> DirectoryLookup::DoFrameworkLookup(
}
void HeaderSearch::cacheLookupSuccess(LookupFileCacheInfo &CacheLookup,
- unsigned HitIdx, SourceLocation Loc) {
- CacheLookup.HitIdx = HitIdx;
- noteLookupUsage(HitIdx, Loc);
+ ConstSearchDirIterator HitIt,
+ SourceLocation Loc) {
+ CacheLookup.HitIt = HitIt;
+ noteLookupUsage(HitIt.Idx, Loc);
}
void HeaderSearch::noteLookupUsage(unsigned HitIdx, SourceLocation Loc) {
@@ -825,14 +835,14 @@ diagnoseFrameworkInclude(DiagnosticsEngine &Diags, SourceLocation IncludeLoc,
/// search is needed. Microsoft mode will pass all \#including files.
Optional<FileEntryRef> HeaderSearch::LookupFile(
StringRef Filename, SourceLocation IncludeLoc, bool isAngled,
- const DirectoryLookup *FromDir, const DirectoryLookup **CurDirArg,
+ ConstSearchDirIterator FromDir, ConstSearchDirIterator *CurDirArg,
ArrayRef<std::pair<const FileEntry *, const DirectoryEntry *>> Includers,
SmallVectorImpl<char> *SearchPath, SmallVectorImpl<char> *RelativePath,
Module *RequestingModule, ModuleMap::KnownHeader *SuggestedModule,
bool *IsMapped, bool *IsFrameworkFound, bool SkipCache,
bool BuildSystemModule) {
- const DirectoryLookup *CurDirLocal = nullptr;
- const DirectoryLookup *&CurDir = CurDirArg ? *CurDirArg : CurDirLocal;
+ ConstSearchDirIterator CurDirLocal = nullptr;
+ ConstSearchDirIterator &CurDir = CurDirArg ? *CurDirArg : CurDirLocal;
if (IsMapped)
*IsMapped = false;
@@ -955,12 +965,13 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
CurDir = nullptr;
// If this is a system #include, ignore the user #include locs.
- unsigned i = isAngled ? AngledDirIdx : 0;
+ ConstSearchDirIterator It =
+ isAngled ? angled_dir_begin() : search_dir_begin();
// If this is a #include_next request, start searching after the directory the
// file was found in.
if (FromDir)
- i = FromDir-&SearchDirs[0];
+ It = FromDir;
// Cache all of the lookups performed by this method. Many headers are
// multiply included, and the "pragma once" optimization prevents them from
@@ -968,12 +979,15 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
// (potentially huge) series of SearchDirs to find it.
LookupFileCacheInfo &CacheLookup = LookupFileCache[Filename];
+ ConstSearchDirIterator NextIt = std::next(It);
+
// If the entry has been previously looked up, the stored start iterator will
// be valid. If it is equal to NextIt (the start point of our search, offset
// by one), then this is a matching hit.
- if (!SkipCache && CacheLookup.StartIdx == i+1) {
+ if (!SkipCache && CacheLookup.StartIt == NextIt) {
// Skip querying potentially lots of directories for this lookup.
- i = CacheLookup.HitIdx;
+ if (CacheLookup.HitIt)
+ It = CacheLookup.HitIt;
if (CacheLookup.MappedName) {
Filename = CacheLookup.MappedName;
if (IsMapped)
@@ -983,17 +997,17 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
// Otherwise, this is the first query, or the previous query didn't match
// our search start. We will fill in our found location below, so prime the
// start point value.
- CacheLookup.reset(/*StartIdx=*/i+1);
+ CacheLookup.reset(/*NewStartIt=*/NextIt);
}
SmallString<64> MappedName;
// Check each directory in sequence to see if it contains this file.
- for (; i != SearchDirs.size(); ++i) {
+ for (; It != search_dir_end(); ++It) {
bool InUserSpecifiedSystemFramework = false;
bool IsInHeaderMap = false;
bool IsFrameworkFoundInDir = false;
- Optional<FileEntryRef> File = SearchDirs[i].LookupFile(
+ Optional<FileEntryRef> File = It->LookupFile(
Filename, *this, IncludeLoc, SearchPath, RelativePath, RequestingModule,
SuggestedModule, InUserSpecifiedSystemFramework, IsFrameworkFoundInDir,
IsInHeaderMap, MappedName);
@@ -1015,10 +1029,13 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
if (!File)
continue;
- CurDir = &SearchDirs[i];
+ CurDir = It;
+
+ const auto FE = &File->getFileEntry();
+ IncludeNames[FE] = Filename;
// This file is a system header or C++ unfriendly if the dir is.
- HeaderFileInfo &HFI = getFileInfo(&File->getFileEntry());
+ HeaderFileInfo &HFI = getFileInfo(FE);
HFI.DirInfo = CurDir->getDirCharacteristic();
// If the directory characteristic is User but this framework was
@@ -1037,8 +1054,9 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
}
}
- // If this file is found in a header map and uses the framework style of
- // includes, then this header is part of a framework we're building.
+ // Set the `Framework` info if this file is in a header map with
+ // framework-style include spelling or is found in a framework dir. The
+ // header map case is possible when building frameworks that use header maps.
if (CurDir->isHeaderMap() && isAngled) {
size_t SlashPos = Filename.find('/');
if (SlashPos != StringRef::npos)
@@ -1046,6 +1064,11 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
getUniqueFrameworkName(StringRef(Filename.begin(), SlashPos));
if (CurDir->isIndexHeaderMap())
HFI.IndexHeaderMapHeader = 1;
+ } else if (CurDir->isFramework()) {
+ size_t SlashPos = Filename.find('/');
+ if (SlashPos != StringRef::npos)
+ HFI.Framework =
+ getUniqueFrameworkName(StringRef(Filename.begin(), SlashPos));
}
if (checkMSVCHeaderSearch(Diags, MSFE ? &MSFE->getFileEntry() : nullptr,
@@ -1062,7 +1085,7 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
&File->getFileEntry(), isAngled, FoundByHeaderMap);
// Remember this location for the next lookup we do.
- cacheLookupSuccess(CacheLookup, i, IncludeLoc);
+ cacheLookupSuccess(CacheLookup, It, IncludeLoc);
return File;
}
@@ -1093,7 +1116,7 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
}
cacheLookupSuccess(LookupFileCache[Filename],
- LookupFileCache[ScratchFilename].HitIdx, IncludeLoc);
+ LookupFileCache[ScratchFilename].HitIt, IncludeLoc);
// FIXME: SuggestedModule.
return File;
}
@@ -1107,7 +1130,7 @@ Optional<FileEntryRef> HeaderSearch::LookupFile(
}
// Otherwise, didn't find it. Remember we didn't find this.
- CacheLookup.HitIdx = SearchDirs.size();
+ CacheLookup.HitIt = search_dir_end();
return None;
}
@@ -1164,13 +1187,13 @@ Optional<FileEntryRef> HeaderSearch::LookupSubframeworkHeader(
++NumSubFrameworkLookups;
// If the framework dir doesn't exist, we fail.
- auto Dir = FileMgr.getDirectory(FrameworkName);
+ auto Dir = FileMgr.getOptionalDirectoryRef(FrameworkName);
if (!Dir)
return None;
// Otherwise, if it does, remember that this is the right direntry for this
// framework.
- CacheLookup.second.Directory = *Dir;
+ CacheLookup.second.Directory = Dir;
}
@@ -1433,17 +1456,21 @@ size_t HeaderSearch::getTotalMemory() const {
+ FrameworkMap.getAllocator().getTotalMemory();
}
-Optional<unsigned> HeaderSearch::searchDirIdx(const DirectoryLookup &DL) const {
- for (unsigned I = 0; I < SearchDirs.size(); ++I)
- if (&SearchDirs[I] == &DL)
- return I;
- return None;
+unsigned HeaderSearch::searchDirIdx(const DirectoryLookup &DL) const {
+ return &DL - &*SearchDirs.begin();
}
StringRef HeaderSearch::getUniqueFrameworkName(StringRef Framework) {
return FrameworkNames.insert(Framework).first->first();
}
+StringRef HeaderSearch::getIncludeNameForHeader(const FileEntry *File) const {
+ auto It = IncludeNames.find(File);
+ if (It == IncludeNames.end())
+ return {};
+ return It->second;
+}
+
bool HeaderSearch::hasModuleMap(StringRef FileName,
const DirectoryEntry *Root,
bool IsSystem) {
@@ -1460,13 +1487,13 @@ bool HeaderSearch::hasModuleMap(StringRef FileName,
return false;
// Determine whether this directory exists.
- auto Dir = FileMgr.getDirectory(DirName);
+ auto Dir = FileMgr.getOptionalDirectoryRef(DirName);
if (!Dir)
return false;
// Try to load the module map file in this directory.
switch (loadModuleMapFile(*Dir, IsSystem,
- llvm::sys::path::extension((*Dir)->getName()) ==
+ llvm::sys::path::extension(Dir->getName()) ==
".framework")) {
case LMM_NewlyLoaded:
case LMM_AlreadyLoaded:
@@ -1563,15 +1590,16 @@ bool HeaderSearch::findUsableModuleForFrameworkHeader(
if (needModuleLookup(RequestingModule, SuggestedModule)) {
// Find the top-level framework based on this framework.
SmallVector<std::string, 4> SubmodulePath;
- const DirectoryEntry *TopFrameworkDir
- = ::getTopFrameworkDir(FileMgr, FrameworkName, SubmodulePath);
+ Optional<DirectoryEntryRef> TopFrameworkDir =
+ ::getTopFrameworkDir(FileMgr, FrameworkName, SubmodulePath);
+ assert(TopFrameworkDir && "Could not find the top-most framework dir");
// Determine the name of the top-level framework.
StringRef ModuleName = llvm::sys::path::stem(TopFrameworkDir->getName());
// Load this framework module. If that succeeds, find the suggested module
// for this header, if any.
- loadFrameworkModule(ModuleName, TopFrameworkDir, IsSystemFramework);
+ loadFrameworkModule(ModuleName, *TopFrameworkDir, IsSystemFramework);
// FIXME: This can find a module not part of ModuleName, which is
// important so that we're consistent about whether this header
@@ -1602,39 +1630,40 @@ bool HeaderSearch::loadModuleMapFile(const FileEntry *File, bool IsSystem,
StringRef OriginalModuleMapFile) {
// Find the directory for the module. For frameworks, that may require going
// up from the 'Modules' directory.
- const DirectoryEntry *Dir = nullptr;
+ Optional<DirectoryEntryRef> Dir;
if (getHeaderSearchOpts().ModuleMapFileHomeIsCwd) {
- if (auto DirOrErr = FileMgr.getDirectory("."))
- Dir = *DirOrErr;
+ Dir = FileMgr.getOptionalDirectoryRef(".");
} else {
if (!OriginalModuleMapFile.empty()) {
// We're building a preprocessed module map. Find or invent the directory
// that it originally occupied.
- auto DirOrErr = FileMgr.getDirectory(
+ Dir = FileMgr.getOptionalDirectoryRef(
llvm::sys::path::parent_path(OriginalModuleMapFile));
- if (DirOrErr) {
- Dir = *DirOrErr;
- } else {
- auto *FakeFile = FileMgr.getVirtualFile(OriginalModuleMapFile, 0, 0);
- Dir = FakeFile->getDir();
+ if (!Dir) {
+ auto FakeFile = FileMgr.getVirtualFileRef(OriginalModuleMapFile, 0, 0);
+ Dir = FakeFile.getDir();
}
} else {
- Dir = File->getDir();
+ // TODO: Replace with `Dir = File.getDir()` when `File` is switched to
+ // `FileEntryRef`.
+ Dir = FileMgr.getOptionalDirectoryRef(File->getDir()->getName());
}
+ assert(Dir && "parent must exist");
StringRef DirName(Dir->getName());
if (llvm::sys::path::filename(DirName) == "Modules") {
DirName = llvm::sys::path::parent_path(DirName);
if (DirName.endswith(".framework"))
- if (auto DirOrErr = FileMgr.getDirectory(DirName))
- Dir = *DirOrErr;
+ if (auto MaybeDir = FileMgr.getOptionalDirectoryRef(DirName))
+ Dir = *MaybeDir;
// FIXME: This assert can fail if there's a race between the above check
// and the removal of the directory.
assert(Dir && "parent must exist");
}
}
- switch (loadModuleMapFileImpl(File, IsSystem, Dir, ID, Offset)) {
+ assert(Dir && "module map home directory must exist");
+ switch (loadModuleMapFileImpl(File, IsSystem, *Dir, ID, Offset)) {
case LMM_AlreadyLoaded:
case LMM_NewlyLoaded:
return false;
@@ -1647,7 +1676,7 @@ bool HeaderSearch::loadModuleMapFile(const FileEntry *File, bool IsSystem,
HeaderSearch::LoadModuleMapResult
HeaderSearch::loadModuleMapFileImpl(const FileEntry *File, bool IsSystem,
- const DirectoryEntry *Dir, FileID ID,
+ DirectoryEntryRef Dir, FileID ID,
unsigned *Offset) {
assert(File && "expected FileEntry");
@@ -1705,8 +1734,7 @@ HeaderSearch::lookupModuleMapFile(const DirectoryEntry *Dir, bool IsFramework) {
return nullptr;
}
-Module *HeaderSearch::loadFrameworkModule(StringRef Name,
- const DirectoryEntry *Dir,
+Module *HeaderSearch::loadFrameworkModule(StringRef Name, DirectoryEntryRef Dir,
bool IsSystem) {
if (Module *Module = ModMap.findModule(Name))
return Module;
@@ -1733,14 +1761,14 @@ Module *HeaderSearch::loadFrameworkModule(StringRef Name,
HeaderSearch::LoadModuleMapResult
HeaderSearch::loadModuleMapFile(StringRef DirName, bool IsSystem,
bool IsFramework) {
- if (auto Dir = FileMgr.getDirectory(DirName))
+ if (auto Dir = FileMgr.getOptionalDirectoryRef(DirName))
return loadModuleMapFile(*Dir, IsSystem, IsFramework);
return LMM_NoDirectory;
}
HeaderSearch::LoadModuleMapResult
-HeaderSearch::loadModuleMapFile(const DirectoryEntry *Dir, bool IsSystem,
+HeaderSearch::loadModuleMapFile(DirectoryEntryRef Dir, bool IsSystem,
bool IsFramework) {
auto KnownDir = DirectoryHasModuleMap.find(Dir);
if (KnownDir != DirectoryHasModuleMap.end())
@@ -1766,13 +1794,12 @@ void HeaderSearch::collectAllModules(SmallVectorImpl<Module *> &Modules) {
if (HSOpts->ImplicitModuleMaps) {
// Load module maps for each of the header search directories.
- for (unsigned Idx = 0, N = SearchDirs.size(); Idx != N; ++Idx) {
- bool IsSystem = SearchDirs[Idx].isSystemHeaderDirectory();
- if (SearchDirs[Idx].isFramework()) {
+ for (DirectoryLookup &DL : search_dir_range()) {
+ bool IsSystem = DL.isSystemHeaderDirectory();
+ if (DL.isFramework()) {
std::error_code EC;
SmallString<128> DirNative;
- llvm::sys::path::native(SearchDirs[Idx].getFrameworkDir()->getName(),
- DirNative);
+ llvm::sys::path::native(DL.getFrameworkDir()->getName(), DirNative);
// Search each of the ".framework" directories to load them as modules.
llvm::vfs::FileSystem &FS = FileMgr.getVirtualFileSystem();
@@ -1782,8 +1809,7 @@ void HeaderSearch::collectAllModules(SmallVectorImpl<Module *> &Modules) {
if (llvm::sys::path::extension(Dir->path()) != ".framework")
continue;
- auto FrameworkDir =
- FileMgr.getDirectory(Dir->path());
+ auto FrameworkDir = FileMgr.getOptionalDirectoryRef(Dir->path());
if (!FrameworkDir)
continue;
@@ -1795,16 +1821,15 @@ void HeaderSearch::collectAllModules(SmallVectorImpl<Module *> &Modules) {
}
// FIXME: Deal with header maps.
- if (SearchDirs[Idx].isHeaderMap())
+ if (DL.isHeaderMap())
continue;
// Try to load a module map file for the search directory.
- loadModuleMapFile(SearchDirs[Idx].getDir(), IsSystem,
- /*IsFramework*/ false);
+ loadModuleMapFile(*DL.getDirRef(), IsSystem, /*IsFramework*/ false);
// Try to load module map files for immediate subdirectories of this
// search directory.
- loadSubdirectoryModuleMaps(SearchDirs[Idx]);
+ loadSubdirectoryModuleMaps(DL);
}
}
@@ -1818,16 +1843,14 @@ void HeaderSearch::loadTopLevelSystemModules() {
return;
// Load module maps for each of the header search directories.
- for (unsigned Idx = 0, N = SearchDirs.size(); Idx != N; ++Idx) {
+ for (const DirectoryLookup &DL : search_dir_range()) {
// We only care about normal header directories.
- if (!SearchDirs[Idx].isNormalDir()) {
+ if (!DL.isNormalDir())
continue;
- }
// Try to load a module map file for the search directory.
- loadModuleMapFile(SearchDirs[Idx].getDir(),
- SearchDirs[Idx].isSystemHeaderDirectory(),
- SearchDirs[Idx].isFramework());
+ loadModuleMapFile(*DL.getDirRef(), DL.isSystemHeaderDirectory(),
+ DL.isFramework());
}
}
@@ -1924,19 +1947,19 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics(
};
bool BestPrefixIsFramework = false;
- for (unsigned I = 0; I != SearchDirs.size(); ++I) {
- if (SearchDirs[I].isNormalDir()) {
- StringRef Dir = SearchDirs[I].getDir()->getName();
+ for (const DirectoryLookup &DL : search_dir_range()) {
+ if (DL.isNormalDir()) {
+ StringRef Dir = DL.getDir()->getName();
if (CheckDir(Dir)) {
if (IsSystem)
- *IsSystem = BestPrefixLength ? I >= SystemDirIdx : false;
+ *IsSystem = BestPrefixLength && isSystem(DL.getDirCharacteristic());
BestPrefixIsFramework = false;
}
- } else if (SearchDirs[I].isFramework()) {
- StringRef Dir = SearchDirs[I].getFrameworkDir()->getName();
+ } else if (DL.isFramework()) {
+ StringRef Dir = DL.getFrameworkDir()->getName();
if (CheckDir(Dir)) {
if (IsSystem)
- *IsSystem = BestPrefixLength ? I >= SystemDirIdx : false;
+ *IsSystem = BestPrefixLength && isSystem(DL.getDirCharacteristic());
BestPrefixIsFramework = true;
}
}
@@ -1951,14 +1974,14 @@ std::string HeaderSearch::suggestPathToFileForDiagnostics(
}
// Try resolving resulting filename via reverse search in header maps,
- // key from header name is user prefered name for the include file.
+ // key from header name is user preferred name for the include file.
StringRef Filename = File.drop_front(BestPrefixLength);
- for (unsigned I = 0; I != SearchDirs.size(); ++I) {
- if (!SearchDirs[I].isHeaderMap())
+ for (const DirectoryLookup &DL : search_dir_range()) {
+ if (!DL.isHeaderMap())
continue;
StringRef SpelledFilename =
- SearchDirs[I].getHeaderMap()->reverseLookupFilename(Filename);
+ DL.getHeaderMap()->reverseLookupFilename(Filename);
if (!SpelledFilename.empty()) {
Filename = SpelledFilename;
BestPrefixIsFramework = false;
diff --git a/contrib/llvm-project/clang/lib/Lex/InitHeaderSearch.cpp b/contrib/llvm-project/clang/lib/Lex/InitHeaderSearch.cpp
index ec7b1505c7bf..158b5667151f 100644
--- a/contrib/llvm-project/clang/lib/Lex/InitHeaderSearch.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/InitHeaderSearch.cpp
@@ -239,6 +239,7 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
case llvm::Triple::OpenBSD:
case llvm::Triple::NaCl:
case llvm::Triple::PS4:
+ case llvm::Triple::PS5:
case llvm::Triple::ELFIAMCU:
case llvm::Triple::Fuchsia:
break;
@@ -351,11 +352,14 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
case llvm::Triple::ELFIAMCU:
case llvm::Triple::Fuchsia:
break;
- case llvm::Triple::PS4: {
+ case llvm::Triple::PS4:
+ case llvm::Triple::PS5: {
// <isysroot> gets prepended later in AddPath().
std::string BaseSDKPath;
if (!HasSysroot) {
- const char *envValue = getenv("SCE_ORBIS_SDK_DIR");
+ const char *EnvVar = (os == llvm::Triple::PS4) ? "SCE_ORBIS_SDK_DIR"
+ : "SCE_PROSPERO_SDK_DIR";
+ const char *envValue = getenv(EnvVar);
if (envValue)
BaseSDKPath = envValue;
else {
@@ -370,9 +374,8 @@ void InitHeaderSearch::AddDefaultCIncludePaths(const llvm::Triple &triple,
}
}
AddPath(BaseSDKPath + "/target/include", System, false);
- if (triple.isPS4CPU())
- AddPath(BaseSDKPath + "/target/include_common", System, false);
- LLVM_FALLTHROUGH;
+ AddPath(BaseSDKPath + "/target/include_common", System, false);
+ break;
}
default:
AddPath("/usr/include", ExternCSystem, false);
diff --git a/contrib/llvm-project/clang/lib/Lex/Lexer.cpp b/contrib/llvm-project/clang/lib/Lex/Lexer.cpp
index a180bba365cf..6820057642be 100644
--- a/contrib/llvm-project/clang/lib/Lex/Lexer.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/Lexer.cpp
@@ -37,6 +37,7 @@
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/MemoryBufferRef.h"
#include "llvm/Support/NativeFormatting.h"
+#include "llvm/Support/Unicode.h"
#include "llvm/Support/UnicodeCharRanges.h"
#include <algorithm>
#include <cassert>
@@ -136,7 +137,8 @@ Lexer::Lexer(FileID FID, const llvm::MemoryBufferRef &InputFile,
Preprocessor &PP, bool IsFirstIncludeOfFile)
: PreprocessorLexer(&PP, FID),
FileLoc(PP.getSourceManager().getLocForStartOfFile(FID)),
- LangOpts(PP.getLangOpts()), IsFirstTimeLexingFile(IsFirstIncludeOfFile) {
+ LangOpts(PP.getLangOpts()), LineComment(LangOpts.LineComment),
+ IsFirstTimeLexingFile(IsFirstIncludeOfFile) {
InitLexer(InputFile.getBufferStart(), InputFile.getBufferStart(),
InputFile.getBufferEnd());
@@ -149,7 +151,7 @@ Lexer::Lexer(FileID FID, const llvm::MemoryBufferRef &InputFile,
Lexer::Lexer(SourceLocation fileloc, const LangOptions &langOpts,
const char *BufStart, const char *BufPtr, const char *BufEnd,
bool IsFirstIncludeOfFile)
- : FileLoc(fileloc), LangOpts(langOpts),
+ : FileLoc(fileloc), LangOpts(langOpts), LineComment(LangOpts.LineComment),
IsFirstTimeLexingFile(IsFirstIncludeOfFile) {
InitLexer(BufStart, BufPtr, BufEnd);
@@ -225,13 +227,11 @@ Lexer *Lexer::Create_PragmaLexer(SourceLocation SpellingLoc,
return L;
}
-bool Lexer::skipOver(unsigned NumBytes) {
- IsAtPhysicalStartOfLine = true;
- IsAtStartOfLine = true;
- if ((BufferPtr + NumBytes) > BufferEnd)
- return true;
- BufferPtr += NumBytes;
- return false;
+void Lexer::seek(unsigned Offset, bool IsAtStartOfLine) {
+ this->IsAtPhysicalStartOfLine = IsAtStartOfLine;
+ this->IsAtStartOfLine = IsAtStartOfLine;
+ assert((BufferStart + Offset) <= BufferEnd);
+ BufferPtr = BufferStart + Offset;
}
template <typename T> static void StringifyImpl(T &Str, char Quote) {
@@ -1193,11 +1193,11 @@ static char GetTrigraphCharForLetter(char Letter) {
/// prefixed with ??, emit a trigraph warning. If trigraphs are enabled,
/// return the result character. Finally, emit a warning about trigraph use
/// whether trigraphs are enabled or not.
-static char DecodeTrigraphChar(const char *CP, Lexer *L) {
+static char DecodeTrigraphChar(const char *CP, Lexer *L, bool Trigraphs) {
char Res = GetTrigraphCharForLetter(*CP);
if (!Res || !L) return Res;
- if (!L->getLangOpts().Trigraphs) {
+ if (!Trigraphs) {
if (!L->isLexingRawMode())
L->Diag(CP-2, diag::trigraph_ignored);
return 0;
@@ -1371,7 +1371,8 @@ Slash:
if (Ptr[0] == '?' && Ptr[1] == '?') {
// If this is actually a legal trigraph (not something like "??x"), emit
// a trigraph warning. If so, and if trigraphs are enabled, return it.
- if (char C = DecodeTrigraphChar(Ptr+2, Tok ? this : nullptr)) {
+ if (char C = DecodeTrigraphChar(Ptr + 2, Tok ? this : nullptr,
+ LangOpts.Trigraphs)) {
// Remember that this token needs to be cleaned.
if (Tok) Tok->setFlag(Token::NeedsCleaning);
@@ -1881,7 +1882,7 @@ bool Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
if (!LangOpts.C99) {
if (!isHexaLiteral(BufferPtr, LangOpts))
IsHexFloat = false;
- else if (!getLangOpts().CPlusPlus17 &&
+ else if (!LangOpts.CPlusPlus17 &&
std::find(BufferPtr, CurPtr, '_') != CurPtr)
IsHexFloat = false;
}
@@ -1890,12 +1891,12 @@ bool Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
}
// If we have a digit separator, continue.
- if (C == '\'' && (getLangOpts().CPlusPlus14 || getLangOpts().C2x)) {
+ if (C == '\'' && (LangOpts.CPlusPlus14 || LangOpts.C2x)) {
unsigned NextSize;
- char Next = getCharAndSizeNoWarn(CurPtr + Size, NextSize, getLangOpts());
+ char Next = getCharAndSizeNoWarn(CurPtr + Size, NextSize, LangOpts);
if (isAsciiIdentifierContinue(Next)) {
if (!isLexingRawMode())
- Diag(CurPtr, getLangOpts().CPlusPlus
+ Diag(CurPtr, LangOpts.CPlusPlus
? diag::warn_cxx11_compat_digit_separator
: diag::warn_c2x_compat_digit_separator);
CurPtr = ConsumeChar(CurPtr, Size, Result);
@@ -1921,7 +1922,7 @@ bool Lexer::LexNumericConstant(Token &Result, const char *CurPtr) {
/// in C++11, or warn on a ud-suffix in C++98.
const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr,
bool IsStringLiteral) {
- assert(getLangOpts().CPlusPlus);
+ assert(LangOpts.CPlusPlus);
// Maximally munch an identifier.
unsigned Size;
@@ -1937,7 +1938,7 @@ const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr,
return CurPtr;
}
- if (!getLangOpts().CPlusPlus11) {
+ if (!LangOpts.CPlusPlus11) {
if (!isLexingRawMode())
Diag(CurPtr,
C == '_' ? diag::warn_cxx11_compat_user_defined_literal
@@ -1955,7 +1956,7 @@ const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr,
bool IsUDSuffix = false;
if (C == '_')
IsUDSuffix = true;
- else if (IsStringLiteral && getLangOpts().CPlusPlus14) {
+ else if (IsStringLiteral && LangOpts.CPlusPlus14) {
// In C++1y, we need to look ahead a few characters to see if this is a
// valid suffix for a string literal or a numeric literal (this could be
// the 'operator""if' defining a numeric literal operator).
@@ -1965,13 +1966,12 @@ const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr,
unsigned Chars = 1;
while (true) {
unsigned NextSize;
- char Next = getCharAndSizeNoWarn(CurPtr + Consumed, NextSize,
- getLangOpts());
+ char Next = getCharAndSizeNoWarn(CurPtr + Consumed, NextSize, LangOpts);
if (!isAsciiIdentifierContinue(Next)) {
// End of suffix. Check whether this is on the allowed list.
const StringRef CompleteSuffix(Buffer, Chars);
- IsUDSuffix = StringLiteralParser::isValidUDSuffix(getLangOpts(),
- CompleteSuffix);
+ IsUDSuffix =
+ StringLiteralParser::isValidUDSuffix(LangOpts, CompleteSuffix);
break;
}
@@ -1986,10 +1986,10 @@ const char *Lexer::LexUDSuffix(Token &Result, const char *CurPtr,
if (!IsUDSuffix) {
if (!isLexingRawMode())
- Diag(CurPtr, getLangOpts().MSVCCompat
+ Diag(CurPtr, LangOpts.MSVCCompat
? diag::ext_ms_reserved_user_defined_literal
: diag::ext_reserved_user_defined_literal)
- << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
+ << FixItHint::CreateInsertion(getSourceLocation(CurPtr), " ");
return CurPtr;
}
@@ -2022,9 +2022,8 @@ bool Lexer::LexStringLiteral(Token &Result, const char *CurPtr,
(Kind == tok::utf8_string_literal ||
Kind == tok::utf16_string_literal ||
Kind == tok::utf32_string_literal))
- Diag(BufferPtr, getLangOpts().CPlusPlus
- ? diag::warn_cxx98_compat_unicode_literal
- : diag::warn_c99_compat_unicode_literal);
+ Diag(BufferPtr, LangOpts.CPlusPlus ? diag::warn_cxx98_compat_unicode_literal
+ : diag::warn_c99_compat_unicode_literal);
char C = getAndAdvanceChar(CurPtr, Result);
while (C != '"') {
@@ -2058,7 +2057,7 @@ bool Lexer::LexStringLiteral(Token &Result, const char *CurPtr,
}
// If we are in C++11, lex the optional ud-suffix.
- if (getLangOpts().CPlusPlus)
+ if (LangOpts.CPlusPlus)
CurPtr = LexUDSuffix(Result, CurPtr, true);
// If a nul character existed in the string, warn about it.
@@ -2142,7 +2141,7 @@ bool Lexer::LexRawStringLiteral(Token &Result, const char *CurPtr,
}
// If we are in C++11, lex the optional ud-suffix.
- if (getLangOpts().CPlusPlus)
+ if (LangOpts.CPlusPlus)
CurPtr = LexUDSuffix(Result, CurPtr, true);
// Update the location of token as well as BufferPtr.
@@ -2238,7 +2237,7 @@ bool Lexer::LexCharConstant(Token &Result, const char *CurPtr,
if (!isLexingRawMode()) {
if (Kind == tok::utf16_char_constant || Kind == tok::utf32_char_constant)
- Diag(BufferPtr, getLangOpts().CPlusPlus
+ Diag(BufferPtr, LangOpts.CPlusPlus
? diag::warn_cxx98_compat_unicode_literal
: diag::warn_c99_compat_unicode_literal);
else if (Kind == tok::utf8_char_constant)
@@ -2280,7 +2279,7 @@ bool Lexer::LexCharConstant(Token &Result, const char *CurPtr,
}
// If we are in C++11, lex the optional ud-suffix.
- if (getLangOpts().CPlusPlus)
+ if (LangOpts.CPlusPlus)
CurPtr = LexUDSuffix(Result, CurPtr, false);
// If a nul character existed in the character, warn about it.
@@ -2378,13 +2377,13 @@ bool Lexer::SkipLineComment(Token &Result, const char *CurPtr,
bool &TokAtPhysicalStartOfLine) {
// If Line comments aren't explicitly enabled for this language, emit an
// extension warning.
- if (!LangOpts.LineComment) {
+ if (!LineComment) {
if (!isLexingRawMode()) // There's no PP in raw mode, so can't emit diags.
Diag(BufferPtr, diag::ext_line_comment);
// Mark them enabled so we only emit one warning for this translation
// unit.
- LangOpts.LineComment = true;
+ LineComment = true;
}
// Scan over the body of the comment. The common case, when scanning, is that
@@ -2544,8 +2543,8 @@ bool Lexer::SaveLineComment(Token &Result, const char *CurPtr) {
/// isBlockCommentEndOfEscapedNewLine - Return true if the specified newline
/// character (either \\n or \\r) is part of an escaped newline sequence. Issue
/// a diagnostic if so. We know that the newline is inside of a block comment.
-static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr,
- Lexer *L) {
+static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr, Lexer *L,
+ bool Trigraphs) {
assert(CurPtr[0] == '\n' || CurPtr[0] == '\r');
// Position of the first trigraph in the ending sequence.
@@ -2596,7 +2595,7 @@ static bool isEndOfBlockCommentWithEscapedNewLine(const char *CurPtr,
if (TrigraphPos) {
// If no trigraphs are enabled, warn that we ignored this trigraph and
// ignore this * character.
- if (!L->getLangOpts().Trigraphs) {
+ if (!Trigraphs) {
if (!L->isLexingRawMode())
L->Diag(TrigraphPos, diag::trigraph_ignored_block_comment);
return false;
@@ -2726,7 +2725,8 @@ bool Lexer::SkipBlockComment(Token &Result, const char *CurPtr,
break;
if ((CurPtr[-2] == '\n' || CurPtr[-2] == '\r')) {
- if (isEndOfBlockCommentWithEscapedNewLine(CurPtr-2, this)) {
+ if (isEndOfBlockCommentWithEscapedNewLine(CurPtr - 2, this,
+ LangOpts.Trigraphs)) {
// We found the final */, though it had an escaped newline between the
// * and /. We're done!
break;
@@ -2901,11 +2901,11 @@ bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
ConditionalStack.pop_back();
}
- SourceLocation EndLoc = getSourceLocation(BufferEnd);
// C99 5.1.1.2p2: If the file is non-empty and didn't end in a newline, issue
// a pedwarn.
if (CurPtr != BufferStart && (CurPtr[-1] != '\n' && CurPtr[-1] != '\r')) {
DiagnosticsEngine &Diags = PP->getDiagnostics();
+ SourceLocation EndLoc = getSourceLocation(BufferEnd);
unsigned DiagID;
if (LangOpts.CPlusPlus11) {
@@ -2928,7 +2928,7 @@ bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
BufferPtr = CurPtr;
// Finally, let the preprocessor handle this.
- return PP->HandleEndOfFile(Result, EndLoc, isPragmaLexer());
+ return PP->HandleEndOfFile(Result, isPragmaLexer());
}
/// isNextPPTokenLParen - Return 1 if the next unexpanded token lexed from
@@ -2938,6 +2938,13 @@ bool Lexer::LexEndOfFile(Token &Result, const char *CurPtr) {
unsigned Lexer::isNextPPTokenLParen() {
assert(!LexingRawMode && "How can we expand a macro from a skipping buffer?");
+ if (isDependencyDirectivesLexer()) {
+ if (NextDepDirectiveTokenIndex == DepDirectives.front().Tokens.size())
+ return 2;
+ return DepDirectives.front().Tokens[NextDepDirectiveTokenIndex].is(
+ tok::l_paren);
+ }
+
// Switch to 'skipping' mode. This will ensure that we can lex a token
// without emitting diagnostics, disables macro expansion, and will cause EOF
// to return an EOF token instead of popping the include stack.
@@ -3113,27 +3120,28 @@ bool Lexer::isCodeCompletionPoint(const char *CurPtr) const {
return false;
}
-uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc,
- Token *Result) {
+llvm::Optional<uint32_t> Lexer::tryReadNumericUCN(const char *&StartPtr,
+ const char *SlashLoc,
+ Token *Result) {
unsigned CharSize;
char Kind = getCharAndSize(StartPtr, CharSize);
- bool Delimited = false;
- bool FoundEndDelimiter = false;
- unsigned Count = 0;
- bool Diagnose = Result && !isLexingRawMode();
+ assert((Kind == 'u' || Kind == 'U') && "expected a UCN");
unsigned NumHexDigits;
if (Kind == 'u')
NumHexDigits = 4;
else if (Kind == 'U')
NumHexDigits = 8;
- else
- return 0;
+
+ bool Delimited = false;
+ bool FoundEndDelimiter = false;
+ unsigned Count = 0;
+ bool Diagnose = Result && !isLexingRawMode();
if (!LangOpts.CPlusPlus && !LangOpts.C99) {
if (Diagnose)
Diag(SlashLoc, diag::warn_ucn_not_valid_in_c89);
- return 0;
+ return llvm::None;
}
const char *CurPtr = StartPtr + CharSize;
@@ -3160,14 +3168,14 @@ uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc,
break;
if (Diagnose)
Diag(BufferPtr, diag::warn_delimited_ucn_incomplete)
- << StringRef(&C, 1);
- return 0;
+ << StringRef(KindLoc, 1);
+ return llvm::None;
}
if (CodePoint & 0xF000'0000) {
if (Diagnose)
Diag(KindLoc, diag::err_escape_too_large) << 0;
- return 0;
+ return llvm::None;
}
CodePoint <<= 4;
@@ -3181,7 +3189,13 @@ uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc,
Diag(StartPtr, FoundEndDelimiter ? diag::warn_delimited_ucn_empty
: diag::warn_ucn_escape_no_digits)
<< StringRef(KindLoc, 1);
- return 0;
+ return llvm::None;
+ }
+
+ if (Delimited && Kind == 'U') {
+ if (Diagnose)
+ Diag(StartPtr, diag::err_hex_escape_no_digits) << StringRef(KindLoc, 1);
+ return llvm::None;
}
if (!Delimited && Count != NumHexDigits) {
@@ -3194,11 +3208,11 @@ uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc,
<< FixItHint::CreateReplacement(URange, "u");
}
}
- return 0;
+ return llvm::None;
}
if (Delimited && PP) {
- Diag(BufferPtr, diag::ext_delimited_escape_sequence);
+ Diag(BufferPtr, diag::ext_delimited_escape_sequence) << /*delimited*/ 0;
}
if (Result) {
@@ -3211,6 +3225,110 @@ uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc,
} else {
StartPtr = CurPtr;
}
+ return CodePoint;
+}
+
+llvm::Optional<uint32_t> Lexer::tryReadNamedUCN(const char *&StartPtr,
+ Token *Result) {
+ unsigned CharSize;
+ bool Diagnose = Result && !isLexingRawMode();
+
+ char C = getCharAndSize(StartPtr, CharSize);
+ assert(C == 'N' && "expected \\N{...}");
+
+ const char *CurPtr = StartPtr + CharSize;
+ const char *KindLoc = &CurPtr[-1];
+
+ C = getCharAndSize(CurPtr, CharSize);
+ if (C != '{') {
+ if (Diagnose)
+ Diag(StartPtr, diag::warn_ucn_escape_incomplete);
+ return llvm::None;
+ }
+ CurPtr += CharSize;
+ const char *StartName = CurPtr;
+ bool FoundEndDelimiter = false;
+ llvm::SmallVector<char, 30> Buffer;
+ while (C) {
+ C = getCharAndSize(CurPtr, CharSize);
+ CurPtr += CharSize;
+ if (C == '}') {
+ FoundEndDelimiter = true;
+ break;
+ }
+
+ if (!isAlphanumeric(C) && C != '_' && C != '-' && C != ' ')
+ break;
+ Buffer.push_back(C);
+ }
+
+ if (!FoundEndDelimiter || Buffer.empty()) {
+ if (Diagnose)
+ Diag(StartPtr, FoundEndDelimiter ? diag::warn_delimited_ucn_empty
+ : diag::warn_delimited_ucn_incomplete)
+ << StringRef(KindLoc, 1);
+ return llvm::None;
+ }
+
+ StringRef Name(Buffer.data(), Buffer.size());
+ llvm::Optional<char32_t> Res =
+ llvm::sys::unicode::nameToCodepointStrict(Name);
+ llvm::Optional<llvm::sys::unicode::LooseMatchingResult> LooseMatch;
+ if (!Res) {
+ if (!isLexingRawMode()) {
+ Diag(StartPtr, diag::err_invalid_ucn_name)
+ << StringRef(Buffer.data(), Buffer.size());
+ LooseMatch = llvm::sys::unicode::nameToCodepointLooseMatching(Name);
+ if (LooseMatch) {
+ Diag(StartName, diag::note_invalid_ucn_name_loose_matching)
+ << FixItHint::CreateReplacement(
+ makeCharRange(*this, StartName, CurPtr - CharSize),
+ LooseMatch->Name);
+ }
+ }
+  // When a match is found using Unicode loose matching rules,
+  // recover after having emitted a diagnostic.
+  if (!LooseMatch)
+    return llvm::None;
+  // We do not offer suggestions for misspelled character names here,
+  // as the set of valid suggestions depends on context
+  // and we should not make invalid suggestions.
+ }
+
+ if (Diagnose && PP && !LooseMatch)
+ Diag(BufferPtr, diag::ext_delimited_escape_sequence) << /*named*/ 1;
+
+ if (LooseMatch)
+ Res = LooseMatch->CodePoint;
+
+ if (Result) {
+ Result->setFlag(Token::HasUCN);
+ if (CurPtr - StartPtr == (ptrdiff_t)(Buffer.size() + 4))
+ StartPtr = CurPtr;
+ else
+ while (StartPtr != CurPtr)
+ (void)getAndAdvanceChar(StartPtr, *Result);
+ } else {
+ StartPtr = CurPtr;
+ }
+ return *Res;
+}
+
+uint32_t Lexer::tryReadUCN(const char *&StartPtr, const char *SlashLoc,
+ Token *Result) {
+
+ unsigned CharSize;
+ llvm::Optional<uint32_t> CodePointOpt;
+ char Kind = getCharAndSize(StartPtr, CharSize);
+ if (Kind == 'u' || Kind == 'U')
+ CodePointOpt = tryReadNumericUCN(StartPtr, SlashLoc, Result);
+ else if (Kind == 'N')
+ CodePointOpt = tryReadNamedUCN(StartPtr, Result);
+
+ if (!CodePointOpt)
+ return 0;
+
+ uint32_t CodePoint = *CodePointOpt;
// Don't apply C family restrictions to UCNs in assembly mode
if (LangOpts.AsmPreprocessor)
@@ -3280,6 +3398,8 @@ void Lexer::PropagateLineStartLeadingSpaceInfo(Token &Result) {
}
bool Lexer::Lex(Token &Result) {
+ assert(!isDependencyDirectivesLexer());
+
// Start a new token.
Result.startToken();
@@ -3435,8 +3555,7 @@ LexNextToken:
// If the next token is obviously a // or /* */ comment, skip it efficiently
// too (without going through the big switch stmt).
if (CurPtr[0] == '/' && CurPtr[1] == '/' && !inKeepCommentMode() &&
- LangOpts.LineComment &&
- (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP)) {
+ LineComment && (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP)) {
if (SkipLineComment(Result, CurPtr+2, TokAtPhysicalStartOfLine))
return true; // There is a token to return.
goto SkipIgnoredUnits;
@@ -3459,7 +3578,10 @@ LexNextToken:
MIOpt.ReadToken();
return LexNumericConstant(Result, CurPtr);
- case 'u': // Identifier (uber) or C11/C++11 UTF-8 or UTF-16 string literal
+ // Identifier (e.g., uber), or
+ // UTF-8 (C2x/C++17) or UTF-16 (C11/C++11) character literal, or
+ // UTF-8 or UTF-16 string literal (C11/C++11).
+ case 'u':
// Notify MIOpt that we read a non-whitespace/non-comment token.
MIOpt.ReadToken();
@@ -3493,7 +3615,7 @@ LexNextToken:
ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
SizeTmp2, Result),
tok::utf8_string_literal);
- if (Char2 == '\'' && LangOpts.CPlusPlus17)
+ if (Char2 == '\'' && (LangOpts.CPlusPlus17 || LangOpts.C2x))
return LexCharConstant(
Result, ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
SizeTmp2, Result),
@@ -3517,7 +3639,7 @@ LexNextToken:
// treat u like the start of an identifier.
return LexIdentifierContinue(Result, CurPtr);
- case 'U': // Identifier (Uber) or C11/C++11 UTF-32 string literal
+ case 'U': // Identifier (e.g. Uber) or C11/C++11 UTF-32 string literal
// Notify MIOpt that we read a non-whitespace/non-comment token.
MIOpt.ReadToken();
@@ -3743,8 +3865,8 @@ LexNextToken:
// "foo". Check to see if the character after the second slash is a '*'.
// If so, we will lex that as a "/" instead of the start of a comment.
// However, we never do this if we are just preprocessing.
- bool TreatAsComment = LangOpts.LineComment &&
- (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP);
+ bool TreatAsComment =
+ LineComment && (LangOpts.CPlusPlus || !LangOpts.TraditionalCPP);
if (!TreatAsComment)
if (!(PP && PP->isPreprocessedOutput()))
TreatAsComment = getCharAndSize(CurPtr+SizeTmp, SizeTmp2) != '*';
@@ -3841,7 +3963,7 @@ LexNextToken:
} else if (Char == '=') {
char After = getCharAndSize(CurPtr+SizeTmp, SizeTmp2);
if (After == '>') {
- if (getLangOpts().CPlusPlus20) {
+ if (LangOpts.CPlusPlus20) {
if (!isLexingRawMode())
Diag(BufferPtr, diag::warn_cxx17_compat_spaceship);
CurPtr = ConsumeChar(ConsumeChar(CurPtr, SizeTmp, Result),
@@ -3851,7 +3973,7 @@ LexNextToken:
}
// Suggest adding a space between the '<=' and the '>' to avoid a
// change in semantics if this turns up in C++ <=17 mode.
- if (getLangOpts().CPlusPlus && !isLexingRawMode()) {
+ if (LangOpts.CPlusPlus && !isLexingRawMode()) {
Diag(BufferPtr, diag::warn_cxx20_compat_spaceship)
<< FixItHint::CreateInsertion(
getSourceLocation(CurPtr + SizeTmp, SizeTmp2), " ");
@@ -4099,3 +4221,134 @@ HandleDirective:
// We parsed the directive; lex a token with the new state.
return false;
}
+
+const char *Lexer::convertDependencyDirectiveToken(
+ const dependency_directives_scan::Token &DDTok, Token &Result) {
+ const char *TokPtr = BufferStart + DDTok.Offset;
+ Result.startToken();
+ Result.setLocation(getSourceLocation(TokPtr));
+ Result.setKind(DDTok.Kind);
+ Result.setFlag((Token::TokenFlags)DDTok.Flags);
+ Result.setLength(DDTok.Length);
+ BufferPtr = TokPtr + DDTok.Length;
+ return TokPtr;
+}
+
+bool Lexer::LexDependencyDirectiveToken(Token &Result) {
+ assert(isDependencyDirectivesLexer());
+
+ using namespace dependency_directives_scan;
+
+ while (NextDepDirectiveTokenIndex == DepDirectives.front().Tokens.size()) {
+ if (DepDirectives.front().Kind == pp_eof)
+ return LexEndOfFile(Result, BufferEnd);
+ NextDepDirectiveTokenIndex = 0;
+ DepDirectives = DepDirectives.drop_front();
+ }
+
+ const dependency_directives_scan::Token &DDTok =
+ DepDirectives.front().Tokens[NextDepDirectiveTokenIndex++];
+ if (NextDepDirectiveTokenIndex > 1 || DDTok.Kind != tok::hash) {
+ // Read something other than a preprocessor directive hash.
+ MIOpt.ReadToken();
+ }
+
+ const char *TokPtr = convertDependencyDirectiveToken(DDTok, Result);
+
+ if (Result.is(tok::hash) && Result.isAtStartOfLine()) {
+ PP->HandleDirective(Result);
+ return false;
+ }
+ if (Result.is(tok::raw_identifier)) {
+ Result.setRawIdentifierData(TokPtr);
+ if (!isLexingRawMode()) {
+ IdentifierInfo *II = PP->LookUpIdentifierInfo(Result);
+ if (II->isHandleIdentifierCase())
+ return PP->HandleIdentifier(Result);
+ }
+ return true;
+ }
+ if (Result.isLiteral()) {
+ Result.setLiteralData(TokPtr);
+ return true;
+ }
+ if (Result.is(tok::colon) &&
+ (LangOpts.CPlusPlus || LangOpts.DoubleSquareBracketAttributes)) {
+ // Convert consecutive colons to 'tok::coloncolon'.
+ if (*BufferPtr == ':') {
+ assert(DepDirectives.front().Tokens[NextDepDirectiveTokenIndex].is(
+ tok::colon));
+ ++NextDepDirectiveTokenIndex;
+ Result.setKind(tok::coloncolon);
+ }
+ return true;
+ }
+ if (Result.is(tok::eod))
+ ParsingPreprocessorDirective = false;
+
+ return true;
+}
+
+bool Lexer::LexDependencyDirectiveTokenWhileSkipping(Token &Result) {
+ assert(isDependencyDirectivesLexer());
+
+ using namespace dependency_directives_scan;
+
+ bool Stop = false;
+ unsigned NestedIfs = 0;
+ do {
+ DepDirectives = DepDirectives.drop_front();
+ switch (DepDirectives.front().Kind) {
+ case pp_none:
+ llvm_unreachable("unexpected 'pp_none'");
+ case pp_include:
+ case pp___include_macros:
+ case pp_define:
+ case pp_undef:
+ case pp_import:
+ case pp_pragma_import:
+ case pp_pragma_once:
+ case pp_pragma_push_macro:
+ case pp_pragma_pop_macro:
+ case pp_pragma_include_alias:
+ case pp_include_next:
+ case decl_at_import:
+ case cxx_module_decl:
+ case cxx_import_decl:
+ case cxx_export_module_decl:
+ case cxx_export_import_decl:
+ break;
+ case pp_if:
+ case pp_ifdef:
+ case pp_ifndef:
+ ++NestedIfs;
+ break;
+ case pp_elif:
+ case pp_elifdef:
+ case pp_elifndef:
+ case pp_else:
+ if (!NestedIfs) {
+ Stop = true;
+ }
+ break;
+ case pp_endif:
+ if (!NestedIfs) {
+ Stop = true;
+ } else {
+ --NestedIfs;
+ }
+ break;
+ case pp_eof:
+ NextDepDirectiveTokenIndex = 0;
+ return LexEndOfFile(Result, BufferEnd);
+ }
+ } while (!Stop);
+
+ const dependency_directives_scan::Token &DDTok =
+ DepDirectives.front().Tokens.front();
+ assert(DDTok.is(tok::hash));
+ NextDepDirectiveTokenIndex = 1;
+
+ convertDependencyDirectiveToken(DDTok, Result);
+ return false;
+}
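
A usage sketch of the named escape sequences the new tryReadNamedUCN path accepts (a delimited-escape extension before C++23); strict Unicode names are required, and loose matching only drives the fix-it note for near-misses:

    const char32_t Alpha = U'\N{GREEK SMALL LETTER ALPHA}'; // U+03B1
    const char32_t Cat[] = U"\N{CAT FACE}";                 // U+1F431
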
diff --git a/contrib/llvm-project/clang/lib/Lex/LiteralSupport.cpp b/contrib/llvm-project/clang/lib/Lex/LiteralSupport.cpp
index 76c8b324671d..ebf30c9f01a9 100644
--- a/contrib/llvm-project/clang/lib/Lex/LiteralSupport.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/LiteralSupport.cpp
@@ -27,6 +27,7 @@
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/Unicode.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
@@ -233,7 +234,8 @@ static unsigned ProcessCharEscape(const char *ThisTokBegin,
HadError = true;
if (Diags)
Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
- diag::err_delimited_escape_missing_brace);
+ diag::err_delimited_escape_missing_brace)
+ << "o";
break;
}
@@ -309,7 +311,8 @@ static unsigned ProcessCharEscape(const char *ThisTokBegin,
<< tok::r_brace;
else if (!HadError) {
Diag(Diags, Features, Loc, ThisTokBegin, EscapeBegin, ThisTokBuf,
- diag::ext_delimited_escape_sequence);
+ diag::ext_delimited_escape_sequence)
+ << /*delimited*/ 0;
}
}
@@ -320,10 +323,8 @@ static void appendCodePoint(unsigned Codepoint,
llvm::SmallVectorImpl<char> &Str) {
char ResultBuf[4];
char *ResultPtr = ResultBuf;
- bool Res = llvm::ConvertCodePointToUTF8(Codepoint, ResultPtr);
- (void)Res;
- assert(Res && "Unexpected conversion failure");
- Str.append(ResultBuf, ResultPtr);
+ if (llvm::ConvertCodePointToUTF8(Codepoint, ResultPtr))
+ Str.append(ResultBuf, ResultPtr);
}
void clang::expandUCNs(SmallVectorImpl<char> &Buf, StringRef Input) {
@@ -337,7 +338,7 @@ void clang::expandUCNs(SmallVectorImpl<char> &Buf, StringRef Input) {
char Kind = *I;
++I;
- assert(Kind == 'u' || Kind == 'U');
+ assert(Kind == 'u' || Kind == 'U' || Kind == 'N');
uint32_t CodePoint = 0;
if (Kind == 'u' && *I == '{') {
@@ -351,6 +352,22 @@ void clang::expandUCNs(SmallVectorImpl<char> &Buf, StringRef Input) {
continue;
}
+ if (Kind == 'N') {
+ assert(*I == '{');
+ ++I;
+ auto Delim = std::find(I, Input.end(), '}');
+ assert(Delim != Input.end());
+ llvm::Optional<llvm::sys::unicode::LooseMatchingResult> Res =
+ llvm::sys::unicode::nameToCodepointLooseMatching(
+ StringRef(I, std::distance(I, Delim)));
+ assert(Res);
+ CodePoint = Res->CodePoint;
+ assert(CodePoint != 0xFFFFFFFF);
+ appendCodePoint(CodePoint, Buf);
+ I = Delim;
+ continue;
+ }
+
unsigned NumHexDigits;
if (Kind == 'u')
NumHexDigits = 4;
@@ -372,23 +389,20 @@ void clang::expandUCNs(SmallVectorImpl<char> &Buf, StringRef Input) {
}
}
-/// ProcessUCNEscape - Read the Universal Character Name, check constraints and
-/// return the UTF32.
-static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
- const char *ThisTokEnd,
- uint32_t &UcnVal, unsigned short &UcnLen,
- FullSourceLoc Loc, DiagnosticsEngine *Diags,
- const LangOptions &Features,
- bool in_char_string_literal = false) {
+static bool ProcessNumericUCNEscape(const char *ThisTokBegin,
+ const char *&ThisTokBuf,
+ const char *ThisTokEnd, uint32_t &UcnVal,
+ unsigned short &UcnLen, bool &Delimited,
+ FullSourceLoc Loc, DiagnosticsEngine *Diags,
+ const LangOptions &Features,
+ bool in_char_string_literal = false) {
const char *UcnBegin = ThisTokBuf;
+ bool HasError = false;
+ bool EndDelimiterFound = false;
// Skip the '\u' char's.
ThisTokBuf += 2;
-
- bool Delimited = false;
- bool EndDelimiterFound = false;
- bool HasError = false;
-
+ Delimited = false;
if (UcnBegin[1] == 'u' && in_char_string_literal &&
ThisTokBuf != ThisTokEnd && *ThisTokBuf == '{') {
Delimited = true;
@@ -396,7 +410,8 @@ static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
} else if (ThisTokBuf == ThisTokEnd || !isHexDigit(*ThisTokBuf)) {
if (Diags)
Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
- diag::err_hex_escape_no_digits) << StringRef(&ThisTokBuf[-1], 1);
+ diag::err_hex_escape_no_digits)
+ << StringRef(&ThisTokBuf[-1], 1);
return false;
}
UcnLen = (ThisTokBuf[-1] == 'u' ? 4 : 8);
@@ -457,7 +472,136 @@ static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
: diag::err_ucn_escape_incomplete);
return false;
}
+ return !HasError;
+}
+
+static void DiagnoseInvalidUnicodeCharacterName(
+ DiagnosticsEngine *Diags, const LangOptions &Features, FullSourceLoc Loc,
+ const char *TokBegin, const char *TokRangeBegin, const char *TokRangeEnd,
+ llvm::StringRef Name) {
+
+ Diag(Diags, Features, Loc, TokBegin, TokRangeBegin, TokRangeEnd,
+ diag::err_invalid_ucn_name)
+ << Name;
+
+ namespace u = llvm::sys::unicode;
+
+ llvm::Optional<u::LooseMatchingResult> Res =
+ u::nameToCodepointLooseMatching(Name);
+ if (Res) {
+ Diag(Diags, Features, Loc, TokBegin, TokRangeBegin, TokRangeEnd,
+ diag::note_invalid_ucn_name_loose_matching)
+ << FixItHint::CreateReplacement(
+ MakeCharSourceRange(Features, Loc, TokBegin, TokRangeBegin,
+ TokRangeEnd),
+ Res->Name);
+ return;
+ }
+
+ unsigned Distance = 0;
+ SmallVector<u::MatchForCodepointName> Matches =
+ u::nearestMatchesForCodepointName(Name, 5);
+ assert(!Matches.empty() && "No unicode characters found");
+
+ for (const auto &Match : Matches) {
+ if (Distance == 0)
+ Distance = Match.Distance;
+ if (std::max(Distance, Match.Distance) -
+ std::min(Distance, Match.Distance) >
+ 3)
+ break;
+ Distance = Match.Distance;
+
+ std::string Str;
+ llvm::UTF32 V = Match.Value;
+ LLVM_ATTRIBUTE_UNUSED bool Converted =
+ llvm::convertUTF32ToUTF8String(llvm::ArrayRef<llvm::UTF32>(&V, 1), Str);
+  assert(Converted && "Found a match which is not a Unicode character");
+
+ Diag(Diags, Features, Loc, TokBegin, TokRangeBegin, TokRangeEnd,
+ diag::note_invalid_ucn_name_candidate)
+ << Match.Name << llvm::utohexstr(Match.Value)
+        << Str // FIXME: Fix the rendering of non-printable characters
+ << FixItHint::CreateReplacement(
+ MakeCharSourceRange(Features, Loc, TokBegin, TokRangeBegin,
+ TokRangeEnd),
+ Match.Name);
+ }
+}
+
+static bool ProcessNamedUCNEscape(const char *ThisTokBegin,
+ const char *&ThisTokBuf,
+ const char *ThisTokEnd, uint32_t &UcnVal,
+ unsigned short &UcnLen, FullSourceLoc Loc,
+ DiagnosticsEngine *Diags,
+ const LangOptions &Features) {
+ const char *UcnBegin = ThisTokBuf;
+ assert(UcnBegin[0] == '\\' && UcnBegin[1] == 'N');
+ ThisTokBuf += 2;
+ if (ThisTokBuf == ThisTokEnd || *ThisTokBuf != '{') {
+ if (Diags) {
+ Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
+ diag::err_delimited_escape_missing_brace)
+ << StringRef(&ThisTokBuf[-1], 1);
+ }
+ ThisTokBuf++;
+ return false;
+ }
+ ThisTokBuf++;
+ const char *ClosingBrace =
+ std::find_if_not(ThisTokBuf, ThisTokEnd, [](char C) {
+ return llvm::isAlnum(C) || llvm::isSpace(C) || C == '_' || C == '-';
+ });
+ bool Incomplete = ClosingBrace == ThisTokEnd || *ClosingBrace != '}';
+ bool Empty = ClosingBrace == ThisTokBuf;
+ if (Incomplete || Empty) {
+ if (Diags) {
+ Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
+ Incomplete ? diag::err_ucn_escape_incomplete
+ : diag::err_delimited_escape_empty)
+ << StringRef(&UcnBegin[1], 1);
+ }
+ ThisTokBuf = ClosingBrace == ThisTokEnd ? ClosingBrace : ClosingBrace + 1;
+ return false;
+ }
+ StringRef Name(ThisTokBuf, ClosingBrace - ThisTokBuf);
+ ThisTokBuf = ClosingBrace + 1;
+ llvm::Optional<char32_t> Res =
+ llvm::sys::unicode::nameToCodepointStrict(Name);
+ if (!Res) {
+ if (Diags)
+ DiagnoseInvalidUnicodeCharacterName(Diags, Features, Loc, ThisTokBegin,
+ &UcnBegin[3], ClosingBrace, Name);
+ return false;
+ }
+ UcnVal = *Res;
+ UcnLen = UcnVal > 0xFFFF ? 8 : 4;
+ return true;
+}
+
+/// ProcessUCNEscape - Read the Universal Character Name, check constraints and
+/// return the UTF32.
+static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
+ const char *ThisTokEnd, uint32_t &UcnVal,
+ unsigned short &UcnLen, FullSourceLoc Loc,
+ DiagnosticsEngine *Diags,
+ const LangOptions &Features,
+ bool in_char_string_literal = false) {
+ bool HasError;
+ const char *UcnBegin = ThisTokBuf;
+ bool IsDelimitedEscapeSequence = false;
+ bool IsNamedEscapeSequence = false;
+ if (ThisTokBuf[1] == 'N') {
+ IsNamedEscapeSequence = true;
+ HasError = !ProcessNamedUCNEscape(ThisTokBegin, ThisTokBuf, ThisTokEnd,
+ UcnVal, UcnLen, Loc, Diags, Features);
+ } else {
+ HasError =
+ !ProcessNumericUCNEscape(ThisTokBegin, ThisTokBuf, ThisTokEnd, UcnVal,
+ UcnLen, IsDelimitedEscapeSequence, Loc, Diags,
+ Features, in_char_string_literal);
+ }
if (HasError)
return false;
@@ -495,9 +639,10 @@ static bool ProcessUCNEscape(const char *ThisTokBegin, const char *&ThisTokBuf,
Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
diag::warn_ucn_not_valid_in_c89_literal);
- if (Delimited && Diags)
+ if ((IsDelimitedEscapeSequence || IsNamedEscapeSequence) && Diags)
Diag(Diags, Features, Loc, ThisTokBegin, UcnBegin, ThisTokBuf,
- diag::ext_delimited_escape_sequence);
+ diag::ext_delimited_escape_sequence)
+ << (IsNamedEscapeSequence ? 1 : 0);
return true;
}
@@ -711,6 +856,7 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
isFract = false;
isAccum = false;
hadError = false;
+ isBitInt = false;
// This routine assumes that the range begin/end matches the regex for integer
// and FP constants (specifically, the 'pp-number' regex), and assumes that
@@ -895,6 +1041,24 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
if (isImaginary) break; // Cannot be repeated.
isImaginary = true;
continue; // Success.
+ case 'w':
+ case 'W':
+ if (isFPConstant)
+ break; // Invalid for floats.
+ if (HasSize)
+ break; // Invalid if we already have a size for the literal.
+
+ // wb and WB are allowed, but a mixture of cases like Wb or wB is not. We
+ // explicitly do not support the suffix in C++ as an extension because a
+ // library-based UDL that resolves to a library type may be more
+ // appropriate there.
+ if (!LangOpts.CPlusPlus && ((s[0] == 'w' && s[1] == 'b') ||
+ (s[0] == 'W' && s[1] == 'B'))) {
+ isBitInt = true;
+ HasSize = true;
+ ++s; // Skip both characters (2nd char skipped on continue).
+ continue; // Success.
+ }
}
// If we reached here, there was an error or a ud-suffix.
break;
@@ -916,6 +1080,7 @@ NumericLiteralParser::NumericLiteralParser(StringRef TokSpelling,
isFloat16 = false;
isHalf = false;
isImaginary = false;
+ isBitInt = false;
MicrosoftInteger = 0;
saw_fixed_point_suffix = false;
isFract = false;
@@ -1145,8 +1310,14 @@ void NumericLiteralParser::ParseNumberStartingWithZero(SourceLocation TokLoc) {
// floating point constant, the radix will change to 10. Octal floating
// point constants are not permitted (only decimal and hexadecimal).
radix = 8;
- DigitsBegin = s;
+ const char *PossibleNewDigitStart = s;
s = SkipOctalDigits(s);
+ // When the value is 0 followed by a suffix (like 0wb), we want to leave 0
+ // as the start of the digits. So if skipping octal digits does not skip
+ // anything, we leave the digit start where it was.
+ if (s != PossibleNewDigitStart)
+ DigitsBegin = PossibleNewDigitStart;
+
if (s == ThisTokEnd)
return; // Done, simple octal number like 01234
@@ -1510,7 +1681,7 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
// If we see bad encoding for unprefixed character literals, warn and
// simply copy the byte values, for compatibility with gcc and
// older versions of clang.
- bool NoErrorOnBadEncoding = isAscii();
+ bool NoErrorOnBadEncoding = isOrdinary();
unsigned Msg = diag::err_bad_character_encoding;
if (NoErrorOnBadEncoding)
Msg = diag::warn_bad_character_encoding;
@@ -1535,7 +1706,7 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
continue;
}
// Is this a Universal Character Name escape?
- if (begin[1] == 'u' || begin[1] == 'U') {
+ if (begin[1] == 'u' || begin[1] == 'U' || begin[1] == 'N') {
unsigned short UcnLen = 0;
if (!ProcessUCNEscape(TokBegin, begin, end, *buffer_begin, UcnLen,
FullSourceLoc(Loc, PP.getSourceManager()),
@@ -1560,9 +1731,9 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
unsigned NumCharsSoFar = buffer_begin - &codepoint_buffer.front();
if (NumCharsSoFar > 1) {
- if (isAscii() && NumCharsSoFar == 4)
+ if (isOrdinary() && NumCharsSoFar == 4)
PP.Diag(Loc, diag::warn_four_char_character_literal);
- else if (isAscii())
+ else if (isOrdinary())
PP.Diag(Loc, diag::warn_multichar_character_literal);
else {
PP.Diag(Loc, diag::err_multichar_character_literal) << (isWide() ? 0 : 1);
@@ -1578,7 +1749,7 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
// Narrow character literals act as though their value is concatenated
// in this implementation, but warn on overflow.
bool multi_char_too_long = false;
- if (isAscii() && isMultiChar()) {
+ if (isOrdinary() && isMultiChar()) {
LitVal = 0;
for (size_t i = 0; i < NumCharsSoFar; ++i) {
// check for enough leading zeros to shift into
@@ -1602,7 +1773,7 @@ CharLiteralParser::CharLiteralParser(const char *begin, const char *end,
// if 'char' is signed for this target (C99 6.4.4.4p10). Note that multiple
// character constants are not sign extended in the this implementation:
// '\xFF\xFF' = 65536 and '\x0\xFF' = 255, which matches GCC.
- if (isAscii() && NumCharsSoFar == 1 && (Value & 128) &&
+ if (isOrdinary() && NumCharsSoFar == 1 && (Value & 128) &&
PP.getLangOpts().CharIsSigned)
Value = (signed char)Value;
}
@@ -1707,7 +1878,7 @@ void StringLiteralParser::init(ArrayRef<Token> StringToks){
// Remember if we see any wide or utf-8/16/32 strings.
// Also check for illegal concatenations.
if (StringToks[i].isNot(Kind) && StringToks[i].isNot(tok::string_literal)) {
- if (isAscii()) {
+ if (isOrdinary()) {
Kind = StringToks[i].getKind();
} else {
if (Diags)
@@ -1895,7 +2066,8 @@ void StringLiteralParser::init(ArrayRef<Token> StringToks){
continue;
}
// Is this a Universal Character Name escape?
- if (ThisTokBuf[1] == 'u' || ThisTokBuf[1] == 'U') {
+ if (ThisTokBuf[1] == 'u' || ThisTokBuf[1] == 'U' ||
+ ThisTokBuf[1] == 'N') {
EncodeUCNEscape(ThisTokBegin, ThisTokBuf, ThisTokEnd,
ResultPtr, hadError,
FullSourceLoc(StringToks[i].getLocation(), SM),
@@ -1990,7 +2162,7 @@ bool StringLiteralParser::CopyStringFragment(const Token &Tok,
// If we see bad encoding for unprefixed string literals, warn and
// simply copy the byte values, for compatibility with gcc and older
// versions of clang.
- bool NoErrorOnBadEncoding = isAscii();
+ bool NoErrorOnBadEncoding = isOrdinary();
if (NoErrorOnBadEncoding) {
memcpy(ResultPtr, Fragment.data(), Fragment.size());
ResultPtr += Fragment.size();
@@ -2088,7 +2260,8 @@ unsigned StringLiteralParser::getOffsetOfStringByte(const Token &Tok,
// Otherwise, this is an escape character. Advance over it.
bool HadError = false;
- if (SpellingPtr[1] == 'u' || SpellingPtr[1] == 'U') {
+ if (SpellingPtr[1] == 'u' || SpellingPtr[1] == 'U' ||
+ SpellingPtr[1] == 'N') {
const char *EscapePtr = SpellingPtr;
unsigned Len = MeasureUCNEscape(SpellingStart, SpellingPtr, SpellingEnd,
1, Features, HadError);
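
A usage sketch (C2x, not C++) of the wb/WB suffix handling added above; as the parser comment notes, mixed-case suffixes (Wb, wB) are rejected and the suffix is deliberately not offered in C++:

    _BitInt(8)          Small  = 3wb;
    unsigned _BitInt(8) USmall = 3uwb; /* 'u' combines with 'wb' */
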
diff --git a/contrib/llvm-project/clang/lib/Lex/MacroInfo.cpp b/contrib/llvm-project/clang/lib/Lex/MacroInfo.cpp
index 1ccd140364ae..310b95f36771 100644
--- a/contrib/llvm-project/clang/lib/Lex/MacroInfo.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/MacroInfo.cpp
@@ -28,6 +28,25 @@
using namespace clang;
+namespace {
+
+// MacroInfo is expected to take 40 bytes on platforms with an 8 byte pointer
+// and 4 byte SourceLocation.
+template <int> class MacroInfoSizeChecker {
+public:
+ constexpr static bool AsExpected = true;
+};
+template <> class MacroInfoSizeChecker<8> {
+public:
+ constexpr static bool AsExpected =
+ sizeof(MacroInfo) == (32 + sizeof(SourceLocation) * 2);
+};
+
+static_assert(MacroInfoSizeChecker<sizeof(void *)>::AsExpected,
+ "Unexpected size of MacroInfo");
+
+} // end namespace
+
MacroInfo::MacroInfo(SourceLocation DefLoc)
: Location(DefLoc), IsDefinitionLengthCached(false), IsFunctionLike(false),
IsC99Varargs(false), IsGNUVarargs(false), IsBuiltinMacro(false),
@@ -39,6 +58,7 @@ unsigned MacroInfo::getDefinitionLengthSlow(const SourceManager &SM) const {
assert(!IsDefinitionLengthCached);
IsDefinitionLengthCached = true;
+ ArrayRef<Token> ReplacementTokens = tokens();
if (ReplacementTokens.empty())
return (DefinitionLength = 0);
@@ -76,7 +96,7 @@ bool MacroInfo::isIdenticalTo(const MacroInfo &Other, Preprocessor &PP,
bool Lexically = !Syntactically;
// Check # tokens in replacement, number of args, and various flags all match.
- if (ReplacementTokens.size() != Other.ReplacementTokens.size() ||
+ if (getNumTokens() != Other.getNumTokens() ||
getNumParams() != Other.getNumParams() ||
isFunctionLike() != Other.isFunctionLike() ||
isC99Varargs() != Other.isC99Varargs() ||
@@ -92,7 +112,7 @@ bool MacroInfo::isIdenticalTo(const MacroInfo &Other, Preprocessor &PP,
}
// Check all the tokens.
- for (unsigned i = 0, e = ReplacementTokens.size(); i != e; ++i) {
+ for (unsigned i = 0; i != NumReplacementTokens; ++i) {
const Token &A = ReplacementTokens[i];
const Token &B = Other.ReplacementTokens[i];
if (A.getKind() != B.getKind())
@@ -157,7 +177,7 @@ LLVM_DUMP_METHOD void MacroInfo::dump() const {
}
bool First = true;
- for (const Token &Tok : ReplacementTokens) {
+ for (const Token &Tok : tokens()) {
// Leading space is semantically meaningful in a macro definition,
// so preserve it in the dump output.
if (First || Tok.hasLeadingSpace())
@@ -181,8 +201,7 @@ MacroDirective::DefInfo MacroDirective::getDefinition() {
Optional<bool> isPublic;
for (; MD; MD = MD->getPrevious()) {
if (DefMacroDirective *DefMD = dyn_cast<DefMacroDirective>(MD))
- return DefInfo(DefMD, UndefLoc,
- !isPublic.hasValue() || isPublic.getValue());
+ return DefInfo(DefMD, UndefLoc, !isPublic || *isPublic);
if (UndefMacroDirective *UndefMD = dyn_cast<UndefMacroDirective>(MD)) {
UndefLoc = UndefMD->getLocation();
@@ -190,12 +209,11 @@ MacroDirective::DefInfo MacroDirective::getDefinition() {
}
VisibilityMacroDirective *VisMD = cast<VisibilityMacroDirective>(MD);
- if (!isPublic.hasValue())
+ if (!isPublic)
isPublic = VisMD->isPublic();
}
- return DefInfo(nullptr, UndefLoc,
- !isPublic.hasValue() || isPublic.getValue());
+ return DefInfo(nullptr, UndefLoc, !isPublic || isPublic.getValue());
}
const MacroDirective::DefInfo
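
A generic sketch of the size-guard idiom used above: specialize on pointer width so the layout assertion only constrains the configuration it was written against and stays vacuously true elsewhere. The struct and expected size here are illustrative, not MacroInfo's.

    #include <cstdint>

    struct Tracked { void *P; uint32_t A, B; };

    template <int PtrSize> struct SizeChecker {
      static constexpr bool AsExpected = true; // unchecked configurations
    };
    template <> struct SizeChecker<8> {
      static constexpr bool AsExpected = sizeof(Tracked) == 16; // 8 + 4 + 4
    };
    static_assert(SizeChecker<sizeof(void *)>::AsExpected,
                  "Unexpected size of Tracked");
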
diff --git a/contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp b/contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp
index 0b136aeb580f..c791e3e4e5ca 100644
--- a/contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/ModuleMap.cpp
@@ -473,8 +473,7 @@ static Module *getTopLevelOrNull(Module *M) {
void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule,
bool RequestingModuleIsModuleInterface,
SourceLocation FilenameLoc,
- StringRef Filename,
- const FileEntry *File) {
+ StringRef Filename, FileEntryRef File) {
// No errors for indirect modules. This may be a bit of a problem for modules
// with no source files.
if (getTopLevelOrNull(RequestingModule) != getTopLevelOrNull(SourceModule))
@@ -482,7 +481,7 @@ void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule,
if (RequestingModule) {
resolveUses(RequestingModule, /*Complain=*/false);
- resolveHeaderDirectives(RequestingModule);
+ resolveHeaderDirectives(RequestingModule, /*File=*/llvm::None);
}
bool Excluded = false;
@@ -542,7 +541,7 @@ void ModuleMap::diagnoseHeaderInclusion(Module *RequestingModule,
diag::warn_non_modular_include_in_framework_module :
diag::warn_non_modular_include_in_module;
Diags.Report(FilenameLoc, DiagID) << RequestingModule->getFullModuleName()
- << File->getName();
+ << File.getName();
}
}
@@ -905,6 +904,19 @@ Module *ModuleMap::createHeaderModule(StringRef Name,
return Result;
}
+Module *ModuleMap::createHeaderUnit(SourceLocation Loc, StringRef Name,
+ Module::Header H) {
+ assert(LangOpts.CurrentModule == Name && "module name mismatch");
+ assert(!Modules[Name] && "redefining existing module");
+
+ auto *Result = new Module(Name, Loc, nullptr, /*IsFramework*/ false,
+ /*IsExplicit*/ false, NumCreatedModules++);
+ Result->Kind = Module::ModuleHeaderUnit;
+ Modules[Name] = SourceModule = Result;
+ addHeader(Result, H, NormalHeader);
+ return Result;
+}
+
/// For a framework module, infer the framework against which we
/// should link.
static void inferFrameworkLink(Module *Mod, const DirectoryEntry *FrameworkDir,
@@ -1191,25 +1203,35 @@ void ModuleMap::resolveHeaderDirectives(const FileEntry *File) const {
auto BySize = LazyHeadersBySize.find(File->getSize());
if (BySize != LazyHeadersBySize.end()) {
for (auto *M : BySize->second)
- resolveHeaderDirectives(M);
+ resolveHeaderDirectives(M, File);
LazyHeadersBySize.erase(BySize);
}
auto ByModTime = LazyHeadersByModTime.find(File->getModificationTime());
if (ByModTime != LazyHeadersByModTime.end()) {
for (auto *M : ByModTime->second)
- resolveHeaderDirectives(M);
+ resolveHeaderDirectives(M, File);
LazyHeadersByModTime.erase(ByModTime);
}
}
-void ModuleMap::resolveHeaderDirectives(Module *Mod) const {
+void ModuleMap::resolveHeaderDirectives(
+ Module *Mod, llvm::Optional<const FileEntry *> File) const {
bool NeedsFramework = false;
- for (auto &Header : Mod->UnresolvedHeaders)
- // This operation is logically const; we're just changing how we represent
- // the header information for this file.
- const_cast<ModuleMap*>(this)->resolveHeader(Mod, Header, NeedsFramework);
- Mod->UnresolvedHeaders.clear();
+ SmallVector<Module::UnresolvedHeaderDirective, 1> NewHeaders;
+ const auto Size = File ? File.getValue()->getSize() : 0;
+ const auto ModTime = File ? File.getValue()->getModificationTime() : 0;
+
+ for (auto &Header : Mod->UnresolvedHeaders) {
+ if (File && ((Header.ModTime && Header.ModTime != ModTime) ||
+ (Header.Size && Header.Size != Size)))
+ NewHeaders.push_back(Header);
+ else
+ // This operation is logically const; we're just changing how we represent
+ // the header information for this file.
+ const_cast<ModuleMap *>(this)->resolveHeader(Mod, Header, NeedsFramework);
+ }
+ Mod->UnresolvedHeaders.swap(NewHeaders);
}
void ModuleMap::addHeader(Module *Mod, Module::Header Header,
@@ -1615,7 +1637,7 @@ retry:
SpellingBuffer.resize(LToken.getLength() + 1);
const char *Start = SpellingBuffer.data();
unsigned Length =
- Lexer::getSpelling(LToken, Start, SourceMgr, L.getLangOpts());
+ Lexer::getSpelling(LToken, Start, SourceMgr, Map.LangOpts);
uint64_t Value;
if (StringRef(Start, Length).getAsInteger(0, Value)) {
Diags.Report(Tok.getLocation(), diag::err_mmap_unknown_token);
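
A rough sketch of the filtering resolveHeaderDirectives now performs when given a specific file; the types and helper are illustrative, not the ModuleMap API:

    #include <cstdint>
    #include <optional>
    #include <vector>

    struct UnresolvedHeader {
      std::optional<uint64_t> Size, ModTime; // stated in the module map
    };

    // Resolve only headers whose recorded size/mtime could match the file;
    // everything else stays pending for a later lookup.
    void resolveFor(std::vector<UnresolvedHeader> &Pending, uint64_t Size,
                    uint64_t ModTime) {
      std::vector<UnresolvedHeader> Keep;
      for (auto &H : Pending) {
        bool CannotMatch = (H.ModTime && *H.ModTime != ModTime) ||
                           (H.Size && *H.Size != Size);
        if (CannotMatch)
          Keep.push_back(H);
        // else: the real code resolves the header here
      }
      Pending.swap(Keep);
    }
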
diff --git a/contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp b/contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp
index f3aefdd22b51..4dcef01e3e4c 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPDirectives.cpp
@@ -33,15 +33,16 @@
#include "clang/Lex/Token.h"
#include "clang/Lex/VariadicMacroSupport.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
-#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/StringRef.h"
+#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/AlignOf.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Path.h"
+#include "llvm/Support/SaveAndRestore.h"
#include <algorithm>
#include <cassert>
#include <cstring>
@@ -266,6 +267,51 @@ static bool warnByDefaultOnWrongCase(StringRef Include) {
.Default(false);
}
+/// Find a string in `Candidates` similar to `LHS`.
+///
+/// \param LHS the string to find a similar match for.
+///
+/// \param Candidates the candidate strings to search.
+///
+/// \returns a similar string if one exists; otherwise,
+/// returns None.
+static Optional<StringRef> findSimilarStr(
+ StringRef LHS, const std::vector<StringRef> &Candidates) {
+ // We need to check if `Candidates` has the exact case-insensitive string
+ // because the Levenshtein distance match does not care about it.
+ for (StringRef C : Candidates) {
+ if (LHS.equals_insensitive(C)) {
+ return C;
+ }
+ }
+
+ // Keep going with the Levenshtein distance match.
+  // If the LHS size is less than 3, use the LHS size minus 1; otherwise,
+  // use the LHS size divided by 3.
+ size_t Length = LHS.size();
+ size_t MaxDist = Length < 3 ? Length - 1 : Length / 3;
+
+ Optional<std::pair<StringRef, size_t>> SimilarStr = None;
+ for (StringRef C : Candidates) {
+ size_t CurDist = LHS.edit_distance(C, true);
+ if (CurDist <= MaxDist) {
+ if (!SimilarStr) {
+ // The first similar string found.
+ SimilarStr = {C, CurDist};
+ } else if (CurDist < SimilarStr->second) {
+ // More similar string found.
+ SimilarStr = {C, CurDist};
+ }
+ }
+ }
+
+ if (SimilarStr) {
+ return SimilarStr->first;
+ } else {
+ return None;
+ }
+}
+
bool Preprocessor::CheckMacroName(Token &MacroNameTok, MacroUse isDefineUndef,
bool *ShadowFlag) {
// Missing macro name?
@@ -398,39 +444,29 @@ SourceLocation Preprocessor::CheckEndOfDirective(const char *DirType,
return DiscardUntilEndOfDirective().getEnd();
}
-Optional<unsigned> Preprocessor::getSkippedRangeForExcludedConditionalBlock(
- SourceLocation HashLoc) {
- if (!ExcludedConditionalDirectiveSkipMappings)
- return None;
- if (!HashLoc.isFileID())
- return None;
-
- std::pair<FileID, unsigned> HashFileOffset =
- SourceMgr.getDecomposedLoc(HashLoc);
- Optional<llvm::MemoryBufferRef> Buf =
- SourceMgr.getBufferOrNone(HashFileOffset.first);
- if (!Buf)
- return None;
- auto It =
- ExcludedConditionalDirectiveSkipMappings->find(Buf->getBufferStart());
- if (It == ExcludedConditionalDirectiveSkipMappings->end())
- return None;
-
- const PreprocessorSkippedRangeMapping &SkippedRanges = *It->getSecond();
- // Check if the offset of '#' is mapped in the skipped ranges.
- auto MappingIt = SkippedRanges.find(HashFileOffset.second);
- if (MappingIt == SkippedRanges.end())
- return None;
-
- unsigned BytesToSkip = MappingIt->getSecond();
- unsigned CurLexerBufferOffset = CurLexer->getCurrentBufferOffset();
- assert(CurLexerBufferOffset >= HashFileOffset.second &&
- "lexer is before the hash?");
- // Take into account the fact that the lexer has already advanced, so the
- // number of bytes to skip must be adjusted.
- unsigned LengthDiff = CurLexerBufferOffset - HashFileOffset.second;
- assert(BytesToSkip >= LengthDiff && "lexer is after the skipped range?");
- return BytesToSkip - LengthDiff;
+void Preprocessor::SuggestTypoedDirective(const Token &Tok,
+ StringRef Directive) const {
+ // If this is a `.S` file, treat unknown # directives as non-preprocessor
+ // directives.
+ if (getLangOpts().AsmPreprocessor) return;
+
+ std::vector<StringRef> Candidates = {
+ "if", "ifdef", "ifndef", "elif", "else", "endif"
+ };
+ if (LangOpts.C2x || LangOpts.CPlusPlus2b)
+ Candidates.insert(Candidates.end(), {"elifdef", "elifndef"});
+
+ if (Optional<StringRef> Sugg = findSimilarStr(Directive, Candidates)) {
+      // The directive cannot come from a macro.
+ assert(Tok.getLocation().isFileID());
+ CharSourceRange DirectiveRange = CharSourceRange::getCharRange(
+ Tok.getLocation(),
+ Tok.getLocation().getLocWithOffset(Directive.size()));
+ StringRef SuggValue = *Sugg;
+
+ auto Hint = FixItHint::CreateReplacement(DirectiveRange, SuggValue);
+ Diag(Tok, diag::warn_pp_invalid_directive) << 1 << SuggValue << Hint;
+ }
}
/// SkipExcludedConditionalBlock - We just read a \#if or related directive and
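
An illustration of what SuggestTypoedDirective produces for a near-miss directive (wording approximate):

    // #ifdfe FOO
    //  ^ warning: invalid preprocessing directive, did you mean '#ifdef'?
    // #endif
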
@@ -446,6 +482,19 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
bool FoundNonSkipPortion,
bool FoundElse,
SourceLocation ElseLoc) {
+ // In SkippingRangeStateTy we are depending on SkipExcludedConditionalBlock()
+ // not getting called recursively by storing the RecordedSkippedRanges
+ // DenseMap lookup pointer (field SkipRangePtr). SkippingRangeStateTy expects
+ // that RecordedSkippedRanges won't get modified and SkipRangePtr won't be
+ // invalidated. If this changes and there is a need to call
+ // SkipExcludedConditionalBlock() recursively, SkippingRangeStateTy should
+ // change to do a second lookup in endLexPass function instead of reusing the
+ // lookup pointer.
+ assert(!SkippingExcludedConditionalBlock &&
+ "calling SkipExcludedConditionalBlock recursively");
+ llvm::SaveAndRestore<bool> SARSkipping(SkippingExcludedConditionalBlock,
+ true);
+
++NumSkipped;
assert(!CurTokenLexer && CurPPLexer && "Lexing a macro, not a file?");
@@ -459,36 +508,85 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
// disabling warnings, etc.
CurPPLexer->LexingRawMode = true;
Token Tok;
- if (auto SkipLength =
- getSkippedRangeForExcludedConditionalBlock(HashTokenLoc)) {
- // Skip to the next '#endif' / '#else' / '#elif'.
- CurLexer->skipOver(*SkipLength);
- }
SourceLocation endLoc;
- while (true) {
- CurLexer->Lex(Tok);
- if (Tok.is(tok::code_completion)) {
- setCodeCompletionReached();
- if (CodeComplete)
- CodeComplete->CodeCompleteInConditionalExclusion();
- continue;
+ /// Keeps track and caches skipped ranges and also retrieves a prior skipped
+ /// range if the same block is re-visited.
+ struct SkippingRangeStateTy {
+ Preprocessor &PP;
+
+ const char *BeginPtr = nullptr;
+ unsigned *SkipRangePtr = nullptr;
+
+ SkippingRangeStateTy(Preprocessor &PP) : PP(PP) {}
+
+ void beginLexPass() {
+ if (BeginPtr)
+ return; // continue skipping a block.
+
+ // Initiate a skipping block and adjust the lexer if we already skipped it
+ // before.
+ BeginPtr = PP.CurLexer->getBufferLocation();
+ SkipRangePtr = &PP.RecordedSkippedRanges[BeginPtr];
+ if (*SkipRangePtr) {
+ PP.CurLexer->seek(PP.CurLexer->getCurrentBufferOffset() + *SkipRangePtr,
+ /*IsAtStartOfLine*/ true);
+ }
}
- // If this is the end of the buffer, we have an error.
- if (Tok.is(tok::eof)) {
- // We don't emit errors for unterminated conditionals here,
- // Lexer::LexEndOfFile can do that properly.
- // Just return and let the caller lex after this #include.
- if (PreambleConditionalStack.isRecording())
- PreambleConditionalStack.SkipInfo.emplace(
- HashTokenLoc, IfTokenLoc, FoundNonSkipPortion, FoundElse, ElseLoc);
- break;
+ void endLexPass(const char *Hashptr) {
+ if (!BeginPtr) {
+ // Not doing normal lexing.
+ assert(PP.CurLexer->isDependencyDirectivesLexer());
+ return;
+ }
+
+      // Finished skipping a block; record the range on the first visit.
+ if (!*SkipRangePtr) {
+ *SkipRangePtr = Hashptr - BeginPtr;
+ }
+ assert(*SkipRangePtr == Hashptr - BeginPtr);
+ BeginPtr = nullptr;
+ SkipRangePtr = nullptr;
}
+ } SkippingRangeState(*this);
- // If this token is not a preprocessor directive, just skip it.
- if (Tok.isNot(tok::hash) || !Tok.isAtStartOfLine())
- continue;
+ while (true) {
+ if (CurLexer->isDependencyDirectivesLexer()) {
+ CurLexer->LexDependencyDirectiveTokenWhileSkipping(Tok);
+ } else {
+ SkippingRangeState.beginLexPass();
+ while (true) {
+ CurLexer->Lex(Tok);
+
+ if (Tok.is(tok::code_completion)) {
+ setCodeCompletionReached();
+ if (CodeComplete)
+ CodeComplete->CodeCompleteInConditionalExclusion();
+ continue;
+ }
+
+ // If this is the end of the buffer, we have an error.
+ if (Tok.is(tok::eof)) {
+ // We don't emit errors for unterminated conditionals here,
+ // Lexer::LexEndOfFile can do that properly.
+ // Just return and let the caller lex after this #include.
+ if (PreambleConditionalStack.isRecording())
+ PreambleConditionalStack.SkipInfo.emplace(HashTokenLoc, IfTokenLoc,
+ FoundNonSkipPortion,
+ FoundElse, ElseLoc);
+ break;
+ }
+
+ // If this token is not a preprocessor directive, just skip it.
+ if (Tok.isNot(tok::hash) || !Tok.isAtStartOfLine())
+ continue;
+
+ break;
+ }
+ }
+ if (Tok.is(tok::eof))
+ break;
// We just parsed a # character at the start of a line, so we're in
// directive mode. Tell the lexer this so any newlines we see will be
@@ -496,6 +594,9 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
CurPPLexer->ParsingPreprocessorDirective = true;
if (CurLexer) CurLexer->SetKeepWhitespaceMode(false);
+ assert(Tok.is(tok::hash));
+ const char *Hashptr = CurLexer->getBufferLocation() - Tok.getLength();
+ assert(CurLexer->getSourceLocation(Hashptr) == Tok.getLocation());
// Read the next token, the directive flavor.
LexUnexpandedToken(Tok);
@@ -556,6 +657,8 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
CurPPLexer->pushConditionalLevel(Tok.getLocation(), /*wasskipping*/true,
/*foundnonskip*/false,
/*foundelse*/false);
+ } else {
+ SuggestTypoedDirective(Tok, Directive);
}
} else if (Directive[0] == 'e') {
StringRef Sub = Directive.substr(1);
@@ -568,6 +671,7 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
// If we popped the outermost skipping block, we're done skipping!
if (!CondInfo.WasSkipping) {
+ SkippingRangeState.endLexPass(Hashptr);
// Restore the value of LexingRawMode so that trailing comments
// are handled correctly, if we've reached the outermost block.
CurPPLexer->LexingRawMode = false;
@@ -585,6 +689,9 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
// as a non-skipping conditional.
PPConditionalInfo &CondInfo = CurPPLexer->peekConditionalLevel();
+ if (!CondInfo.WasSkipping)
+ SkippingRangeState.endLexPass(Hashptr);
+
// If this is a #else with a #else before it, report the error.
if (CondInfo.FoundElse)
Diag(Tok, diag::pp_err_else_after_else);
@@ -610,6 +717,9 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
} else if (Sub == "lif") { // "elif".
PPConditionalInfo &CondInfo = CurPPLexer->peekConditionalLevel();
+ if (!CondInfo.WasSkipping)
+ SkippingRangeState.endLexPass(Hashptr);
+
// If this is a #elif with a #else before it, report the error.
if (CondInfo.FoundElse)
Diag(Tok, diag::pp_err_elif_after_else) << PED_Elif;
@@ -652,6 +762,20 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
PPConditionalInfo &CondInfo = CurPPLexer->peekConditionalLevel();
Token DirectiveToken = Tok;
+ if (!CondInfo.WasSkipping)
+ SkippingRangeState.endLexPass(Hashptr);
+
+      // Warn when using `#elifdef` or `#elifndef` outside of C2x and C++2b
+      // mode, even if this branch is in a skipping block.
+ unsigned DiagID;
+ if (LangOpts.CPlusPlus)
+ DiagID = LangOpts.CPlusPlus2b ? diag::warn_cxx2b_compat_pp_directive
+ : diag::ext_cxx2b_pp_directive;
+ else
+ DiagID = LangOpts.C2x ? diag::warn_c2x_compat_pp_directive
+ : diag::ext_c2x_pp_directive;
+ Diag(Tok, DiagID) << (IsElifDef ? PED_Elifdef : PED_Elifndef);
+
// If this is a #elif with a #else before it, report the error.
if (CondInfo.FoundElse)
Diag(Tok, diag::pp_err_elif_after_else)
@@ -705,7 +829,11 @@ void Preprocessor::SkipExcludedConditionalBlock(SourceLocation HashTokenLoc,
break;
}
}
+ } else {
+ SuggestTypoedDirective(Tok, Directive);
}
+ } else {
+ SuggestTypoedDirective(Tok, Directive);
}
CurPPLexer->ParsingPreprocessorDirective = false;
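SuggestTypoedDirective, called from the new else branches above, proposes a known conditional directive when an unknown one appears inside a skipped block. A sketch of the underlying idea using a plain Levenshtein distance — Clang's own helper has its own candidate list and thresholds, so treat this as illustrative only:

    #include <algorithm>
    #include <string>
    #include <vector>

    static unsigned editDistance(const std::string &A, const std::string &B) {
      std::vector<unsigned> Row(B.size() + 1);
      for (unsigned J = 0; J <= B.size(); ++J)
        Row[J] = J;
      for (unsigned I = 1; I <= A.size(); ++I) {
        unsigned Prev = Row[0];
        Row[0] = I;
        for (unsigned J = 1; J <= B.size(); ++J) {
          unsigned Cur = Row[J];
          Row[J] = std::min({Row[J] + 1, Row[J - 1] + 1,
                             Prev + (A[I - 1] != B[J - 1] ? 1u : 0u)});
          Prev = Cur;
        }
      }
      return Row[B.size()];
    }

    // Returns the closest known directive within one edit, or "".
    static std::string suggestDirective(const std::string &Typo) {
      static const char *Known[] = {"if",   "ifdef", "ifndef",
                                    "elif", "else",  "endif"};
      std::string Best;
      unsigned BestDist = 2; // accept at most one edit
      for (const char *K : Known) {
        unsigned D = editDistance(Typo, K);
        if (D < BestDist) {
          BestDist = D;
          Best = K;
        }
      }
      return Best;
    }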
@@ -817,13 +945,13 @@ Preprocessor::getHeaderToIncludeForDiagnostics(SourceLocation IncLoc,
Optional<FileEntryRef> Preprocessor::LookupFile(
SourceLocation FilenameLoc, StringRef Filename, bool isAngled,
- const DirectoryLookup *FromDir, const FileEntry *FromFile,
- const DirectoryLookup **CurDirArg, SmallVectorImpl<char> *SearchPath,
+ ConstSearchDirIterator FromDir, const FileEntry *FromFile,
+ ConstSearchDirIterator *CurDirArg, SmallVectorImpl<char> *SearchPath,
SmallVectorImpl<char> *RelativePath,
ModuleMap::KnownHeader *SuggestedModule, bool *IsMapped,
bool *IsFrameworkFound, bool SkipCache) {
- const DirectoryLookup *CurDirLocal = nullptr;
- const DirectoryLookup *&CurDir = CurDirArg ? *CurDirArg : CurDirLocal;
+ ConstSearchDirIterator CurDirLocal = nullptr;
+ ConstSearchDirIterator &CurDir = CurDirArg ? *CurDirArg : CurDirLocal;
Module *RequestingModule = getModuleForLocation(FilenameLoc);
bool RequestingModuleIsModuleInterface = !SourceMgr.isInMainFile(FilenameLoc);
@@ -877,8 +1005,8 @@ Optional<FileEntryRef> Preprocessor::LookupFile(
if (FromFile) {
// We're supposed to start looking from after a particular file. Search
// the include path until we find that file or run out of files.
- const DirectoryLookup *TmpCurDir = CurDir;
- const DirectoryLookup *TmpFromDir = nullptr;
+ ConstSearchDirIterator TmpCurDir = CurDir;
+ ConstSearchDirIterator TmpFromDir = nullptr;
while (Optional<FileEntryRef> FE = HeaderInfo.LookupFile(
Filename, FilenameLoc, isAngled, TmpFromDir, &TmpCurDir,
Includers, SearchPath, RelativePath, RequestingModule,
@@ -905,7 +1033,7 @@ Optional<FileEntryRef> Preprocessor::LookupFile(
if (SuggestedModule && !LangOpts.AsmPreprocessor)
HeaderInfo.getModuleMap().diagnoseHeaderInclusion(
RequestingModule, RequestingModuleIsModuleInterface, FilenameLoc,
- Filename, &FE->getFileEntry());
+ Filename, *FE);
return FE;
}
@@ -921,7 +1049,7 @@ Optional<FileEntryRef> Preprocessor::LookupFile(
if (SuggestedModule && !LangOpts.AsmPreprocessor)
HeaderInfo.getModuleMap().diagnoseHeaderInclusion(
RequestingModule, RequestingModuleIsModuleInterface, FilenameLoc,
- Filename, &FE->getFileEntry());
+ Filename, *FE);
return FE;
}
}
@@ -936,7 +1064,7 @@ Optional<FileEntryRef> Preprocessor::LookupFile(
if (SuggestedModule && !LangOpts.AsmPreprocessor)
HeaderInfo.getModuleMap().diagnoseHeaderInclusion(
RequestingModule, RequestingModuleIsModuleInterface,
- FilenameLoc, Filename, &FE->getFileEntry());
+ FilenameLoc, Filename, *FE);
return FE;
}
}
@@ -1182,7 +1310,8 @@ void Preprocessor::HandleDirective(Token &Result) {
}
// If we reached here, the preprocessing token is not valid!
- Diag(Result, diag::err_pp_invalid_directive);
+ // Suggest a similar directive if one is found.
+ Diag(Result, diag::err_pp_invalid_directive) << 0;
// Read the rest of the PP line.
DiscardUntilEndOfDirective();
@@ -1294,7 +1423,7 @@ void Preprocessor::HandleLineDirective() {
} else {
// Parse and validate the string, converting it into a unique ID.
StringLiteralParser Literal(StrTok, *this);
- assert(Literal.isAscii() && "Didn't allow wide strings in");
+ assert(Literal.isOrdinary() && "Didn't allow wide strings in");
if (Literal.hadError) {
DiscardUntilEndOfDirective();
return;
@@ -1431,6 +1560,7 @@ void Preprocessor::HandleDigitDirective(Token &DigitTok) {
// If the StrTok is "eod", then it wasn't present. Otherwise, it must be a
// string followed by eod.
if (StrTok.is(tok::eod)) {
+ Diag(StrTok, diag::ext_pp_gnu_line_directive);
// Treat this like "#line NN", which doesn't change file characteristics.
FileKind = SourceMgr.getFileCharacteristic(DigitTok.getLocation());
} else if (StrTok.isNot(tok::string_literal)) {
@@ -1444,7 +1574,7 @@ void Preprocessor::HandleDigitDirective(Token &DigitTok) {
} else {
// Parse and validate the string, converting it into a unique ID.
StringLiteralParser Literal(StrTok, *this);
- assert(Literal.isAscii() && "Didn't allow wide strings in");
+ assert(Literal.isOrdinary() && "Didn't allow wide strings in");
if (Literal.hadError) {
DiscardUntilEndOfDirective();
return;
@@ -1458,6 +1588,9 @@ void Preprocessor::HandleDigitDirective(Token &DigitTok) {
// If a filename was present, read any flags that are present.
if (ReadLineMarkerFlags(IsFileEntry, IsFileExit, FileKind, *this))
return;
+ if (!SourceMgr.isWrittenInBuiltinFile(DigitTok.getLocation()) &&
+ !SourceMgr.isWrittenInCommandLineFile(DigitTok.getLocation()))
+ Diag(StrTok, diag::ext_pp_gnu_line_directive);
// Exiting to an empty string means pop to the including file, so leave
// FilenameID as -1 in that case.
@@ -1778,6 +1911,41 @@ bool Preprocessor::checkModuleIsAvailable(const LangOptions &LangOpts,
return true;
}
+std::pair<ConstSearchDirIterator, const FileEntry *>
+Preprocessor::getIncludeNextStart(const Token &IncludeNextTok) const {
+ // #include_next is like #include, except that we start searching after
+ // the current found directory. If we can't do this, issue a
+ // diagnostic.
+ ConstSearchDirIterator Lookup = CurDirLookup;
+ const FileEntry *LookupFromFile = nullptr;
+
+ if (isInPrimaryFile() && LangOpts.IsHeaderFile) {
+ // If the main file is a header, then it's either for PCH/AST generation,
+ // or libclang opened it. Either way, handle it as a normal include below
+ // and do not complain about include_next.
+ } else if (isInPrimaryFile()) {
+ Lookup = nullptr;
+ Diag(IncludeNextTok, diag::pp_include_next_in_primary);
+ } else if (CurLexerSubmodule) {
+ // Start looking up in the directory *after* the one in which the current
+ // file would be found, if any.
+ assert(CurPPLexer && "#include_next directive in macro?");
+ LookupFromFile = CurPPLexer->getFileEntry();
+ Lookup = nullptr;
+ } else if (!Lookup) {
+ // The current file was not found by walking the include path. Either it
+ // is the primary file (handled above), or it was found by absolute path,
+ // or it was found relative to such a file.
+ // FIXME: Track enough information so we know which case we're in.
+ Diag(IncludeNextTok, diag::pp_include_next_absolute_path);
+ } else {
+ // Start looking up in the next directory.
+ ++Lookup;
+ }
+
+ return {Lookup, LookupFromFile};
+}
+
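getIncludeNextStart centralizes the rule shared by #include_next and __has_include_next: resume the header search in the directory after the one where the current file was found. The classic use is a wrapper header shadowing a system header (the wrapper path below is illustrative):

    /* mywrap/stdio.h -- found first on the include path. #include_next
       resumes the search in the *next* search directory, so this pulls
       in the real <stdio.h> instead of recursively including itself. */
    #ifndef MYWRAP_STDIO_H
    #define MYWRAP_STDIO_H
    #include_next <stdio.h>
    /* wrapper-specific declarations go here */
    #endif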
/// HandleIncludeDirective - The "\#include" tokens have just been read, read
/// the file to be included from the lexer, then include it! This is a common
/// routine with functionality shared between \#include, \#include_next and
@@ -1785,7 +1953,7 @@ bool Preprocessor::checkModuleIsAvailable(const LangOptions &LangOpts,
/// specifies the file to start searching from.
void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
Token &IncludeTok,
- const DirectoryLookup *LookupFrom,
+ ConstSearchDirIterator LookupFrom,
const FileEntry *LookupFromFile) {
Token FilenameTok;
if (LexHeaderName(FilenameTok))
@@ -1830,11 +1998,11 @@ void Preprocessor::HandleIncludeDirective(SourceLocation HashLoc,
}
Optional<FileEntryRef> Preprocessor::LookupHeaderIncludeOrImport(
- const DirectoryLookup **CurDir, StringRef& Filename,
+ ConstSearchDirIterator *CurDir, StringRef &Filename,
SourceLocation FilenameLoc, CharSourceRange FilenameRange,
const Token &FilenameTok, bool &IsFrameworkFound, bool IsImportDecl,
- bool &IsMapped, const DirectoryLookup *LookupFrom,
- const FileEntry *LookupFromFile, StringRef& LookupFilename,
+ bool &IsMapped, ConstSearchDirIterator LookupFrom,
+ const FileEntry *LookupFromFile, StringRef &LookupFilename,
SmallVectorImpl<char> &RelativePath, SmallVectorImpl<char> &SearchPath,
ModuleMap::KnownHeader &SuggestedModule, bool isAngled) {
Optional<FileEntryRef> File = LookupFile(
@@ -1845,28 +2013,6 @@ Optional<FileEntryRef> Preprocessor::LookupHeaderIncludeOrImport(
if (File)
return File;
- if (Callbacks) {
- // Give the clients a chance to recover.
- SmallString<128> RecoveryPath;
- if (Callbacks->FileNotFound(Filename, RecoveryPath)) {
- if (auto DE = FileMgr.getOptionalDirectoryRef(RecoveryPath)) {
- // Add the recovery path to the list of search paths.
- DirectoryLookup DL(*DE, SrcMgr::C_User, false);
- HeaderInfo.AddSearchPath(DL, isAngled);
-
- // Try the lookup again, skipping the cache.
- Optional<FileEntryRef> File = LookupFile(
- FilenameLoc,
- LookupFilename, isAngled,
- LookupFrom, LookupFromFile, CurDir, nullptr, nullptr,
- &SuggestedModule, &IsMapped, /*IsFrameworkFound=*/nullptr,
- /*SkipCache*/ true);
- if (File)
- return File;
- }
- }
- }
-
if (SuppressIncludeNotFoundError)
return None;
@@ -1927,7 +2073,7 @@ Optional<FileEntryRef> Preprocessor::LookupHeaderIncludeOrImport(
}
// If the file is still not found, just go with the vanilla diagnostic
- assert(!File.hasValue() && "expected missing file");
+ assert(!File && "expected missing file");
Diag(FilenameTok, diag::err_pp_file_not_found)
<< OriginalFilename << FilenameRange;
if (IsFrameworkFound) {
@@ -1960,7 +2106,7 @@ Optional<FileEntryRef> Preprocessor::LookupHeaderIncludeOrImport(
/// lookup.
Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
SourceLocation HashLoc, Token &IncludeTok, Token &FilenameTok,
- SourceLocation EndLoc, const DirectoryLookup *LookupFrom,
+ SourceLocation EndLoc, ConstSearchDirIterator LookupFrom,
const FileEntry *LookupFromFile) {
SmallString<128> FilenameBuffer;
StringRef Filename = getSpelling(FilenameTok, FilenameBuffer);
@@ -2010,7 +2156,7 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
// Search include directories.
bool IsMapped = false;
bool IsFrameworkFound = false;
- const DirectoryLookup *CurDir;
+ ConstSearchDirIterator CurDir = nullptr;
SmallString<1024> SearchPath;
SmallString<1024> RelativePath;
// We get the raw path only if we have 'Callbacks' to which we later pass
@@ -2035,10 +2181,6 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
IsFrameworkFound, IsImportDecl, IsMapped, LookupFrom, LookupFromFile,
LookupFilename, RelativePath, SearchPath, SuggestedModule, isAngled);
- // Record the header's filename for later use.
- if (File)
- CurLexer->addInclude(OriginalFilename, File->getFileEntry(), FilenameLoc);
-
if (usingPCHWithThroughHeader() && SkippingUntilPCHThroughHeader) {
if (File && isPCHThroughHeader(&File->getFileEntry()))
SkippingUntilPCHThroughHeader = false;
@@ -2179,11 +2321,11 @@ Preprocessor::ImportAction Preprocessor::HandleHeaderIncludeOrImport(
if (Callbacks && !IsImportDecl) {
// Notify the callback object that we've seen an inclusion directive.
// FIXME: Use a different callback for a pp-import?
- Callbacks->InclusionDirective(
- HashLoc, IncludeTok, LookupFilename, isAngled, FilenameRange,
- File ? &File->getFileEntry() : nullptr, SearchPath, RelativePath,
- Action == Import ? SuggestedModule.getModule() : nullptr,
- FileCharacter);
+ Callbacks->InclusionDirective(HashLoc, IncludeTok, LookupFilename, isAngled,
+ FilenameRange, File, SearchPath, RelativePath,
+ Action == Import ? SuggestedModule.getModule()
+ : nullptr,
+ FileCharacter);
if (Action == Skip && File)
Callbacks->FileSkipped(*File, FilenameTok, FileCharacter);
}
@@ -2397,34 +2539,9 @@ void Preprocessor::HandleIncludeNextDirective(SourceLocation HashLoc,
Token &IncludeNextTok) {
Diag(IncludeNextTok, diag::ext_pp_include_next_directive);
- // #include_next is like #include, except that we start searching after
- // the current found directory. If we can't do this, issue a
- // diagnostic.
- const DirectoryLookup *Lookup = CurDirLookup;
- const FileEntry *LookupFromFile = nullptr;
- if (isInPrimaryFile() && LangOpts.IsHeaderFile) {
- // If the main file is a header, then it's either for PCH/AST generation,
- // or libclang opened it. Either way, handle it as a normal include below
- // and do not complain about include_next.
- } else if (isInPrimaryFile()) {
- Lookup = nullptr;
- Diag(IncludeNextTok, diag::pp_include_next_in_primary);
- } else if (CurLexerSubmodule) {
- // Start looking up in the directory *after* the one in which the current
- // file would be found, if any.
- assert(CurPPLexer && "#include_next directive in macro?");
- LookupFromFile = CurPPLexer->getFileEntry();
- Lookup = nullptr;
- } else if (!Lookup) {
- // The current file was not found by walking the include path. Either it
- // is the primary file (handled above), or it was found by absolute path,
- // or it was found relative to such a file.
- // FIXME: Track enough information so we know which case we're in.
- Diag(IncludeNextTok, diag::pp_include_next_absolute_path);
- } else {
- // Start looking up in the next directory.
- ++Lookup;
- }
+ ConstSearchDirIterator Lookup = nullptr;
+ const FileEntry *LookupFromFile;
+ std::tie(Lookup, LookupFromFile) = getIncludeNextStart(IncludeNextTok);
return HandleIncludeDirective(HashLoc, IncludeNextTok, Lookup,
LookupFromFile);
@@ -2711,12 +2828,14 @@ MacroInfo *Preprocessor::ReadOptionalMacroParameterListAndBody(
if (!Tok.is(tok::eod))
LastTok = Tok;
+ SmallVector<Token, 16> Tokens;
+
// Read the rest of the macro body.
if (MI->isObjectLike()) {
// Object-like macros are very simple, just read their body.
while (Tok.isNot(tok::eod)) {
LastTok = Tok;
- MI->AddTokenToBody(Tok);
+ Tokens.push_back(Tok);
// Get the next token of the macro.
LexUnexpandedToken(Tok);
}
@@ -2731,7 +2850,7 @@ MacroInfo *Preprocessor::ReadOptionalMacroParameterListAndBody(
LastTok = Tok;
if (!Tok.isOneOf(tok::hash, tok::hashat, tok::hashhash)) {
- MI->AddTokenToBody(Tok);
+ Tokens.push_back(Tok);
if (VAOCtx.isVAOptToken(Tok)) {
// If we're already within a VAOPT, emit an error.
@@ -2745,7 +2864,7 @@ MacroInfo *Preprocessor::ReadOptionalMacroParameterListAndBody(
Diag(Tok, diag::err_pp_missing_lparen_in_vaopt_use);
return nullptr;
}
- MI->AddTokenToBody(Tok);
+ Tokens.push_back(Tok);
VAOCtx.sawVAOptFollowedByOpeningParens(Tok.getLocation());
LexUnexpandedToken(Tok);
if (Tok.is(tok::hashhash)) {
@@ -2756,10 +2875,10 @@ MacroInfo *Preprocessor::ReadOptionalMacroParameterListAndBody(
} else if (VAOCtx.isInVAOpt()) {
if (Tok.is(tok::r_paren)) {
if (VAOCtx.sawClosingParen()) {
- const unsigned NumTokens = MI->getNumTokens();
- assert(NumTokens >= 3 && "Must have seen at least __VA_OPT__( "
- "and a subsequent tok::r_paren");
- if (MI->getReplacementToken(NumTokens - 2).is(tok::hashhash)) {
+ assert(Tokens.size() >= 3 &&
+ "Must have seen at least __VA_OPT__( "
+ "and a subsequent tok::r_paren");
+ if (Tokens[Tokens.size() - 2].is(tok::hashhash)) {
Diag(Tok, diag::err_vaopt_paste_at_end);
return nullptr;
}
@@ -2778,7 +2897,7 @@ MacroInfo *Preprocessor::ReadOptionalMacroParameterListAndBody(
// things.
if (getLangOpts().TraditionalCPP) {
Tok.setKind(tok::unknown);
- MI->AddTokenToBody(Tok);
+ Tokens.push_back(Tok);
// Get the next token of the macro.
LexUnexpandedToken(Tok);
@@ -2794,17 +2913,16 @@ MacroInfo *Preprocessor::ReadOptionalMacroParameterListAndBody(
LexUnexpandedToken(Tok);
if (Tok.is(tok::eod)) {
- MI->AddTokenToBody(LastTok);
+ Tokens.push_back(LastTok);
break;
}
- unsigned NumTokens = MI->getNumTokens();
- if (NumTokens && Tok.getIdentifierInfo() == Ident__VA_ARGS__ &&
- MI->getReplacementToken(NumTokens-1).is(tok::comma))
+ if (!Tokens.empty() && Tok.getIdentifierInfo() == Ident__VA_ARGS__ &&
+ Tokens[Tokens.size() - 1].is(tok::comma))
MI->setHasCommaPasting();
// Things look ok, add the '##' token to the macro.
- MI->AddTokenToBody(LastTok);
+ Tokens.push_back(LastTok);
continue;
}
@@ -2823,7 +2941,7 @@ MacroInfo *Preprocessor::ReadOptionalMacroParameterListAndBody(
// confused.
if (getLangOpts().AsmPreprocessor && Tok.isNot(tok::eod)) {
LastTok.setKind(tok::unknown);
- MI->AddTokenToBody(LastTok);
+ Tokens.push_back(LastTok);
continue;
} else {
Diag(Tok, diag::err_pp_stringize_not_parameter)
@@ -2833,13 +2951,13 @@ MacroInfo *Preprocessor::ReadOptionalMacroParameterListAndBody(
}
// Things look ok, add the '#' and param name tokens to the macro.
- MI->AddTokenToBody(LastTok);
+ Tokens.push_back(LastTok);
// If the token following '#' is VAOPT, let the next iteration handle it
// and check it for correctness, otherwise add the token and prime the
// loop with the next one.
if (!VAOCtx.isVAOptToken(Tok)) {
- MI->AddTokenToBody(Tok);
+ Tokens.push_back(Tok);
LastTok = Tok;
// Get the next token of the macro.
@@ -2855,6 +2973,8 @@ MacroInfo *Preprocessor::ReadOptionalMacroParameterListAndBody(
}
}
MI->setDefinitionEndLoc(LastTok.getLocation());
+
+ MI->setTokens(Tokens, BP);
return MI;
}
/// HandleDefineDirective - Implements \#define. This consumes the entire macro
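The hunks above replace the per-token MI->AddTokenToBody(Tok) calls with a local SmallVector that is handed to MacroInfo::setTokens(Tokens, BP) once the whole body has been read, letting the macro make one right-sized allocation in the bump allocator. A minimal analogue of the buffer-then-commit pattern, with stand-in types rather than Clang's:

    #include <utility>
    #include <vector>

    struct Macro {
      std::vector<int> Body;                  // stand-in for Token storage
      void setTokens(std::vector<int> Toks) { // one right-sized allocation
        Body = std::move(Toks);
      }
    };

    void readBody(Macro &M) {
      std::vector<int> Tokens;
      for (int Tok = 0; Tok < 3; ++Tok)       // stand-in for the lex loop
        Tokens.push_back(Tok);
      M.setTokens(std::move(Tokens));         // commit exactly once
    }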
@@ -3007,7 +3127,7 @@ void Preprocessor::HandleDefineDirective(
Tok.startToken();
Tok.setKind(tok::kw__Static_assert);
Tok.setIdentifierInfo(getIdentifierInfo("_Static_assert"));
- MI->AddTokenToBody(Tok);
+ MI->setTokens({Tok}, BP);
(void)appendDefMacroDirective(getIdentifierInfo("static_assert"), MI);
}
}
@@ -3268,6 +3388,23 @@ void Preprocessor::HandleElifFamilyDirective(Token &ElifToken,
: PED_Elifndef;
++NumElse;
+ // Warn about using `#elifdef` & `#elifndef` outside of C2x & C++2b mode.
+ switch (DirKind) {
+ case PED_Elifdef:
+ case PED_Elifndef:
+ unsigned DiagID;
+ if (LangOpts.CPlusPlus)
+ DiagID = LangOpts.CPlusPlus2b ? diag::warn_cxx2b_compat_pp_directive
+ : diag::ext_cxx2b_pp_directive;
+ else
+ DiagID = LangOpts.C2x ? diag::warn_c2x_compat_pp_directive
+ : diag::ext_c2x_pp_directive;
+ Diag(ElifToken, DiagID) << DirKind;
+ break;
+ default:
+ break;
+ }
+
// #elif directive in a non-skipping conditional... start skipping.
// We don't care what the condition is, because we will always skip it (since
// the block immediately before it was included).
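For reference, the directives being diagnosed are the C2x/C++2b conditionals #elifdef and #elifndef, shorthands for #elif defined(...) and #elif !defined(...):

    #ifdef FAST_PATH
    int mode = 1;
    #elifdef SLOW_PATH    /* same as: #elif defined(SLOW_PATH) */
    int mode = 2;
    #elifndef NO_PATH     /* same as: #elif !defined(NO_PATH) */
    int mode = 3;
    #else
    int mode = 0;
    #endif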
diff --git a/contrib/llvm-project/clang/lib/Lex/PPExpressions.cpp b/contrib/llvm-project/clang/lib/Lex/PPExpressions.cpp
index 424cccfdb9ee..bd35689f18e7 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPExpressions.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPExpressions.cpp
@@ -331,6 +331,14 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
: diag::ext_cxx2b_size_t_suffix
: diag::err_cxx2b_size_t_suffix);
+ // 'wb/uwb' literals are a C2x feature. We explicitly do not support the
+ // suffix in C++ as an extension because a library-based UDL that resolves
+ // to a library type may be more appropriate there.
+ if (Literal.isBitInt)
+ PP.Diag(PeekTok, PP.getLangOpts().C2x
+ ? diag::warn_c2x_compat_bitint_suffix
+ : diag::ext_c2x_bitint_suffix);
+
// Parse the integer literal into Result.
if (Literal.GetIntegerValue(Result.Val)) {
// Overflow parsing integer literal.
@@ -400,9 +408,18 @@ static bool EvaluateValue(PPValue &Result, Token &PeekTok, DefinedTracker &DT,
// Set the value.
Val = Literal.getValue();
// Set the signedness. UTF-16 and UTF-32 are always unsigned
+ // UTF-8 is unsigned if -fchar8_t is specified.
if (Literal.isWide())
Val.setIsUnsigned(!TargetInfo::isTypeSigned(TI.getWCharType()));
- else if (!Literal.isUTF16() && !Literal.isUTF32())
+ else if (Literal.isUTF16() || Literal.isUTF32())
+ Val.setIsUnsigned(true);
+ else if (Literal.isUTF8()) {
+ if (PP.getLangOpts().CPlusPlus)
+ Val.setIsUnsigned(
+ PP.getLangOpts().Char8 ? true : !PP.getLangOpts().CharIsSigned);
+ else
+ Val.setIsUnsigned(true);
+ } else
Val.setIsUnsigned(!PP.getLangOpts().CharIsSigned);
if (Result.Val.getBitWidth() > Val.getBitWidth()) {
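Taken together, the two hunks above change how literals evaluate inside #if: bit-precise suffixes are now accepted (with a compatibility warning in C2x and an extension diagnostic otherwise), and u8 character literals are unsigned in C and in C++ when char8_t is in effect. Illustrative directives; the exact diagnostics depend on the language mode:

    /* C2x bit-precise integer suffixes in a preprocessor expression */
    #if 3wb + 1uwb == 4
    #endif

    /* u8'' is unsigned (in C2x, and in C++ with char8_t), so \xff
       compares as 255 rather than -1 */
    #if u8'\xff' == 0xff
    #endif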
diff --git a/contrib/llvm-project/clang/lib/Lex/PPLexerChange.cpp b/contrib/llvm-project/clang/lib/Lex/PPLexerChange.cpp
index f8b0a2c5f71b..36d3aa59bb2f 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPLexerChange.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPLexerChange.cpp
@@ -12,7 +12,6 @@
//===----------------------------------------------------------------------===//
#include "clang/Basic/FileManager.h"
-#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Lex/HeaderSearch.h"
#include "clang/Lex/LexDiagnostic.h"
@@ -66,7 +65,7 @@ PreprocessorLexer *Preprocessor::getCurrentFileLexer() const {
/// EnterSourceFile - Add a source file to the top of the include stack and
/// start lexing tokens from it instead of the current buffer.
-bool Preprocessor::EnterSourceFile(FileID FID, const DirectoryLookup *CurDir,
+bool Preprocessor::EnterSourceFile(FileID FID, ConstSearchDirIterator CurDir,
SourceLocation Loc,
bool IsFirstIncludeOfFile) {
assert(!CurTokenLexer && "Cannot #include a file inside a macro!");
@@ -92,15 +91,27 @@ bool Preprocessor::EnterSourceFile(FileID FID, const DirectoryLookup *CurDir,
CodeCompletionFileLoc.getLocWithOffset(CodeCompletionOffset);
}
- EnterSourceFileWithLexer(
- new Lexer(FID, *InputFile, *this, IsFirstIncludeOfFile), CurDir);
+ Lexer *TheLexer = new Lexer(FID, *InputFile, *this, IsFirstIncludeOfFile);
+ if (getPreprocessorOpts().DependencyDirectivesForFile &&
+ FID != PredefinesFileID) {
+ if (Optional<FileEntryRef> File = SourceMgr.getFileEntryRefForID(FID)) {
+ if (Optional<ArrayRef<dependency_directives_scan::Directive>>
+ DepDirectives =
+ getPreprocessorOpts().DependencyDirectivesForFile(*File)) {
+ TheLexer->DepDirectives = *DepDirectives;
+ }
+ }
+ }
+
+ EnterSourceFileWithLexer(TheLexer, CurDir);
return false;
}
/// EnterSourceFileWithLexer - Add a source file to the top of the include stack
/// and start lexing tokens from it instead of the current buffer.
void Preprocessor::EnterSourceFileWithLexer(Lexer *TheLexer,
- const DirectoryLookup *CurDir) {
+ ConstSearchDirIterator CurDir) {
+ PreprocessorLexer *PrevPPLexer = CurPPLexer;
// Add the current lexer to the include stack.
if (CurPPLexer || CurTokenLexer)
@@ -111,15 +122,26 @@ void Preprocessor::EnterSourceFileWithLexer(Lexer *TheLexer,
CurDirLookup = CurDir;
CurLexerSubmodule = nullptr;
if (CurLexerKind != CLK_LexAfterModuleImport)
- CurLexerKind = CLK_Lexer;
+ CurLexerKind = TheLexer->isDependencyDirectivesLexer()
+ ? CLK_DependencyDirectivesLexer
+ : CLK_Lexer;
// Notify the client, if desired, that we are in a new source file.
if (Callbacks && !CurLexer->Is_PragmaLexer) {
SrcMgr::CharacteristicKind FileType =
SourceMgr.getFileCharacteristic(CurLexer->getFileLoc());
- Callbacks->FileChanged(CurLexer->getFileLoc(),
- PPCallbacks::EnterFile, FileType);
+ FileID PrevFID;
+ SourceLocation EnterLoc;
+ if (PrevPPLexer) {
+ PrevFID = PrevPPLexer->getFileID();
+ EnterLoc = PrevPPLexer->getSourceLocation();
+ }
+ Callbacks->FileChanged(CurLexer->getFileLoc(), PPCallbacks::EnterFile,
+ FileType, PrevFID);
+ Callbacks->LexedFileChanged(CurLexer->getFileID(),
+ PPCallbacks::LexedFileChangeReason::EnterFile,
+ FileType, PrevFID, EnterLoc);
}
}
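Clients interested in the file the lexer is actually working through can override the new LexedFileChanged callback alongside the existing FileChanged. A sketch of an observer — the signature follows the calls in this diff and may differ between Clang versions:

    #include "clang/Lex/PPCallbacks.h"
    #include "llvm/Support/raw_ostream.h"

    class FileChangeLogger : public clang::PPCallbacks {
      void LexedFileChanged(clang::FileID FID, LexedFileChangeReason Reason,
                            clang::SrcMgr::CharacteristicKind FileType,
                            clang::FileID PrevFID,
                            clang::SourceLocation Loc) override {
        llvm::errs() << (Reason == LexedFileChangeReason::EnterFile
                             ? "entering"
                             : "exiting")
                     << " a lexed file\n";
      }
    };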
@@ -303,46 +325,10 @@ void Preprocessor::diagnoseMissingHeaderInUmbrellaDir(const Module &Mod) {
}
}
-void Preprocessor::ResolvePragmaIncludeInstead(
- const SourceLocation Location) const {
- assert(Location.isValid());
- if (CurLexer == nullptr)
- return;
-
- if (SourceMgr.isInSystemHeader(Location))
- return;
-
- for (const auto &Include : CurLexer->getIncludeHistory()) {
- StringRef Filename = Include.getKey();
- const PreprocessorLexer::IncludeInfo &Info = Include.getValue();
- ArrayRef<SmallString<32>> Aliases =
- HeaderInfo.getFileInfo(Info.File).Aliases.getArrayRef();
-
- if (Aliases.empty())
- continue;
-
- switch (Aliases.size()) {
- case 1:
- Diag(Info.Location, diag::err_pragma_include_instead_system_reserved)
- << Filename << 0 << Aliases[0];
- continue;
- case 2:
- Diag(Info.Location, diag::err_pragma_include_instead_system_reserved)
- << Filename << 1 << Aliases[0] << Aliases[1];
- continue;
- default: {
- Diag(Info.Location, diag::err_pragma_include_instead_system_reserved)
- << Filename << 2 << ("{'" + llvm::join(Aliases, "', '") + "'}");
- }
- }
- }
-}
-
/// HandleEndOfFile - This callback is invoked when the lexer hits the end of
/// the current file. This either returns the EOF token or pops a level off
/// the include stack and keeps going.
-bool Preprocessor::HandleEndOfFile(Token &Result, SourceLocation EndLoc,
- bool isEndOfMacro) {
+bool Preprocessor::HandleEndOfFile(Token &Result, bool isEndOfMacro) {
assert(!CurTokenLexer &&
"Ending a file when currently in a macro!");
@@ -412,9 +398,6 @@ bool Preprocessor::HandleEndOfFile(Token &Result, SourceLocation EndLoc,
}
}
- if (EndLoc.isValid())
- ResolvePragmaIncludeInstead(EndLoc);
-
// Complain about reaching a true EOF within arc_cf_code_audited.
// We don't want to complain about reaching the end of a macro
// instantiation or a _Pragma.
@@ -432,8 +415,13 @@ bool Preprocessor::HandleEndOfFile(Token &Result, SourceLocation EndLoc,
// instantiation or a _Pragma.
if (PragmaAssumeNonNullLoc.isValid() &&
!isEndOfMacro && !(CurLexer && CurLexer->Is_PragmaLexer)) {
- Diag(PragmaAssumeNonNullLoc, diag::err_pp_eof_in_assume_nonnull);
-
+ // If we're at the end of generating a preamble, we should record the
+ // unterminated \#pragma clang assume_nonnull so we can restore it later
+ // when the preamble is loaded into the main file.
+ if (isRecordingPreamble() && isInPrimaryFile())
+ PreambleRecordedPragmaAssumeNonNullLoc = PragmaAssumeNonNullLoc;
+ else
+ Diag(PragmaAssumeNonNullLoc, diag::err_pp_eof_in_assume_nonnull);
// Recover by leaving immediately.
PragmaAssumeNonNullLoc = SourceLocation();
}
@@ -508,16 +496,23 @@ bool Preprocessor::HandleEndOfFile(Token &Result, SourceLocation EndLoc,
// Notify the client, if desired, that we are in a new source file.
if (Callbacks && !isEndOfMacro && CurPPLexer) {
+ SourceLocation Loc = CurPPLexer->getSourceLocation();
SrcMgr::CharacteristicKind FileType =
- SourceMgr.getFileCharacteristic(CurPPLexer->getSourceLocation());
- Callbacks->FileChanged(CurPPLexer->getSourceLocation(),
- PPCallbacks::ExitFile, FileType, ExitedFID);
+ SourceMgr.getFileCharacteristic(Loc);
+ Callbacks->FileChanged(Loc, PPCallbacks::ExitFile, FileType, ExitedFID);
+ Callbacks->LexedFileChanged(CurPPLexer->getFileID(),
+ PPCallbacks::LexedFileChangeReason::ExitFile,
+ FileType, ExitedFID, Loc);
}
- // Restore conditional stack from the preamble right after exiting from the
- // predefines file.
- if (ExitedFromPredefinesFile)
+ // Restore the conditional stack, as well as the recorded
+ // \#pragma clang assume_nonnull, from the preamble right after exiting
+ // the predefines file.
+ if (ExitedFromPredefinesFile) {
replayPreambleConditionalStack();
+ if (PreambleRecordedPragmaAssumeNonNullLoc.isValid())
+ PragmaAssumeNonNullLoc = PreambleRecordedPragmaAssumeNonNullLoc;
+ }
if (!isEndOfMacro && CurPPLexer && FoundPCHThroughHeader &&
(isInPrimaryFile() ||
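The effect of the assume_nonnull change above: when a preamble ends inside an open region, the begin location is stashed and re-armed when the preamble is replayed into the main file, instead of emitting err_pp_eof_in_assume_nonnull. The situation it now tolerates looks like:

    /* header text forming the preamble */
    #pragma clang assume_nonnull begin
    int *find(int *p);   /* pointers here default to _Nonnull */
    /* the preamble bound may fall here, before the matching 'end' */
    #pragma clang assume_nonnull end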
@@ -603,7 +598,7 @@ bool Preprocessor::HandleEndOfTokenLexer(Token &Result) {
TokenLexerCache[NumCachedTokenLexers++] = std::move(CurTokenLexer);
// Handle this like a #include file being popped off the stack.
- return HandleEndOfFile(Result, {}, true);
+ return HandleEndOfFile(Result, true);
}
/// RemoveTopOfLexerStack - Pop the current lexer/macro exp off the top of the
diff --git a/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp b/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp
index f6c95a8b67c6..bf46e5422bc8 100644
--- a/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PPMacroExpansion.cpp
@@ -11,6 +11,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Basic/AttributeCommonInfo.h"
#include "clang/Basic/Attributes.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/FileManager.h"
@@ -342,6 +343,7 @@ void Preprocessor::RegisterBuiltinMacros() {
Ident__TIME__ = RegisterBuiltinMacro(*this, "__TIME__");
Ident__COUNTER__ = RegisterBuiltinMacro(*this, "__COUNTER__");
Ident_Pragma = RegisterBuiltinMacro(*this, "_Pragma");
+ Ident__FLT_EVAL_METHOD__ = RegisterBuiltinMacro(*this, "__FLT_EVAL_METHOD__");
// C++ Standing Document Extensions.
if (getLangOpts().CPlusPlus)
@@ -1157,9 +1159,9 @@ static bool HasExtension(const Preprocessor &PP, StringRef Extension) {
/// EvaluateHasIncludeCommon - Process a '__has_include("path")'
/// or '__has_include_next("path")' expression.
/// Returns true if successful.
-static bool EvaluateHasIncludeCommon(Token &Tok,
- IdentifierInfo *II, Preprocessor &PP,
- const DirectoryLookup *LookupFrom,
+static bool EvaluateHasIncludeCommon(Token &Tok, IdentifierInfo *II,
+ Preprocessor &PP,
+ ConstSearchDirIterator LookupFrom,
const FileEntry *LookupFromFile) {
// Save the location of the current token. If a '(' is later found, use
// that location. If not, use the end of this location instead.
@@ -1241,48 +1243,19 @@ static bool EvaluateHasIncludeCommon(Token &Tok,
}
// Get the result value. A result of true means the file exists.
- return File.hasValue();
+ return File.has_value();
}
-/// EvaluateHasInclude - Process a '__has_include("path")' expression.
-/// Returns true if successful.
-static bool EvaluateHasInclude(Token &Tok, IdentifierInfo *II,
- Preprocessor &PP) {
- return EvaluateHasIncludeCommon(Tok, II, PP, nullptr, nullptr);
+bool Preprocessor::EvaluateHasInclude(Token &Tok, IdentifierInfo *II) {
+ return EvaluateHasIncludeCommon(Tok, II, *this, nullptr, nullptr);
}
-/// EvaluateHasIncludeNext - Process '__has_include_next("path")' expression.
-/// Returns true if successful.
-static bool EvaluateHasIncludeNext(Token &Tok,
- IdentifierInfo *II, Preprocessor &PP) {
- // __has_include_next is like __has_include, except that we start
- // searching after the current found directory. If we can't do this,
- // issue a diagnostic.
- // FIXME: Factor out duplication with
- // Preprocessor::HandleIncludeNextDirective.
- const DirectoryLookup *Lookup = PP.GetCurDirLookup();
- const FileEntry *LookupFromFile = nullptr;
- if (PP.isInPrimaryFile() && PP.getLangOpts().IsHeaderFile) {
- // If the main file is a header, then it's either for PCH/AST generation,
- // or libclang opened it. Either way, handle it as a normal include below
- // and do not complain about __has_include_next.
- } else if (PP.isInPrimaryFile()) {
- Lookup = nullptr;
- PP.Diag(Tok, diag::pp_include_next_in_primary);
- } else if (PP.getCurrentLexerSubmodule()) {
- // Start looking up in the directory *after* the one in which the current
- // file would be found, if any.
- assert(PP.getCurrentLexer() && "#include_next directive in macro?");
- LookupFromFile = PP.getCurrentLexer()->getFileEntry();
- Lookup = nullptr;
- } else if (!Lookup) {
- PP.Diag(Tok, diag::pp_include_next_absolute_path);
- } else {
- // Start looking up in the next directory.
- ++Lookup;
- }
+bool Preprocessor::EvaluateHasIncludeNext(Token &Tok, IdentifierInfo *II) {
+ ConstSearchDirIterator Lookup = nullptr;
+ const FileEntry *LookupFromFile;
+ std::tie(Lookup, LookupFromFile) = getIncludeNextStart(Tok);
- return EvaluateHasIncludeCommon(Tok, II, PP, Lookup, LookupFromFile);
+ return EvaluateHasIncludeCommon(Tok, II, *this, Lookup, LookupFromFile);
}
/// Process single-argument builtin feature-like macros that return
@@ -1338,7 +1311,7 @@ already_lexed:
case tok::l_paren:
++ParenDepth;
- if (Result.hasValue())
+ if (Result)
break;
if (!SuppressDiagnostic) {
PP.Diag(Tok.getLocation(), diag::err_pp_nested_paren) << II;
@@ -1352,7 +1325,7 @@ already_lexed:
// The last ')' has been reached; return the value if one found or
// a diagnostic and a dummy value.
- if (Result.hasValue()) {
+ if (Result) {
OS << Result.getValue();
// For strict conformance to __has_cpp_attribute rules, use 'L'
// suffix for dated literals.
@@ -1368,7 +1341,7 @@ already_lexed:
default: {
// Parse the macro argument, if one not found so far.
- if (Result.hasValue())
+ if (Result)
break;
bool HasLexedNextToken = false;
@@ -1539,7 +1512,7 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
} else {
FN += PLoc.getFilename();
}
- getLangOpts().remapPathPrefix(FN);
+ processPathForFileMacro(FN, getLangOpts(), getTargetInfo());
Lexer::Stringify(FN);
OS << '"' << FN << '"';
}
@@ -1603,6 +1576,38 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
// Surround the string with " and strip the trailing newline.
OS << '"' << StringRef(Result).drop_back() << '"';
Tok.setKind(tok::string_literal);
+ } else if (II == Ident__FLT_EVAL_METHOD__) {
+ // Expand __FLT_EVAL_METHOD__ to the current evaluation method.
+ if (getTUFPEvalMethod() ==
+ LangOptions::FPEvalMethodKind::FEM_Indeterminable) {
+ // This is possible if `AllowFPReassoc` or `AllowReciprocal` is enabled.
+ // These modes can be triggered via the command-line option `-ffast-math`
+ // or via a `pragma float_control`.
+ // __FLT_EVAL_METHOD__ expands to -1.
+ // Emit the `minus` token now; the literal `1` follows on the stream.
+ auto Toks = std::make_unique<Token[]>(1);
+ OS << "-";
+ Tok.setKind(tok::minus);
+ // Push the token `1` to the stream.
+ Token NumberToken;
+ NumberToken.startToken();
+ NumberToken.setKind(tok::numeric_constant);
+ NumberToken.setLiteralData("1");
+ NumberToken.setLength(1);
+ Toks[0] = NumberToken;
+ EnterTokenStream(std::move(Toks), 1, /*DisableMacroExpansion*/ false,
+ /*IsReinject*/ false);
+ } else {
+ OS << getTUFPEvalMethod();
+ // __FLT_EVAL_METHOD__ expands to a simple numeric value.
+ Tok.setKind(tok::numeric_constant);
+ if (getLastFPEvalPragmaLocation().isValid()) {
+ // The program is ill-formed. The value of __FLT_EVAL_METHOD__ is
+ // altered by the pragma.
+ Diag(Tok, diag::err_illegal_use_of_flt_eval_macro);
+ Diag(getLastFPEvalPragmaLocation(), diag::note_pragma_entered_here);
+ }
+ }
} else if (II == Ident__COUNTER__) {
// __COUNTER__ expands to a simple numeric value.
OS << CounterValue++;
@@ -1636,7 +1641,9 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
// usual allocation and deallocation functions. Required by libc++
return 201802;
default:
- return true;
+ return Builtin::evaluateRequiredTargetFeatures(
+ getBuiltinInfo().getRequiredFeatures(II->getBuiltinID()),
+ getTargetInfo().getTargetOpts().FeatureMap);
}
return true;
} else if (II->getTokenID() != tok::identifier ||
@@ -1683,8 +1690,9 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
[this](Token &Tok, bool &HasLexedNextToken) -> int {
IdentifierInfo *II = ExpectFeatureIdentifierInfo(Tok, *this,
diag::err_feature_check_malformed);
- return II ? hasAttribute(AttrSyntax::GNU, nullptr, II,
- getTargetInfo(), getLangOpts()) : 0;
+ return II ? hasAttribute(AttributeCommonInfo::Syntax::AS_GNU, nullptr,
+ II, getTargetInfo(), getLangOpts())
+ : 0;
});
} else if (II == Ident__has_declspec) {
EvaluateFeatureLikeBuiltinMacro(OS, Tok, II, *this, true,
@@ -1694,8 +1702,8 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
if (II) {
const LangOptions &LangOpts = getLangOpts();
return LangOpts.DeclSpecKeyword &&
- hasAttribute(AttrSyntax::Declspec, nullptr, II,
- getTargetInfo(), LangOpts);
+ hasAttribute(AttributeCommonInfo::Syntax::AS_Declspec, nullptr,
+ II, getTargetInfo(), LangOpts);
}
return false;
@@ -1724,7 +1732,9 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
diag::err_feature_check_malformed);
}
- AttrSyntax Syntax = IsCXX ? AttrSyntax::CXX : AttrSyntax::C;
+ AttributeCommonInfo::Syntax Syntax =
+ IsCXX ? AttributeCommonInfo::Syntax::AS_CXX11
+ : AttributeCommonInfo::Syntax::AS_C2x;
return II ? hasAttribute(Syntax, ScopeII, II, getTargetInfo(),
getLangOpts())
: 0;
@@ -1736,9 +1746,9 @@ void Preprocessor::ExpandBuiltinMacro(Token &Tok) {
// double-quotes ("").
bool Value;
if (II == Ident__has_include)
- Value = EvaluateHasInclude(Tok, II, *this);
+ Value = EvaluateHasInclude(Tok, II);
else
- Value = EvaluateHasIncludeNext(Tok, II, *this);
+ Value = EvaluateHasIncludeNext(Tok, II);
if (Tok.isNot(tok::r_paren))
return;
@@ -1882,3 +1892,16 @@ void Preprocessor::markMacroAsUsed(MacroInfo *MI) {
WarnUnusedMacroLocs.erase(MI->getDefinitionLoc());
MI->setIsUsed(true);
}
+
+void Preprocessor::processPathForFileMacro(SmallVectorImpl<char> &Path,
+ const LangOptions &LangOpts,
+ const TargetInfo &TI) {
+ LangOpts.remapPathPrefix(Path);
+ if (LangOpts.UseTargetPathSeparator) {
+ if (TI.getTriple().isOSWindows())
+ llvm::sys::path::remove_dots(Path, false,
+ llvm::sys::path::Style::windows_backslash);
+ else
+ llvm::sys::path::remove_dots(Path, false, llvm::sys::path::Style::posix);
+ }
+}
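processPathForFileMacro first applies the prefix remapping, then, when LangOpts.UseTargetPathSeparator is set, normalizes the __FILE__ spelling to the target's path style. A small standalone illustration of the llvm::sys::path::remove_dots call it uses (toy input path, not Clang code):

    #include "llvm/ADT/SmallString.h"
    #include "llvm/Support/Path.h"

    int main() {
      llvm::SmallString<128> P("src/./lex/Preprocessor.cpp");
      // Folds '.' components (but not '..', since the second argument is
      // false) and rewrites separators in the requested style.
      llvm::sys::path::remove_dots(P, /*remove_dot_dot=*/false,
                                   llvm::sys::path::Style::windows_backslash);
      // P is now "src\lex\Preprocessor.cpp"
    }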
diff --git a/contrib/llvm-project/clang/lib/Lex/Pragma.cpp b/contrib/llvm-project/clang/lib/Lex/Pragma.cpp
index eb7e7cbc4714..fb4f2dc45758 100644
--- a/contrib/llvm-project/clang/lib/Lex/Pragma.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/Pragma.cpp
@@ -14,7 +14,6 @@
#include "clang/Lex/Pragma.h"
#include "clang/Basic/CLWarnings.h"
#include "clang/Basic/Diagnostic.h"
-#include "clang/Basic/DiagnosticLex.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
@@ -37,12 +36,10 @@
#include "clang/Lex/TokenLexer.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
-#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
-#include "llvm/ADT/StringSwitch.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Timer.h"
@@ -263,7 +260,12 @@ void Preprocessor::Handle_Pragma(Token &Tok) {
}
SourceLocation RParenLoc = Tok.getLocation();
- std::string StrVal = getSpelling(StrTok);
+ bool Invalid = false;
+ std::string StrVal = getSpelling(StrTok, &Invalid);
+ if (Invalid) {
+ Diag(PragmaLoc, diag::err__Pragma_malformed);
+ return;
+ }
// The _Pragma is lexically sound. Destringize according to C11 6.10.9.1:
// "The string literal is destringized by deleting any encoding prefix,
@@ -498,87 +500,42 @@ void Preprocessor::HandlePragmaSystemHeader(Token &SysHeaderTok) {
SrcMgr::C_System);
}
-static llvm::Optional<Token> LexHeader(Preprocessor &PP,
- Optional<FileEntryRef> &File,
- bool SuppressIncludeNotFoundError) {
+/// HandlePragmaDependency - Handle \#pragma GCC dependency "foo" blah.
+void Preprocessor::HandlePragmaDependency(Token &DependencyTok) {
Token FilenameTok;
- if (PP.LexHeaderName(FilenameTok, /*AllowConcatenation*/ false))
- return llvm::None;
+ if (LexHeaderName(FilenameTok, /*AllowConcatenation*/false))
+ return;
// If the next token wasn't a header-name, diagnose the error.
if (FilenameTok.isNot(tok::header_name)) {
- PP.Diag(FilenameTok.getLocation(), diag::err_pp_expects_filename);
- return llvm::None;
+ Diag(FilenameTok.getLocation(), diag::err_pp_expects_filename);
+ return;
}
// Reserve a buffer to get the spelling.
SmallString<128> FilenameBuffer;
bool Invalid = false;
- StringRef Filename = PP.getSpelling(FilenameTok, FilenameBuffer, &Invalid);
+ StringRef Filename = getSpelling(FilenameTok, FilenameBuffer, &Invalid);
if (Invalid)
- return llvm::None;
+ return;
bool isAngled =
- PP.GetIncludeFilenameSpelling(FilenameTok.getLocation(), Filename);
+ GetIncludeFilenameSpelling(FilenameTok.getLocation(), Filename);
// If GetIncludeFilenameSpelling set the start ptr to null, there was an
// error.
if (Filename.empty())
- return llvm::None;
+ return;
// Search include directories for this file.
- File = PP.LookupFile(FilenameTok.getLocation(), Filename, isAngled, nullptr,
- nullptr, nullptr, nullptr, nullptr, nullptr, nullptr,
- nullptr);
+ Optional<FileEntryRef> File =
+ LookupFile(FilenameTok.getLocation(), Filename, isAngled, nullptr,
+ nullptr, nullptr, nullptr, nullptr, nullptr, nullptr, nullptr);
if (!File) {
if (!SuppressIncludeNotFoundError)
- PP.Diag(FilenameTok, diag::err_pp_file_not_found) << Filename;
- return llvm::None;
- }
-
- return FilenameTok;
-}
-
-/// HandlePragmaIncludeInstead - Handle \#pragma clang include_instead(header).
-void Preprocessor::HandlePragmaIncludeInstead(Token &Tok) {
- // Get the current file lexer we're looking at. Ignore _Pragma 'files' etc.
- PreprocessorLexer *TheLexer = getCurrentFileLexer();
-
- if (!SourceMgr.isInSystemHeader(Tok.getLocation())) {
- Diag(Tok, diag::err_pragma_include_instead_not_sysheader);
- return;
- }
-
- Lex(Tok);
- if (Tok.isNot(tok::l_paren)) {
- Diag(Tok, diag::err_expected) << "(";
+ Diag(FilenameTok, diag::err_pp_file_not_found) << Filename;
return;
}
- Optional<FileEntryRef> File;
- llvm::Optional<Token> FilenameTok =
- LexHeader(*this, File, SuppressIncludeNotFoundError);
- if (!FilenameTok)
- return;
-
- Lex(Tok);
- if (Tok.isNot(tok::r_paren)) {
- Diag(Tok, diag::err_expected) << ")";
- return;
- }
-
- SmallString<128> FilenameBuffer;
- StringRef Filename = getSpelling(*FilenameTok, FilenameBuffer);
- HeaderInfo.AddFileAlias(TheLexer->getFileEntry(), Filename);
-}
-
-/// HandlePragmaDependency - Handle \#pragma GCC dependency "foo" blah.
-void Preprocessor::HandlePragmaDependency(Token &DependencyTok) {
- Optional<FileEntryRef> File;
- llvm::Optional<Token> FilenameTok =
- LexHeader(*this, File, SuppressIncludeNotFoundError);
- if (!FilenameTok)
- return;
-
const FileEntry *CurFile = getCurrentFileLexer()->getFileEntry();
// If this file is older than the file it depends on, emit a diagnostic.
@@ -594,7 +551,7 @@ void Preprocessor::HandlePragmaDependency(Token &DependencyTok) {
// Remove the trailing ' ' if present.
if (!Message.empty())
Message.erase(Message.end()-1);
- Diag(*FilenameTok, diag::pp_out_of_date_dependency) << Message;
+ Diag(FilenameTok, diag::pp_out_of_date_dependency) << Message;
}
}
@@ -1069,18 +1026,6 @@ struct PragmaSystemHeaderHandler : public PragmaHandler {
}
};
-/// PragmaIncludeInsteadHandler - "\#pragma clang include_instead(header)" marks
-/// the current file as non-includable if the including header is not a system
-/// header.
-struct PragmaIncludeInsteadHandler : public PragmaHandler {
- PragmaIncludeInsteadHandler() : PragmaHandler("include_instead") {}
-
- void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
- Token &IIToken) override {
- PP.HandlePragmaIncludeInstead(IIToken);
- }
-};
-
struct PragmaDependencyHandler : public PragmaHandler {
PragmaDependencyHandler() : PragmaHandler("dependency") {}
@@ -1999,8 +1944,6 @@ struct PragmaRegionHandler : public PragmaHandler {
static IdentifierInfo *HandleMacroAnnotationPragma(Preprocessor &PP, Token &Tok,
const char *Pragma,
std::string &MessageString) {
- std::string Macro;
-
PP.Lex(Tok);
if (Tok.isNot(tok::l_paren)) {
PP.Diag(Tok, diag::err_expected) << "(";
@@ -2089,8 +2032,6 @@ struct PragmaFinalHandler : public PragmaHandler {
void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
Token &Tok) override {
- std::string Macro;
-
PP.Lex(Tok);
if (Tok.isNot(tok::l_paren)) {
PP.Diag(Tok, diag::err_expected) << "(";
@@ -2142,7 +2083,6 @@ void Preprocessor::RegisterBuiltinPragmas() {
// #pragma clang ...
AddPragmaHandler("clang", new PragmaPoisonHandler());
AddPragmaHandler("clang", new PragmaSystemHeaderHandler());
- AddPragmaHandler("clang", new PragmaIncludeInsteadHandler());
AddPragmaHandler("clang", new PragmaDebugHandler());
AddPragmaHandler("clang", new PragmaDependencyHandler());
AddPragmaHandler("clang", new PragmaDiagnosticHandler("clang"));
diff --git a/contrib/llvm-project/clang/lib/Lex/PreprocessingRecord.cpp b/contrib/llvm-project/clang/lib/Lex/PreprocessingRecord.cpp
index ed59dbdf018d..673ef637e396 100644
--- a/contrib/llvm-project/clang/lib/Lex/PreprocessingRecord.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PreprocessingRecord.cpp
@@ -42,7 +42,8 @@ ExternalPreprocessingRecordSource::~ExternalPreprocessingRecordSource() =
InclusionDirective::InclusionDirective(PreprocessingRecord &PPRec,
InclusionKind Kind, StringRef FileName,
bool InQuotes, bool ImportedModule,
- const FileEntry *File, SourceRange Range)
+ Optional<FileEntryRef> File,
+ SourceRange Range)
: PreprocessingDirective(InclusionDirectiveKind, Range), InQuotes(InQuotes),
Kind(Kind), ImportedModule(ImportedModule), File(File) {
char *Memory = (char *)PPRec.Allocate(FileName.size() + 1, alignof(char));
@@ -113,7 +114,7 @@ bool PreprocessingRecord::isEntityInFileID(iterator PPEI, FileID FID) {
// deserializing it.
Optional<bool> IsInFile =
ExternalSource->isPreprocessedEntityInFileID(LoadedIndex, FID);
- if (IsInFile.hasValue())
+ if (IsInFile)
return IsInFile.getValue();
// The external source did not provide a definite answer, go and deserialize
@@ -480,7 +481,7 @@ void PreprocessingRecord::InclusionDirective(
StringRef FileName,
bool IsAngled,
CharSourceRange FilenameRange,
- const FileEntry *File,
+ Optional<FileEntryRef> File,
StringRef SearchPath,
StringRef RelativePath,
const Module *Imported,
diff --git a/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp b/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp
index 3c338a2b8123..281f01fb28a4 100644
--- a/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/Preprocessor.cpp
@@ -158,11 +158,6 @@ Preprocessor::Preprocessor(std::shared_ptr<PreprocessorOptions> PPOpts,
if (this->PPOpts->GeneratePreamble)
PreambleConditionalStack.startRecording();
- ExcludedConditionalDirectiveSkipMappings =
- this->PPOpts->ExcludedConditionalDirectiveSkipMappings;
- if (ExcludedConditionalDirectiveSkipMappings)
- ExcludedConditionalDirectiveSkipMappings->clear();
-
MaxTokens = LangOpts.MaxTokens;
}
@@ -208,6 +203,21 @@ void Preprocessor::Initialize(const TargetInfo &Target,
// Populate the identifier table with info about keywords for the current language.
Identifiers.AddKeywords(LangOpts);
+
+ // Initialize the __FLT_EVAL_METHOD__ macro from the TargetInfo.
+ setTUFPEvalMethod(getTargetInfo().getFPEvalMethod());
+
+ if (getLangOpts().getFPEvalMethod() == LangOptions::FEM_UnsetOnCommandLine)
+ // Use setting from TargetInfo.
+ setCurrentFPEvalMethod(SourceLocation(), Target.getFPEvalMethod());
+ else
+ // Set initial value of __FLT_EVAL_METHOD__ from the command line.
+ setCurrentFPEvalMethod(SourceLocation(), getLangOpts().getFPEvalMethod());
+ // When the `-ffast-math` option is enabled, it turns on several driver
+ // math options. Among those, only the following two modes affect the
+ // eval-method: reciprocal and reassociate.
+ if (getLangOpts().AllowFPReassoc || getLangOpts().AllowRecip)
+ setCurrentFPEvalMethod(SourceLocation(), LangOptions::FEM_Indeterminable);
}
void Preprocessor::InitializeForModelFile() {
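The TU-wide value seeded in Initialize mirrors C's FLT_EVAL_METHOD. As a quick reference (values per C17 5.2.4.2.2, with -1 used when fast-math reassociation or reciprocal math makes the method indeterminable):

    #include <float.h>
    /*  0: evaluate in the range/precision of the operand types
        1: evaluate float and double operations as double
        2: evaluate all operations as long double
       -1: indeterminable (e.g. under -ffast-math reassociation) */
    #if defined(__FLT_EVAL_METHOD__) && __FLT_EVAL_METHOD__ == 0
    /* float arithmetic stays in float */
    #endif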
@@ -229,8 +239,10 @@ void Preprocessor::FinalizeForModelFile() {
}
void Preprocessor::DumpToken(const Token &Tok, bool DumpFlags) const {
- llvm::errs() << tok::getTokenName(Tok.getKind()) << " '"
- << getSpelling(Tok) << "'";
+ llvm::errs() << tok::getTokenName(Tok.getKind());
+
+ if (!Tok.isAnnotation())
+ llvm::errs() << " '" << getSpelling(Tok) << "'";
if (!DumpFlags) return;
@@ -377,7 +389,9 @@ StringRef Preprocessor::getLastMacroWithSpelling(
void Preprocessor::recomputeCurLexerKind() {
if (CurLexer)
- CurLexerKind = CLK_Lexer;
+ CurLexerKind = CurLexer->isDependencyDirectivesLexer()
+ ? CLK_DependencyDirectivesLexer
+ : CLK_Lexer;
else if (CurTokenLexer)
CurLexerKind = CLK_TokenLexer;
else
@@ -640,6 +654,9 @@ void Preprocessor::SkipTokensWhileUsingPCH() {
case CLK_CachingLexer:
CachingLex(Tok);
break;
+ case CLK_DependencyDirectivesLexer:
+ CurLexer->LexDependencyDirectiveToken(Tok);
+ break;
case CLK_LexAfterModuleImport:
LexAfterModuleImport(Tok);
break;
@@ -901,6 +918,9 @@ void Preprocessor::Lex(Token &Result) {
CachingLex(Result);
ReturnedToken = true;
break;
+ case CLK_DependencyDirectivesLexer:
+ ReturnedToken = CurLexer->LexDependencyDirectiveToken(Result);
+ break;
case CLK_LexAfterModuleImport:
ReturnedToken = LexAfterModuleImport(Result);
break;
@@ -1345,7 +1365,7 @@ bool Preprocessor::FinishLexStringLiteral(Token &Result, std::string &String,
// Concatenate and parse the strings.
StringLiteralParser Literal(StrToks, *this);
- assert(Literal.isAscii() && "Didn't allow wide strings in");
+ assert(Literal.isOrdinary() && "Didn't allow wide strings in");
if (Literal.hadError)
return false;
diff --git a/contrib/llvm-project/clang/lib/Lex/PreprocessorLexer.cpp b/contrib/llvm-project/clang/lib/Lex/PreprocessorLexer.cpp
index 5f6f4a13419b..23c80d375214 100644
--- a/contrib/llvm-project/clang/lib/Lex/PreprocessorLexer.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/PreprocessorLexer.cpp
@@ -47,6 +47,7 @@ void PreprocessorLexer::LexIncludeFilename(Token &FilenameTok) {
/// getFileEntry - Return the FileEntry corresponding to this FileID. Like
/// getFileID(), this only works for lexers with attached preprocessors.
-const FileEntry *PreprocessorLexer::getFileEntry() const {
- return PP->getSourceManager().getFileEntryForID(getFileID());
+OptionalFileEntryRefDegradesToFileEntryPtr
+PreprocessorLexer::getFileEntry() const {
+ return PP->getSourceManager().getFileEntryRefForID(getFileID());
}
diff --git a/contrib/llvm-project/clang/lib/Lex/TokenLexer.cpp b/contrib/llvm-project/clang/lib/Lex/TokenLexer.cpp
index e71a65f031e4..efda6d0046fa 100644
--- a/contrib/llvm-project/clang/lib/Lex/TokenLexer.cpp
+++ b/contrib/llvm-project/clang/lib/Lex/TokenLexer.cpp
@@ -295,7 +295,7 @@ void TokenLexer::ExpandFunctionArguments() {
// the closing r_paren of the __VA_OPT__.
if (!Tokens[I].is(tok::r_paren) || !VCtx.sawClosingParen()) {
// Lazily expand __VA_ARGS__ when we see the first __VA_OPT__.
- if (!CalledWithVariadicArguments.hasValue()) {
+ if (!CalledWithVariadicArguments) {
CalledWithVariadicArguments =
ActualArgs->invokedWithVariadicArgument(Macro, PP);
}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseAST.cpp b/contrib/llvm-project/clang/lib/Parse/ParseAST.cpp
index 01510e8caf3b..04b3f0460bf3 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseAST.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseAST.cpp
@@ -154,8 +154,12 @@ void clang::ParseAST(Sema &S, bool PrintStats, bool SkipFunctionBodies) {
llvm::TimeTraceScope TimeScope("Frontend");
P.Initialize();
Parser::DeclGroupPtrTy ADecl;
- for (bool AtEOF = P.ParseFirstTopLevelDecl(ADecl); !AtEOF;
- AtEOF = P.ParseTopLevelDecl(ADecl)) {
+ Sema::ModuleImportState ImportState;
+ EnterExpressionEvaluationContext PotentiallyEvaluated(
+ S, Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
+
+ for (bool AtEOF = P.ParseFirstTopLevelDecl(ADecl, ImportState); !AtEOF;
+ AtEOF = P.ParseTopLevelDecl(ADecl, ImportState)) {
// If we got a null return and something *was* parsed, ignore it. This
// is due to a top-level semicolon, an action override, or a parse error
// skipping something.
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp b/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp
index 7330c2b7593d..906315a078b1 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseCXXInlineMethods.cpp
@@ -22,9 +22,9 @@ using namespace clang;
/// Declarator is a well formed C++ inline method definition. Now lex its body
/// and store its tokens for parsing after the C++ class is complete.
NamedDecl *Parser::ParseCXXInlineMethodDef(
- AccessSpecifier AS, ParsedAttributes &AccessAttrs, ParsingDeclarator &D,
- const ParsedTemplateInfo &TemplateInfo, const VirtSpecifiers &VS,
- SourceLocation PureSpecLoc) {
+ AccessSpecifier AS, const ParsedAttributesView &AccessAttrs,
+ ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo,
+ const VirtSpecifiers &VS, SourceLocation PureSpecLoc) {
assert(D.isFunctionDeclarator() && "This isn't a function declarator!");
assert(Tok.isOneOf(tok::l_brace, tok::colon, tok::kw_try, tok::equal) &&
"Current token not a '{', ':', '=', or 'try'!");
@@ -720,7 +720,6 @@ void Parser::ParseLexedAttribute(LateParsedAttribute &LA,
ConsumeAnyToken(/*ConsumeCodeCompletionTok=*/true);
ParsedAttributes Attrs(AttrFactory);
- SourceLocation endLoc;
if (LA.Decls.size() > 0) {
Decl *D = LA.Decls[0];
@@ -743,7 +742,7 @@ void Parser::ParseLexedAttribute(LateParsedAttribute &LA,
Actions.ActOnReenterFunctionContext(Actions.CurScope, D);
}
- ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc,
+ ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, nullptr,
nullptr, SourceLocation(), ParsedAttr::AS_GNU,
nullptr);
@@ -752,7 +751,7 @@ void Parser::ParseLexedAttribute(LateParsedAttribute &LA,
} else {
// If there are multiple decls, then the decl cannot be within the
// function scope.
- ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, &endLoc,
+ ParseGNUAttributeArgs(&LA.AttrName, LA.AttrNameLoc, Attrs, nullptr,
nullptr, SourceLocation(), ParsedAttr::AS_GNU,
nullptr);
}
@@ -796,7 +795,7 @@ void Parser::ParseLexedPragma(LateParsedPragma &LP) {
case tok::annot_attr_openmp:
case tok::annot_pragma_openmp: {
AccessSpecifier AS = LP.getAccessSpecifier();
- ParsedAttributesWithRange Attrs(AttrFactory);
+ ParsedAttributes Attrs(AttrFactory);
(void)ParseOpenMPDeclarativeDirectiveWithExtDecl(AS, Attrs);
break;
}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp b/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
index f21938c81689..2f21b7b2fef0 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseDecl.cpp
@@ -10,16 +10,17 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Parse/Parser.h"
-#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/Basic/AddressSpaces.h"
+#include "clang/Basic/AttributeCommonInfo.h"
#include "clang/Basic/Attributes.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
+#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/Lookup.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
@@ -40,10 +41,8 @@ using namespace clang;
/// specifier-qualifier-list abstract-declarator[opt]
///
/// Called type-id in C++.
-TypeResult Parser::ParseTypeName(SourceRange *Range,
- DeclaratorContext Context,
- AccessSpecifier AS,
- Decl **OwnedType,
+TypeResult Parser::ParseTypeName(SourceRange *Range, DeclaratorContext Context,
+ AccessSpecifier AS, Decl **OwnedType,
ParsedAttributes *Attrs) {
DeclSpecContext DSC = getDeclSpecContextFromDeclaratorContext(Context);
if (DSC == DeclSpecContext::DSC_normal)
@@ -58,7 +57,7 @@ TypeResult Parser::ParseTypeName(SourceRange *Range,
*OwnedType = DS.isTypeSpecOwned() ? DS.getRepAsDecl() : nullptr;
// Parse the abstract-declarator, if present.
- Declarator DeclaratorInfo(DS, Context);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(), Context);
ParseDeclarator(DeclaratorInfo);
if (Range)
*Range = DeclaratorInfo.getSourceRange();
@@ -103,9 +102,7 @@ static bool FindLocsWithCommonFileID(Preprocessor &PP, SourceLocation StartLoc,
return AttrStartIsInMacro && AttrEndIsInMacro;
}
-void Parser::ParseAttributes(unsigned WhichAttrKinds,
- ParsedAttributesWithRange &Attrs,
- SourceLocation *End,
+void Parser::ParseAttributes(unsigned WhichAttrKinds, ParsedAttributes &Attrs,
LateParsedAttrList *LateAttrs) {
bool MoreToParse;
do {
@@ -113,11 +110,11 @@ void Parser::ParseAttributes(unsigned WhichAttrKinds,
// parsed, loop to ensure all specified attribute combinations are parsed.
MoreToParse = false;
if (WhichAttrKinds & PAKM_CXX11)
- MoreToParse |= MaybeParseCXX11Attributes(Attrs, End);
+ MoreToParse |= MaybeParseCXX11Attributes(Attrs);
if (WhichAttrKinds & PAKM_GNU)
- MoreToParse |= MaybeParseGNUAttributes(Attrs, End, LateAttrs);
+ MoreToParse |= MaybeParseGNUAttributes(Attrs, LateAttrs);
if (WhichAttrKinds & PAKM_Declspec)
- MoreToParse |= MaybeParseMicrosoftDeclSpecs(Attrs, End);
+ MoreToParse |= MaybeParseMicrosoftDeclSpecs(Attrs);
} while (MoreToParse);
}
@@ -162,15 +159,12 @@ void Parser::ParseAttributes(unsigned WhichAttrKinds,
/// ',' or ')' are ignored, otherwise they produce a parse error.
///
/// We follow the C++ model, but don't allow junk after the identifier.
-void Parser::ParseGNUAttributes(ParsedAttributesWithRange &Attrs,
- SourceLocation *EndLoc,
+void Parser::ParseGNUAttributes(ParsedAttributes &Attrs,
LateParsedAttrList *LateAttrs, Declarator *D) {
assert(Tok.is(tok::kw___attribute) && "Not a GNU attribute list!");
- SourceLocation StartLoc = Tok.getLocation(), Loc;
-
- if (!EndLoc)
- EndLoc = &Loc;
+ SourceLocation StartLoc = Tok.getLocation();
+ SourceLocation EndLoc = StartLoc;
while (Tok.is(tok::kw___attribute)) {
SourceLocation AttrTokLoc = ConsumeToken();
@@ -214,7 +208,7 @@ void Parser::ParseGNUAttributes(ParsedAttributesWithRange &Attrs,
// Handle "parameterized" attributes
if (!LateAttrs || !isAttributeLateParsed(*AttrName)) {
- ParseGNUAttributeArgs(AttrName, AttrNameLoc, Attrs, EndLoc, nullptr,
+ ParseGNUAttributeArgs(AttrName, AttrNameLoc, Attrs, &EndLoc, nullptr,
SourceLocation(), ParsedAttr::AS_GNU, D);
continue;
}
@@ -247,8 +241,7 @@ void Parser::ParseGNUAttributes(ParsedAttributesWithRange &Attrs,
SourceLocation Loc = Tok.getLocation();
if (ExpectAndConsume(tok::r_paren))
SkipUntil(tok::r_paren, StopAtSemi);
- if (EndLoc)
- *EndLoc = Loc;
+ EndLoc = Loc;
// If this was declared in a macro, attach the macro IdentifierInfo to the
// parsed attribute.
@@ -270,7 +263,7 @@ void Parser::ParseGNUAttributes(ParsedAttributesWithRange &Attrs,
}
}
- Attrs.Range = SourceRange(StartLoc, *EndLoc);
+ Attrs.Range = SourceRange(StartLoc, EndLoc);
}
/// Determine whether the given attribute has an identifier argument.
@@ -300,6 +293,15 @@ static bool attributeTreatsKeywordThisAsIdentifier(const IdentifierInfo &II) {
#undef CLANG_ATTR_THIS_ISA_IDENTIFIER_ARG_LIST
}
+/// Determine whether the given attribute accepts expression parameter packs.
+static bool attributeAcceptsExprPack(const IdentifierInfo &II) {
+#define CLANG_ATTR_ACCEPTS_EXPR_PACK
+ return llvm::StringSwitch<bool>(normalizeAttrName(II.getName()))
+#include "clang/Parse/AttrParserStringSwitches.inc"
+ .Default(false);
+#undef CLANG_ATTR_ACCEPTS_EXPR_PACK
+}
+
/// Determine whether the given attribute parses a type argument.
static bool attributeIsTypeArgAttr(const IdentifierInfo &II) {
#define CLANG_ATTR_TYPE_ARG_LIST
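
A sketch of the input the new pack check enables; clang::annotate is an example of a pack-accepting attribute, and the template here is purely illustrative:

    // Pack expansion inside an attribute argument list:
    template <int... Is>
    [[clang::annotate("values", Is...)]] void noteValues() {}
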
@@ -331,7 +333,6 @@ IdentifierLoc *Parser::ParseIdentifierLoc() {
void Parser::ParseAttributeWithTypeArg(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
- SourceLocation *EndLoc,
IdentifierInfo *ScopeName,
SourceLocation ScopeLoc,
ParsedAttr::Syntax Syntax) {
@@ -366,6 +367,8 @@ unsigned Parser::ParseAttributeArgsCommon(
bool ChangeKWThisToIdent = attributeTreatsKeywordThisAsIdentifier(*AttrName);
bool AttributeIsTypeArgAttr = attributeIsTypeArgAttr(*AttrName);
+ bool AttributeHasVariadicIdentifierArg =
+ attributeHasVariadicIdentifierArg(*AttrName);
// Interpret "kw_this" as an identifier if the attributed requests it.
if (ChangeKWThisToIdent && Tok.is(tok::kw_this))
@@ -374,8 +377,8 @@ unsigned Parser::ParseAttributeArgsCommon(
ArgsVector ArgExprs;
if (Tok.is(tok::identifier)) {
// If this attribute wants an 'identifier' argument, make it so.
- bool IsIdentifierArg = attributeHasIdentifierArg(*AttrName) ||
- attributeHasVariadicIdentifierArg(*AttrName);
+ bool IsIdentifierArg = AttributeHasVariadicIdentifierArg ||
+ attributeHasIdentifierArg(*AttrName);
ParsedAttr::Kind AttrKind =
ParsedAttr::getParsedKind(AttrName, ScopeName, Syntax);
@@ -397,42 +400,81 @@ unsigned Parser::ParseAttributeArgsCommon(
if (!ArgExprs.empty())
ConsumeToken();
- // Parse the non-empty comma-separated list of expressions.
- do {
- // Interpret "kw_this" as an identifier if the attributed requests it.
- if (ChangeKWThisToIdent && Tok.is(tok::kw_this))
- Tok.setKind(tok::identifier);
+ if (AttributeIsTypeArgAttr) {
+ // FIXME: Multiple type arguments are not implemented.
+ TypeResult T = ParseTypeName();
+ if (T.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return 0;
+ }
+ if (T.isUsable())
+ TheParsedType = T.get();
+ } else if (AttributeHasVariadicIdentifierArg) {
+ // Parse variadic identifier arg. This can either consume identifiers or
+ // expressions. Variadic identifier args do not support parameter packs
+ // because those are typically used for attributes with enumeration
+ // arguments, and those enumerations are not something the user could
+ // express via a pack.
+ do {
+ // Interpret "kw_this" as an identifier if the attributed requests it.
+ if (ChangeKWThisToIdent && Tok.is(tok::kw_this))
+ Tok.setKind(tok::identifier);
+
+ ExprResult ArgExpr;
+ if (Tok.is(tok::identifier)) {
+ ArgExprs.push_back(ParseIdentifierLoc());
+ } else {
+ bool Uneval = attributeParsedArgsUnevaluated(*AttrName);
+ EnterExpressionEvaluationContext Unevaluated(
+ Actions,
+ Uneval ? Sema::ExpressionEvaluationContext::Unevaluated
+ : Sema::ExpressionEvaluationContext::ConstantEvaluated);
- ExprResult ArgExpr;
- if (AttributeIsTypeArgAttr) {
- TypeResult T = ParseTypeName();
- if (T.isInvalid()) {
- SkipUntil(tok::r_paren, StopAtSemi);
- return 0;
+ ExprResult ArgExpr(
+ Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression()));
+
+ if (ArgExpr.isInvalid()) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return 0;
+ }
+ ArgExprs.push_back(ArgExpr.get());
}
- if (T.isUsable())
- TheParsedType = T.get();
- break; // FIXME: Multiple type arguments are not implemented.
- } else if (Tok.is(tok::identifier) &&
- attributeHasVariadicIdentifierArg(*AttrName)) {
- ArgExprs.push_back(ParseIdentifierLoc());
- } else {
- bool Uneval = attributeParsedArgsUnevaluated(*AttrName);
- EnterExpressionEvaluationContext Unevaluated(
- Actions,
- Uneval ? Sema::ExpressionEvaluationContext::Unevaluated
- : Sema::ExpressionEvaluationContext::ConstantEvaluated);
-
- ExprResult ArgExpr(
- Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression()));
- if (ArgExpr.isInvalid()) {
+ // Eat the comma, move to the next argument
+ } while (TryConsumeToken(tok::comma));
+ } else {
+ // General case. Parse all available expressions.
+ bool Uneval = attributeParsedArgsUnevaluated(*AttrName);
+ EnterExpressionEvaluationContext Unevaluated(
+ Actions, Uneval
+ ? Sema::ExpressionEvaluationContext::Unevaluated
+ : Sema::ExpressionEvaluationContext::ConstantEvaluated);
+
+ CommaLocsTy CommaLocs;
+ ExprVector ParsedExprs;
+ if (ParseExpressionList(ParsedExprs, CommaLocs,
+ llvm::function_ref<void()>(),
+ /*FailImmediatelyOnInvalidExpr=*/true,
+ /*EarlyTypoCorrection=*/true)) {
+ SkipUntil(tok::r_paren, StopAtSemi);
+ return 0;
+ }
+
+ // Pack expansion must currently be explicitly supported by an attribute.
+ for (size_t I = 0; I < ParsedExprs.size(); ++I) {
+ if (!isa<PackExpansionExpr>(ParsedExprs[I]))
+ continue;
+
+ if (!attributeAcceptsExprPack(*AttrName)) {
+ Diag(Tok.getLocation(),
+ diag::err_attribute_argument_parm_pack_not_supported)
+ << AttrName;
SkipUntil(tok::r_paren, StopAtSemi);
return 0;
}
- ArgExprs.push_back(ArgExpr.get());
}
- // Eat the comma, move to the next argument
- } while (TryConsumeToken(tok::comma));
+
+ ArgExprs.insert(ArgExprs.end(), ParsedExprs.begin(), ParsedExprs.end());
+ }
}
SourceLocation RParen = Tok.getLocation();
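
The rewritten body dispatches on the attribute's argument grammar: a single type (via ParseTypeName), variadic identifier-or-expression arguments, or a general expression list with optional pack expansion. Illustrative inputs, with the specific attributes chosen only as examples of each shape:

    enum __attribute__((enum_extensibility(closed))) E { A };  // identifier argument
    void *my_alloc(unsigned n) __attribute__((alloc_size(1))); // expression list
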
@@ -456,14 +498,10 @@ unsigned Parser::ParseAttributeArgsCommon(
/// Parse the arguments to a parameterized GNU attribute or
/// a C++11 attribute in "gnu" namespace.
-void Parser::ParseGNUAttributeArgs(IdentifierInfo *AttrName,
- SourceLocation AttrNameLoc,
- ParsedAttributes &Attrs,
- SourceLocation *EndLoc,
- IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax,
- Declarator *D) {
+void Parser::ParseGNUAttributeArgs(
+ IdentifierInfo *AttrName, SourceLocation AttrNameLoc,
+ ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
+ SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax, Declarator *D) {
assert(Tok.is(tok::l_paren) && "Attribute arg list not starting with '('");
@@ -491,7 +529,7 @@ void Parser::ParseGNUAttributeArgs(IdentifierInfo *AttrName,
ScopeName, ScopeLoc, Syntax);
return;
} else if (attributeIsTypeArgAttr(*AttrName)) {
- ParseAttributeWithTypeArg(*AttrName, AttrNameLoc, Attrs, EndLoc, ScopeName,
+ ParseAttributeWithTypeArg(*AttrName, AttrNameLoc, Attrs, ScopeName,
ScopeLoc, Syntax);
return;
}
@@ -555,9 +593,11 @@ unsigned Parser::ParseClangAttributeArgs(
bool Parser::ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs) {
+ unsigned ExistingAttrs = Attrs.size();
+
// If the attribute isn't known, we will not attempt to parse any
// arguments.
- if (!hasAttribute(AttrSyntax::Declspec, nullptr, AttrName,
+ if (!hasAttribute(AttributeCommonInfo::Syntax::AS_Declspec, nullptr, AttrName,
getTargetInfo(), getLangOpts())) {
// Eat the left paren, then skip to the ending right paren.
ConsumeParen();
@@ -687,7 +727,7 @@ bool Parser::ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
// If this attribute's args were parsed, and it was expected to have
// arguments but none were provided, emit a diagnostic.
- if (!Attrs.empty() && Attrs.begin()->getMaxArgs() && !NumArgs) {
+ if (ExistingAttrs < Attrs.size() && Attrs.back().getMaxArgs() && !NumArgs) {
Diag(OpenParenLoc, diag::err_attribute_requires_arguments) << AttrName;
return false;
}
@@ -700,11 +740,13 @@ bool Parser::ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName,
/// [MS] extended-decl-modifier-seq:
/// extended-decl-modifier[opt]
/// extended-decl-modifier extended-decl-modifier-seq
-void Parser::ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
- SourceLocation *End) {
+void Parser::ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs) {
assert(getLangOpts().DeclSpecKeyword && "__declspec keyword is not enabled");
assert(Tok.is(tok::kw___declspec) && "Not a declspec!");
+ SourceLocation StartLoc = Tok.getLocation();
+ SourceLocation EndLoc = StartLoc;
+
while (Tok.is(tok::kw___declspec)) {
ConsumeToken();
BalancedDelimiterTracker T(*this, tok::l_paren);
@@ -767,9 +809,10 @@ void Parser::ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs,
ParsedAttr::AS_Declspec);
}
T.consumeClose();
- if (End)
- *End = T.getCloseLocation();
+ EndLoc = T.getCloseLocation();
}
+
+ Attrs.Range = SourceRange(StartLoc, EndLoc);
}
void Parser::ParseMicrosoftTypeAttributes(ParsedAttributes &attrs) {
@@ -855,6 +898,15 @@ void Parser::ParseOpenCLKernelAttributes(ParsedAttributes &attrs) {
}
}
+void Parser::ParseCUDAFunctionAttributes(ParsedAttributes &attrs) {
+ while (Tok.is(tok::kw___noinline__)) {
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ SourceLocation AttrNameLoc = ConsumeToken();
+ attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
+ ParsedAttr::AS_Keyword);
+ }
+}
+
void Parser::ParseOpenCLQualifiers(ParsedAttributes &Attrs) {
IdentifierInfo *AttrName = Tok.getIdentifierInfo();
SourceLocation AttrNameLoc = Tok.getLocation();
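
For reference, the CUDA/HIP spelling handled by the new function above; a sketch assuming a CUDA compilation (helper is a hypothetical function):

    // __noinline__ is recorded as a keyword attribute on the declaration:
    __device__ __noinline__ int helper(int x) { return x + 1; }
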
@@ -1154,7 +1206,7 @@ void Parser::ParseAvailabilityAttribute(IdentifierInfo &Availability,
// Also reject wide string literals.
if (StringLiteral *MessageStringLiteral =
cast_or_null<StringLiteral>(MessageExpr.get())) {
- if (!MessageStringLiteral->isAscii()) {
+ if (!MessageStringLiteral->isOrdinary()) {
Diag(MessageStringLiteral->getSourceRange().getBegin(),
diag::err_expected_string_literal)
<< /*Source='availability attribute'*/ 2;
@@ -1379,13 +1431,10 @@ void Parser::ParseExternalSourceSymbolAttribute(
/// opt-instance_method:
/// Identifier | <empty>
///
-void Parser::ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
- SourceLocation ObjCBridgeRelatedLoc,
- ParsedAttributes &attrs,
- SourceLocation *endLoc,
- IdentifierInfo *ScopeName,
- SourceLocation ScopeLoc,
- ParsedAttr::Syntax Syntax) {
+void Parser::ParseObjCBridgeRelatedAttribute(
+ IdentifierInfo &ObjCBridgeRelated, SourceLocation ObjCBridgeRelatedLoc,
+ ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
+ SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax) {
// Opening '('.
BalancedDelimiterTracker T(*this, tok::l_paren);
if (T.consumeOpen()) {
@@ -1441,20 +1490,16 @@ void Parser::ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated,
if (T.consumeClose())
return;
- if (endLoc)
- *endLoc = T.getCloseLocation();
+ if (EndLoc)
+ *EndLoc = T.getCloseLocation();
// Record this attribute
- attrs.addNew(&ObjCBridgeRelated,
+ Attrs.addNew(&ObjCBridgeRelated,
SourceRange(ObjCBridgeRelatedLoc, T.getCloseLocation()),
- ScopeName, ScopeLoc,
- RelatedClass,
- ClassMethod,
- InstanceMethod,
+ ScopeName, ScopeLoc, RelatedClass, ClassMethod, InstanceMethod,
Syntax);
}
-
void Parser::ParseSwiftNewTypeAttribute(
IdentifierInfo &AttrName, SourceLocation AttrNameLoc,
ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName,
@@ -1496,7 +1541,6 @@ void Parser::ParseSwiftNewTypeAttribute(
ScopeName, ScopeLoc, Args, llvm::array_lengthof(Args), Syntax);
}
-
void Parser::ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName,
SourceLocation AttrNameLoc,
ParsedAttributes &Attrs,
@@ -1597,7 +1641,7 @@ bool Parser::DiagnoseProhibitedCXX11Attribute() {
/// attribute-specifier in a location where an attribute is not permitted, but
/// we know where the attributes ought to be written. Parse them anyway, and
/// provide a fixit moving them to the right place.
-void Parser::DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
+void Parser::DiagnoseMisplacedCXX11Attribute(ParsedAttributes &Attrs,
SourceLocation CorrectLocation) {
assert((Tok.is(tok::l_square) && NextToken().is(tok::l_square)) ||
Tok.is(tok::kw_alignas));
@@ -1623,8 +1667,9 @@ void Parser::DiagnoseProhibitedAttributes(
Diag(Range.getBegin(), diag::err_attributes_not_allowed) << Range;
}
-void Parser::ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
- unsigned DiagID, bool DiagnoseEmptyAttrs) {
+void Parser::ProhibitCXX11Attributes(ParsedAttributes &Attrs, unsigned DiagID,
+ bool DiagnoseEmptyAttrs,
+ bool WarnOnUnknownAttrs) {
if (DiagnoseEmptyAttrs && Attrs.empty() && Attrs.Range.isValid()) {
// An attribute list has been parsed, but it was empty.
@@ -1651,17 +1696,18 @@ void Parser::ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
for (const ParsedAttr &AL : Attrs) {
if (!AL.isCXX11Attribute() && !AL.isC2xAttribute())
continue;
- if (AL.getKind() == ParsedAttr::UnknownAttribute)
- Diag(AL.getLoc(), diag::warn_unknown_attribute_ignored)
- << AL << AL.getRange();
- else {
+ if (AL.getKind() == ParsedAttr::UnknownAttribute) {
+ if (WarnOnUnknownAttrs)
+ Diag(AL.getLoc(), diag::warn_unknown_attribute_ignored)
+ << AL << AL.getRange();
+ } else {
Diag(AL.getLoc(), DiagID) << AL;
AL.setInvalid();
}
}
}
-void Parser::DiagnoseCXX11AttributeExtension(ParsedAttributesWithRange &Attrs) {
+void Parser::DiagnoseCXX11AttributeExtension(ParsedAttributes &Attrs) {
for (const ParsedAttr &PA : Attrs) {
if (PA.isCXX11Attribute() || PA.isC2xAttribute())
Diag(PA.getLoc(), diag::ext_cxx11_attr_placement) << PA << PA.getRange();
@@ -1675,7 +1721,7 @@ void Parser::DiagnoseCXX11AttributeExtension(ParsedAttributesWithRange &Attrs) {
// Also, Microsoft-style [attributes] seem to affect the type instead of the
// variable.
// This function moves attributes that should apply to the type off DS to Attrs.
-void Parser::stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
+void Parser::stripTypeAttributesOffDeclSpec(ParsedAttributes &Attrs,
DeclSpec &DS,
Sema::TagUseKind TUK) {
if (TUK == Sema::TUK_Reference)
@@ -1712,10 +1758,11 @@ void Parser::stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
/// [C++11/C11] static_assert-declaration
/// others... [FIXME]
///
-Parser::DeclGroupPtrTy
-Parser::ParseDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
- ParsedAttributesWithRange &attrs,
- SourceLocation *DeclSpecStart) {
+Parser::DeclGroupPtrTy Parser::ParseDeclaration(DeclaratorContext Context,
+ SourceLocation &DeclEnd,
+ ParsedAttributes &DeclAttrs,
+ ParsedAttributes &DeclSpecAttrs,
+ SourceLocation *DeclSpecStart) {
ParenBraceBracketBalancer BalancerRAIIObj(*this);
// Must temporarily exit the objective-c container scope for
// parsing C non-Objective-C decls.
@@ -1725,32 +1772,40 @@ Parser::ParseDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
switch (Tok.getKind()) {
case tok::kw_template:
case tok::kw_export:
- ProhibitAttributes(attrs);
- SingleDecl = ParseDeclarationStartingWithTemplate(Context, DeclEnd, attrs);
+ ProhibitAttributes(DeclAttrs);
+ ProhibitAttributes(DeclSpecAttrs);
+ SingleDecl =
+ ParseDeclarationStartingWithTemplate(Context, DeclEnd, DeclAttrs);
break;
case tok::kw_inline:
// Could be the start of an inline namespace. Allowed as an ext in C++03.
if (getLangOpts().CPlusPlus && NextToken().is(tok::kw_namespace)) {
- ProhibitAttributes(attrs);
+ ProhibitAttributes(DeclAttrs);
+ ProhibitAttributes(DeclSpecAttrs);
SourceLocation InlineLoc = ConsumeToken();
return ParseNamespace(Context, DeclEnd, InlineLoc);
}
- return ParseSimpleDeclaration(Context, DeclEnd, attrs, true, nullptr,
- DeclSpecStart);
+ return ParseSimpleDeclaration(Context, DeclEnd, DeclAttrs, DeclSpecAttrs,
+ true, nullptr, DeclSpecStart);
case tok::kw_namespace:
- ProhibitAttributes(attrs);
+ ProhibitAttributes(DeclAttrs);
+ ProhibitAttributes(DeclSpecAttrs);
return ParseNamespace(Context, DeclEnd);
- case tok::kw_using:
+ case tok::kw_using: {
+ ParsedAttributes Attrs(AttrFactory);
+ takeAndConcatenateAttrs(DeclAttrs, DeclSpecAttrs, Attrs);
return ParseUsingDirectiveOrDeclaration(Context, ParsedTemplateInfo(),
- DeclEnd, attrs);
+ DeclEnd, Attrs);
+ }
case tok::kw_static_assert:
case tok::kw__Static_assert:
- ProhibitAttributes(attrs);
+ ProhibitAttributes(DeclAttrs);
+ ProhibitAttributes(DeclSpecAttrs);
SingleDecl = ParseStaticAssertDeclaration(DeclEnd);
break;
default:
- return ParseSimpleDeclaration(Context, DeclEnd, attrs, true, nullptr,
- DeclSpecStart);
+ return ParseSimpleDeclaration(Context, DeclEnd, DeclAttrs, DeclSpecAttrs,
+ true, nullptr, DeclSpecStart);
}
// This routine returns a DeclGroup, if the thing we parsed only contains a
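
The two-list signature keeps attributes that appertain to the whole declaration (DeclAttrs) apart from those that travel with the decl-specifiers (DeclSpecAttrs). An illustrative declaration exercising both, assuming Microsoft extensions are enabled:

    // [[nodiscard]] lands in DeclAttrs; __declspec(dllexport) in DeclSpecAttrs:
    [[nodiscard]] __declspec(dllexport) int answer();
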
@@ -1781,10 +1836,16 @@ Parser::ParseDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd,
/// DeclSpecStart if DeclSpecStart is non-null.
Parser::DeclGroupPtrTy Parser::ParseSimpleDeclaration(
DeclaratorContext Context, SourceLocation &DeclEnd,
- ParsedAttributesWithRange &Attrs, bool RequireSemi, ForRangeInit *FRI,
- SourceLocation *DeclSpecStart) {
+ ParsedAttributes &DeclAttrs, ParsedAttributes &DeclSpecAttrs,
+ bool RequireSemi, ForRangeInit *FRI, SourceLocation *DeclSpecStart) {
+ // Need to retain these for diagnostics before we add them to the DeclSpec.
+ ParsedAttributesView OriginalDeclSpecAttrs;
+ OriginalDeclSpecAttrs.addAll(DeclSpecAttrs.begin(), DeclSpecAttrs.end());
+ OriginalDeclSpecAttrs.Range = DeclSpecAttrs.Range;
+
// Parse the common declaration-specifiers piece.
ParsingDeclSpec DS(*this);
+ DS.takeAttributesFrom(DeclSpecAttrs);
DeclSpecContext DSContext = getDeclSpecContextFromDeclaratorContext(Context);
ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS_none, DSContext);
@@ -1798,12 +1859,12 @@ Parser::DeclGroupPtrTy Parser::ParseSimpleDeclaration(
// C99 6.7.2.3p6: Handle "struct-or-union identifier;", "enum { X };"
// declaration-specifiers init-declarator-list[opt] ';'
if (Tok.is(tok::semi)) {
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(DeclAttrs);
DeclEnd = Tok.getLocation();
if (RequireSemi) ConsumeToken();
RecordDecl *AnonRecord = nullptr;
- Decl *TheDecl = Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS_none,
- DS, AnonRecord);
+ Decl *TheDecl = Actions.ParsedFreeStandingDeclSpec(
+ getCurScope(), AS_none, DS, ParsedAttributesView::none(), AnonRecord);
DS.complete(TheDecl);
if (AnonRecord) {
Decl* decls[] = {AnonRecord, TheDecl};
@@ -1815,8 +1876,7 @@ Parser::DeclGroupPtrTy Parser::ParseSimpleDeclaration(
if (DeclSpecStart)
DS.SetRangeStart(*DeclSpecStart);
- DS.takeAttributesFrom(Attrs);
- return ParseDeclGroup(DS, Context, &DeclEnd, FRI);
+ return ParseDeclGroup(DS, Context, DeclAttrs, &DeclEnd, FRI);
}
/// Returns true if this might be the start of a declarator, or a common typo
@@ -1971,10 +2031,16 @@ void Parser::SkipMalformedDecl() {
/// result.
Parser::DeclGroupPtrTy Parser::ParseDeclGroup(ParsingDeclSpec &DS,
DeclaratorContext Context,
+ ParsedAttributes &Attrs,
SourceLocation *DeclEnd,
ForRangeInit *FRI) {
// Parse the first declarator.
- ParsingDeclarator D(*this, DS, Context);
+ // Consume all of the attributes from `Attrs` by moving them to our own local
+ // list. This ensures that we will not attempt to interpret them as statement
+ // attributes higher up the callchain.
+ ParsedAttributes LocalAttrs(AttrFactory);
+ LocalAttrs.takeAllFrom(Attrs);
+ ParsingDeclarator D(*this, DS, LocalAttrs, Context);
ParseDeclarator(D);
// Bail out if the first declarator didn't seem well-formed.
@@ -2580,7 +2646,7 @@ static bool isValidAfterIdentifierInDeclarator(const Token &T) {
bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
const ParsedTemplateInfo &TemplateInfo,
AccessSpecifier AS, DeclSpecContext DSC,
- ParsedAttributesWithRange &Attrs) {
+ ParsedAttributes &Attrs) {
assert(Tok.is(tok::identifier) && "should have identifier");
SourceLocation Loc = Tok.getLocation();
@@ -2602,8 +2668,8 @@ bool Parser::ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
// error, do lookahead to try to do better recovery. This never applies
// within a type specifier. Outside of C++, we allow this even if the
// language doesn't "officially" support implicit int -- we support
- // implicit int as an extension in C99 and C11.
- if (!isTypeSpecifier(DSC) && !getLangOpts().CPlusPlus &&
+ // implicit int as an extension in some language modes.
+ if (!isTypeSpecifier(DSC) && getLangOpts().isImplicitIntAllowed() &&
isValidAfterIdentifierInDeclarator(NextToken())) {
// If this token is valid for implicit int, e.g. "static x = 4", then
// we just avoid eating the identifier, so it will be parsed as the
@@ -2825,6 +2891,8 @@ Parser::getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context) {
if (Context == DeclaratorContext::AliasDecl ||
Context == DeclaratorContext::AliasTemplate)
return DeclSpecContext::DSC_alias_declaration;
+ if (Context == DeclaratorContext::Association)
+ return DeclSpecContext::DSC_association;
return DeclSpecContext::DSC_normal;
}
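
DeclaratorContext::Association is the type-name slot of a generic association, so the ':' that follows must end the association rather than open a class base-clause (see the ParseClassSpecifier change further down). A sketch using _Generic, which Clang also accepts in C++:

    int v = _Generic(0, int : 1, default : 0);
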
@@ -3077,7 +3145,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
bool EnteringContext = (DSContext == DeclSpecContext::DSC_class ||
DSContext == DeclSpecContext::DSC_top_level);
bool AttrsLastTime = false;
- ParsedAttributesWithRange attrs(AttrFactory);
+ ParsedAttributes attrs(AttrFactory);
// We use Sema's policy to get bool macros right.
PrintingPolicy Policy = Actions.getPrintingPolicy();
while (true) {
@@ -3129,10 +3197,30 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
if (!AttrsLastTime)
ProhibitAttributes(attrs);
else {
- // Reject C++11 attributes that appertain to decl specifiers as
- // we don't support any C++11 attributes that appertain to decl
- // specifiers. This also conforms to what g++ 4.8 is doing.
- ProhibitCXX11Attributes(attrs, diag::err_attribute_not_type_attr);
+ // Reject C++11 / C2x attributes that aren't type attributes.
+ for (const ParsedAttr &PA : attrs) {
+ if (!PA.isCXX11Attribute() && !PA.isC2xAttribute())
+ continue;
+ if (PA.getKind() == ParsedAttr::UnknownAttribute)
+ // We will warn about the unknown attribute elsewhere (in
+ // SemaDeclAttr.cpp).
+ continue;
+ // GCC ignores this attribute when placed on the DeclSpec in [[]]
+ // syntax, so we do the same.
+ if (PA.getKind() == ParsedAttr::AT_VectorSize) {
+ Diag(PA.getLoc(), diag::warn_attribute_ignored) << PA;
+ PA.setInvalid();
+ continue;
+ }
+ // We reject AT_LifetimeBound and AT_AnyX86NoCfCheck, even though they
+ // are type attributes, because we historically haven't allowed these
+ // to be used as type attributes in C++11 / C2x syntax.
+ if (PA.isTypeAttr() && PA.getKind() != ParsedAttr::AT_LifetimeBound &&
+ PA.getKind() != ParsedAttr::AT_AnyX86NoCfCheck)
+ continue;
+ Diag(PA.getLoc(), diag::err_attribute_not_type_attr) << PA;
+ PA.setInvalid();
+ }
DS.takeAttributesFrom(attrs);
}
@@ -3321,7 +3409,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
continue;
// Eat the scope spec so the identifier is current.
ConsumeAnnotationToken();
- ParsedAttributesWithRange Attrs(AttrFactory);
+ ParsedAttributes Attrs(AttrFactory);
if (ParseImplicitInt(DS, &SS, TemplateInfo, AS, DSContext, Attrs)) {
if (!Attrs.empty()) {
AttrsLastTime = true;
@@ -3482,7 +3570,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
goto DoneWithDeclSpec;
if (Tok.isNot(tok::identifier))
continue;
- ParsedAttributesWithRange Attrs(AttrFactory);
+ ParsedAttributes Attrs(AttrFactory);
if (ParseImplicitInt(DS, nullptr, TemplateInfo, AS, DSContext, Attrs)) {
if (!Attrs.empty()) {
AttrsLastTime = true;
@@ -3613,8 +3701,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// Attributes support.
case tok::kw___attribute:
case tok::kw___declspec:
- ParseAttributes(PAKM_GNU | PAKM_Declspec, DS.getAttributes(), nullptr,
- LateAttrs);
+ ParseAttributes(PAKM_GNU | PAKM_Declspec, DS.getAttributes(), LateAttrs);
continue;
// Microsoft single token adornments.
@@ -3656,6 +3743,11 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
ParseOpenCLKernelAttributes(DS.getAttributes());
continue;
+ // CUDA/HIP single token adornments.
+ case tok::kw___noinline__:
+ ParseCUDAFunctionAttributes(DS.getAttributes());
+ continue;
+
// Nullability type specifiers.
case tok::kw__Nonnull:
case tok::kw__Nullable:
@@ -4046,7 +4138,7 @@ void Parser::ParseDeclarationSpecifiers(DeclSpec &DS,
// These are attributes following class specifiers.
// To produce better diagnostic, we parse them when
// parsing class specifier.
- ParsedAttributesWithRange Attributes(AttrFactory);
+ ParsedAttributes Attributes(AttrFactory);
ParseClassSpecifier(Kind, Loc, DS, TemplateInfo, AS,
EnteringContext, DSContext, Attributes);
@@ -4251,9 +4343,8 @@ void Parser::ParseStructDeclaration(
}
// Parse leading attributes.
- ParsedAttributesWithRange Attrs(AttrFactory);
+ ParsedAttributes Attrs(AttrFactory);
MaybeParseCXX11Attributes(Attrs);
- DS.takeAttributesFrom(Attrs);
// Parse the common specifier-qualifiers-list piece.
ParseSpecifierQualifierList(DS);
@@ -4261,9 +4352,14 @@ void Parser::ParseStructDeclaration(
// If there are no declarators, this is a free-standing declaration
// specifier. Let the actions module cope with it.
if (Tok.is(tok::semi)) {
+ // C2x 6.7.2.1p9: "The optional attribute specifier sequence in a
+ // member declaration appertains to each of the members declared by the
+ // member declarator list; it shall not appear if the optional member
+ // declarator list is omitted."
+ ProhibitAttributes(Attrs);
RecordDecl *AnonRecord = nullptr;
- Decl *TheDecl = Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS_none,
- DS, AnonRecord);
+ Decl *TheDecl = Actions.ParsedFreeStandingDeclSpec(
+ getCurScope(), AS_none, DS, ParsedAttributesView::none(), AnonRecord);
assert(!AnonRecord && "Did not expect anonymous struct or union here");
DS.complete(TheDecl);
return;
@@ -4273,7 +4369,7 @@ void Parser::ParseStructDeclaration(
bool FirstDeclarator = true;
SourceLocation CommaLoc;
while (true) {
- ParsingFieldDeclarator DeclaratorInfo(*this, DS);
+ ParsingFieldDeclarator DeclaratorInfo(*this, DS, Attrs);
DeclaratorInfo.D.setCommaLoc(CommaLoc);
// Attributes are only allowed here on successive declarators.
@@ -4371,7 +4467,7 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
if (Tok.isOneOf(tok::annot_pragma_openmp, tok::annot_attr_openmp)) {
// Result can be ignored, because it must be always empty.
AccessSpecifier AS = AS_none;
- ParsedAttributesWithRange Attrs(AttrFactory);
+ ParsedAttributes Attrs(AttrFactory);
(void)ParseOpenMPDeclarativeDirectiveWithExtDecl(AS, Attrs);
continue;
}
@@ -4439,8 +4535,7 @@ void Parser::ParseStructUnionBody(SourceLocation RecordLoc,
// If attributes exist after struct contents, parse them.
MaybeParseGNUAttributes(attrs);
- SmallVector<Decl *, 32> FieldDecls(TagDecl->field_begin(),
- TagDecl->field_end());
+ SmallVector<Decl *, 32> FieldDecls(TagDecl->fields());
Actions.ActOnFields(getCurScope(), RecordLoc, TagDecl, FieldDecls,
T.getOpenLocation(), T.getCloseLocation(), attrs);
@@ -4490,14 +4585,14 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
}
// If attributes exist after tag, parse them.
- ParsedAttributesWithRange attrs(AttrFactory);
+ ParsedAttributes attrs(AttrFactory);
MaybeParseAttributes(PAKM_GNU | PAKM_Declspec | PAKM_CXX11, attrs);
SourceLocation ScopedEnumKWLoc;
bool IsScopedUsingClassTag = false;
// In C++11, recognize 'enum class' and 'enum struct'.
- if (Tok.isOneOf(tok::kw_class, tok::kw_struct)) {
+ if (Tok.isOneOf(tok::kw_class, tok::kw_struct) && getLangOpts().CPlusPlus) {
Diag(Tok, getLangOpts().CPlusPlus11 ? diag::warn_cxx98_compat_scoped_enum
: diag::ext_scoped_enum);
IsScopedUsingClassTag = Tok.is(tok::kw_class);
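
The added CPlusPlus guard keeps the scoped-enum lookahead out of C, where 'class' and 'struct' in this position would be misparsed. The C++-only forms in question:

    enum class Color { Red, Green };
    enum struct Mode { On, Off };
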
@@ -4524,7 +4619,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
// Determine whether this declaration is permitted to have an enum-base.
AllowDefiningTypeSpec AllowEnumSpecifier =
- isDefiningTypeSpecifierContext(DSC);
+ isDefiningTypeSpecifierContext(DSC, getLangOpts().CPlusPlus);
bool CanBeOpaqueEnumDeclaration =
DS.isEmpty() && isOpaqueEnumDeclarationContext(DSC);
bool CanHaveEnumBase = (getLangOpts().CPlusPlus11 || getLangOpts().ObjC ||
@@ -4590,8 +4685,8 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
TypeResult BaseType;
SourceRange BaseRange;
- bool CanBeBitfield = (getCurScope()->getFlags() & Scope::ClassScope) &&
- ScopedEnumKWLoc.isInvalid() && Name;
+ bool CanBeBitfield =
+ getCurScope()->isClassScope() && ScopedEnumKWLoc.isInvalid() && Name;
// Parse the fixed underlying type.
if (Tok.is(tok::colon)) {
@@ -4633,7 +4728,8 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
// declares 'enum E : int; E *p;' not 'enum E : int*; E p;'.
DeclSpec DS(AttrFactory);
ParseSpecifierQualifierList(DS, AS, DeclSpecContext::DSC_type_specifier);
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TypeName);
BaseType = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
BaseRange = SourceRange(ColonLoc, DeclaratorInfo.getSourceRange().getEnd());
@@ -4838,7 +4934,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
Decl *D = SkipBody.CheckSameAsPrevious ? SkipBody.New : TagDecl;
ParseEnumBody(StartLoc, D);
if (SkipBody.CheckSameAsPrevious &&
- !Actions.ActOnDuplicateDefinition(DS, TagDecl, SkipBody)) {
+ !Actions.ActOnDuplicateDefinition(TagDecl, SkipBody)) {
DS.SetTypeSpecError();
return;
}
@@ -4893,7 +4989,7 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) {
SourceLocation IdentLoc = ConsumeToken();
// If attributes exist after the enumerator, parse them.
- ParsedAttributesWithRange attrs(AttrFactory);
+ ParsedAttributes attrs(AttrFactory);
MaybeParseGNUAttributes(attrs);
if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) {
if (getLangOpts().CPlusPlus)
@@ -4987,7 +5083,7 @@ void Parser::ParseEnumBody(SourceLocation StartLoc, Decl *EnumDecl) {
// The next token must be valid after an enum definition. If not, a ';'
// was probably forgotten.
- bool CanBeBitfield = getCurScope()->getFlags() & Scope::ClassScope;
+ bool CanBeBitfield = getCurScope()->isClassScope();
if (!isValidAfterTypeSpecifier(CanBeBitfield)) {
ExpectAndConsume(tok::semi, diag::err_expected_after, "enum");
// Push this token back into the preprocessor and change our current token
@@ -5574,9 +5670,9 @@ void Parser::ParseTypeQualifierListOpt(
Optional<llvm::function_ref<void()>> CodeCompletionHandler) {
if (standardAttributesAllowed() && (AttrReqs & AR_CXX11AttributesParsed) &&
isCXX11AttributeSpecifier()) {
- ParsedAttributesWithRange attrs(AttrFactory);
- ParseCXX11Attributes(attrs);
- DS.takeAttributesFrom(attrs);
+ ParsedAttributes Attrs(AttrFactory);
+ ParseCXX11Attributes(Attrs);
+ DS.takeAttributesFrom(Attrs);
}
SourceLocation EndLoc;
@@ -5717,11 +5813,12 @@ void Parser::ParseTypeQualifierListOpt(
}
/// ParseDeclarator - Parse and verify a newly-initialized declarator.
-///
void Parser::ParseDeclarator(Declarator &D) {
/// This implements the 'declarator' production in the C grammar, then checks
/// for well-formedness and issues diagnostics.
- ParseDeclaratorInternal(D, &Parser::ParseDirectDeclarator);
+ Actions.runWithSufficientStackSpace(D.getBeginLoc(), [&] {
+ ParseDeclaratorInternal(D, &Parser::ParseDirectDeclarator);
+ });
}
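
Declarator parsing recurses once per pointer, reference, or paren level, so a pathological declarator could previously overflow the stack; runWithSufficientStackSpace ensures headroom (or warns) before recursing. A toy input of the motivating shape:

    // Each '*' adds one ParseDeclaratorInternal frame:
    int ********************************p;
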
static bool isPtrOperatorToken(tok::TokenKind Kind, const LangOptions &Lang,
@@ -5753,7 +5850,7 @@ static bool isPtrOperatorToken(tok::TokenKind Kind, const LangOptions &Lang,
}
// Indicates whether the given declarator is a pipe declarator.
-static bool isPipeDeclerator(const Declarator &D) {
+static bool isPipeDeclarator(const Declarator &D) {
const unsigned NumTypes = D.getNumTypeObjects();
for (unsigned Idx = 0; Idx != NumTypes; ++Idx)
@@ -5832,7 +5929,9 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
D.ExtendWithDeclSpec(DS);
// Recurse to parse whatever is left.
- ParseDeclaratorInternal(D, DirectDeclParser);
+ Actions.runWithSufficientStackSpace(D.getBeginLoc(), [&] {
+ ParseDeclaratorInternal(D, DirectDeclParser);
+ });
// Sema will have to catch (syntactically invalid) pointers into global
// scope. It has to catch pointers into namespace scope anyway.
@@ -5846,7 +5945,7 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
tok::TokenKind Kind = Tok.getKind();
- if (D.getDeclSpec().isTypeSpecPipe() && !isPipeDeclerator(D)) {
+ if (D.getDeclSpec().isTypeSpecPipe() && !isPipeDeclarator(D)) {
DeclSpec DS(AttrFactory);
ParseTypeQualifierListOpt(DS);
@@ -5881,7 +5980,8 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
D.ExtendWithDeclSpec(DS);
// Recursively parse the declarator.
- ParseDeclaratorInternal(D, DirectDeclParser);
+ Actions.runWithSufficientStackSpace(
+ D.getBeginLoc(), [&] { ParseDeclaratorInternal(D, DirectDeclParser); });
if (Kind == tok::star)
// Remember that we parsed a pointer type, and remember the type-quals.
D.AddTypeInfo(DeclaratorChunk::getPointer(
@@ -5926,7 +6026,8 @@ void Parser::ParseDeclaratorInternal(Declarator &D,
}
// Recursively parse the declarator.
- ParseDeclaratorInternal(D, DirectDeclParser);
+ Actions.runWithSufficientStackSpace(
+ D.getBeginLoc(), [&] { ParseDeclaratorInternal(D, DirectDeclParser); });
if (D.getNumTypeObjects() > 0) {
// C++ [dcl.ref]p4: There shall be no references to references.
@@ -6550,9 +6651,9 @@ void Parser::InitCXXThisScopeForDeclaratorIfRelevant(
/// declarator D up to a paren, which indicates that we are parsing function
/// arguments.
///
-/// If FirstArgAttrs is non-null, then the caller parsed those arguments
-/// immediately after the open paren - they should be considered to be the
-/// first argument of a parameter.
+/// If FirstArgAttrs is non-null, then the caller parsed those attributes
+/// immediately after the open paren - they will be applied to the DeclSpec
+/// of the first parameter.
///
/// If RequiresArg is true, then the first argument of the function is required
/// to be present and required to not be an identifier list.
@@ -6593,7 +6694,7 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
SmallVector<SourceRange, 2> DynamicExceptionRanges;
ExprResult NoexceptExpr;
CachedTokens *ExceptionSpecTokens = nullptr;
- ParsedAttributesWithRange FnAttrs(AttrFactory);
+ ParsedAttributes FnAttrs(AttrFactory);
TypeResult TrailingReturnType;
SourceLocation TrailingReturnTypeLoc;
@@ -6627,8 +6728,11 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
else if (RequiresArg)
Diag(Tok, diag::err_argument_required_after_attribute);
- HasProto = ParamInfo.size() || getLangOpts().CPlusPlus
- || getLangOpts().OpenCL;
+ // OpenCL disallows functions without a prototype, but it doesn't enforce
+ // strict prototypes as in C2x because it allows a function definition to
+ // have an identifier list. See OpenCL 3.0 6.11/g for more details.
+ HasProto = ParamInfo.size() || getLangOpts().requiresStrictPrototypes() ||
+ getLangOpts().OpenCL;
// If we have the closing ')', eat it.
Tracker.consumeClose();
@@ -6719,8 +6823,7 @@ void Parser::ParseFunctionDeclarator(Declarator &D,
// this in C and not C++, where the decls will continue to live in the
// surrounding context.
SmallVector<NamedDecl *, 0> DeclsInPrototype;
- if (getCurScope()->getFlags() & Scope::FunctionDeclarationScope &&
- !getLangOpts().CPlusPlus) {
+ if (getCurScope()->isFunctionDeclarationScope() && !getLangOpts().CPlusPlus) {
for (Decl *D : getCurScope()->decls()) {
NamedDecl *ND = dyn_cast<NamedDecl>(D);
if (!ND || isa<ParmVarDecl>(ND))
@@ -6766,7 +6869,7 @@ bool Parser::ParseRefQualifier(bool &RefQualifierIsLValueRef,
/// Note that identifier-lists are only allowed for normal declarators, not for
/// abstract-declarators.
bool Parser::isFunctionDeclaratorIdentifierList() {
- return !getLangOpts().CPlusPlus
+ return !getLangOpts().requiresStrictPrototypes()
&& Tok.is(tok::identifier)
&& !TryAltiVecVectorToken()
// K&R identifier lists can't have typedefs as identifiers, per C99
@@ -6800,6 +6903,10 @@ bool Parser::isFunctionDeclaratorIdentifierList() {
void Parser::ParseFunctionDeclaratorIdentifierList(
Declarator &D,
SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo) {
+ // We should never reach this point in C2x or C++.
+ assert(!getLangOpts().requiresStrictPrototypes() &&
+ "Cannot parse an identifier list in C2x or C++");
+
// If there was no identifier specified for the declarator, either we are in
// an abstract-declarator, or we are in a parameter declarator which was found
// to be abstract. In abstract-declarators, identifier lists are not valid:
@@ -6848,7 +6955,7 @@ void Parser::ParseFunctionDeclaratorIdentifierList(
///
/// DeclContext is the context of the declarator being parsed. If FirstArgAttrs
/// is non-null, then the caller parsed those attributes immediately after the
-/// open paren - they should be considered to be part of the first parameter.
+/// open paren - they will be applied to the DeclSpec of the first parameter.
///
/// After returning, ParamInfo will hold the parsed parameters. EllipsisLoc will
/// be the location of the ellipsis, if any was parsed.
@@ -6874,10 +6981,9 @@ void Parser::ParseFunctionDeclaratorIdentifierList(
/// [C++11] attribute-specifier-seq parameter-declaration
///
void Parser::ParseParameterDeclarationClause(
- DeclaratorContext DeclaratorCtx,
- ParsedAttributes &FirstArgAttrs,
- SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
- SourceLocation &EllipsisLoc) {
+ DeclaratorContext DeclaratorCtx, ParsedAttributes &FirstArgAttrs,
+ SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo,
+ SourceLocation &EllipsisLoc) {
// Avoid exceeding the maximum function scope depth.
// See https://bugs.llvm.org/show_bug.cgi?id=19607
@@ -6901,37 +7007,42 @@ void Parser::ParseParameterDeclarationClause(
// Just use the ParsingDeclaration "scope" of the declarator.
DeclSpec DS(AttrFactory);
- // Parse any C++11 attributes.
- MaybeParseCXX11Attributes(DS.getAttributes());
+ ParsedAttributes ArgDeclAttrs(AttrFactory);
+ ParsedAttributes ArgDeclSpecAttrs(AttrFactory);
- // Skip any Microsoft attributes before a param.
- MaybeParseMicrosoftAttributes(DS.getAttributes());
+ if (FirstArgAttrs.Range.isValid()) {
+ // If the caller parsed attributes for the first argument, add them now.
+ // Take them so that we only apply the attributes to the first parameter.
+ // We have already started parsing the decl-specifier sequence, so don't
+ // parse any parameter-declaration pieces that precede it.
+ ArgDeclSpecAttrs.takeAllFrom(FirstArgAttrs);
+ } else {
+ // Parse any C++11 attributes.
+ MaybeParseCXX11Attributes(ArgDeclAttrs);
- SourceLocation DSStart = Tok.getLocation();
+ // Skip any Microsoft attributes before a param.
+ MaybeParseMicrosoftAttributes(ArgDeclSpecAttrs);
+ }
- // If the caller parsed attributes for the first argument, add them now.
- // Take them so that we only apply the attributes to the first parameter.
- // FIXME: If we can leave the attributes in the token stream somehow, we can
- // get rid of a parameter (FirstArgAttrs) and this statement. It might be
- // too much hassle.
- DS.takeAttributesFrom(FirstArgAttrs);
+ SourceLocation DSStart = Tok.getLocation();
ParseDeclarationSpecifiers(DS);
-
+ DS.takeAttributesFrom(ArgDeclSpecAttrs);
// Parse the declarator. This is "PrototypeContext" or
// "LambdaExprParameterContext", because we must accept either
// 'declarator' or 'abstract-declarator' here.
- Declarator ParmDeclarator(
- DS, DeclaratorCtx == DeclaratorContext::RequiresExpr
- ? DeclaratorContext::RequiresExpr
- : DeclaratorCtx == DeclaratorContext::LambdaExpr
- ? DeclaratorContext::LambdaExprParameter
- : DeclaratorContext::Prototype);
+ Declarator ParmDeclarator(DS, ArgDeclAttrs,
+ DeclaratorCtx == DeclaratorContext::RequiresExpr
+ ? DeclaratorContext::RequiresExpr
+ : DeclaratorCtx == DeclaratorContext::LambdaExpr
+ ? DeclaratorContext::LambdaExprParameter
+ : DeclaratorContext::Prototype);
ParseDeclarator(ParmDeclarator);
// Parse GNU attributes, if present.
MaybeParseGNUAttributes(ParmDeclarator);
+ MaybeParseHLSLSemantics(DS.getAttributes());
if (Tok.is(tok::kw_requires)) {
// User tried to define a requires clause in a parameter declaration,
@@ -7035,8 +7146,16 @@ void Parser::ParseParameterDeclarationClause(
if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) {
Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
DefArgResult = ParseBraceInitializer();
- } else
+ } else {
+ if (Tok.is(tok::l_paren) && NextToken().is(tok::l_brace)) {
+ Diag(Tok, diag::err_stmt_expr_in_default_arg) << 0;
+ Actions.ActOnParamDefaultArgumentError(Param, EqualLoc);
+ // Skip the statement expression and continue parsing
+ SkipUntil(tok::comma, StopBeforeMatch);
+ continue;
+ }
DefArgResult = ParseAssignmentExpression();
+ }
DefArgResult = Actions.CorrectDelayedTyposInExpr(DefArgResult);
if (DefArgResult.isInvalid()) {
Actions.ActOnParamDefaultArgumentError(Param, EqualLoc);
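
The new early check rejects GNU statement expressions in default arguments instead of letting them fail later in Sema; a sketch (statement expressions are a GNU extension):

    void f(int x = ({ 1; }));  // error: statement expression in default argument
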
@@ -7217,7 +7336,8 @@ void Parser::ParseMisplacedBracketDeclarator(Declarator &D) {
assert(!D.mayOmitIdentifier() && "Declarator cannot omit identifier");
SourceLocation StartBracketLoc = Tok.getLocation();
- Declarator TempDeclarator(D.getDeclSpec(), D.getContext());
+ Declarator TempDeclarator(D.getDeclSpec(), ParsedAttributesView::none(),
+ D.getContext());
while (Tok.is(tok::l_square)) {
ParseBracketDeclarator(TempDeclarator);
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp b/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp
index c08a586604b1..143b373e9ea5 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseDeclCXX.cpp
@@ -10,15 +10,16 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/Parse/Parser.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/PrettyDeclStackTrace.h"
+#include "clang/Basic/AttributeCommonInfo.h"
#include "clang/Basic/Attributes.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/OperatorKinds.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
#include "clang/Parse/RAIIObjectsForParser.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ParsedTemplate.h"
@@ -73,16 +74,28 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
InnerNamespaceInfoList ExtraNSs;
SourceLocation FirstNestedInlineLoc;
- ParsedAttributesWithRange attrs(AttrFactory);
- SourceLocation attrLoc;
- if (getLangOpts().CPlusPlus11 && isCXX11AttributeSpecifier()) {
- Diag(Tok.getLocation(), getLangOpts().CPlusPlus17
- ? diag::warn_cxx14_compat_ns_enum_attribute
- : diag::ext_ns_enum_attribute)
- << 0 /*namespace*/;
- attrLoc = Tok.getLocation();
- ParseCXX11Attributes(attrs);
- }
+ ParsedAttributes attrs(AttrFactory);
+
+ auto ReadAttributes = [&] {
+ bool MoreToParse;
+ do {
+ MoreToParse = false;
+ if (Tok.is(tok::kw___attribute)) {
+ ParseGNUAttributes(attrs);
+ MoreToParse = true;
+ }
+ if (getLangOpts().CPlusPlus11 && isCXX11AttributeSpecifier()) {
+ Diag(Tok.getLocation(), getLangOpts().CPlusPlus17
+ ? diag::warn_cxx14_compat_ns_enum_attribute
+ : diag::ext_ns_enum_attribute)
+ << 0 /*namespace*/;
+ ParseCXX11Attributes(attrs);
+ MoreToParse = true;
+ }
+ } while (MoreToParse);
+ };
+
+ ReadAttributes();
if (Tok.is(tok::identifier)) {
Ident = Tok.getIdentifierInfo();
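
ReadAttributes is now invoked both before and after the namespace name, so GNU and C++11 specifiers are collected in either position. Illustrative forms:

    namespace [[deprecated]] A { }                          // before the name
    namespace B __attribute__((visibility("hidden"))) { }   // after the name
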
@@ -108,16 +121,14 @@ Parser::DeclGroupPtrTy Parser::ParseNamespace(DeclaratorContext Context,
}
}
+ ReadAttributes();
+
+ SourceLocation attrLoc = attrs.Range.getBegin();
+
// A nested namespace definition cannot have attributes.
if (!ExtraNSs.empty() && attrLoc.isValid())
Diag(attrLoc, diag::err_unexpected_nested_namespace_attribute);
- // Read label attributes, if present.
- if (Tok.is(tok::kw___attribute)) {
- attrLoc = Tok.getLocation();
- ParseGNUAttributes(attrs);
- }
-
if (Tok.is(tok::equal)) {
if (!Ident) {
Diag(Tok, diag::err_expected) << tok::identifier;
@@ -241,9 +252,9 @@ void Parser::ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs,
if (index == InnerNSs.size()) {
while (!tryParseMisplacedModuleImport() && Tok.isNot(tok::r_brace) &&
Tok.isNot(tok::eof)) {
- ParsedAttributesWithRange attrs(AttrFactory);
- MaybeParseCXX11Attributes(attrs);
- ParseExternalDeclaration(attrs);
+ ParsedAttributes Attrs(AttrFactory);
+ MaybeParseCXX11Attributes(Attrs);
+ ParseExternalDeclaration(Attrs);
}
// The caller is what called check -- we are simply calling
@@ -344,8 +355,8 @@ Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context) {
getCurScope(), DS.getSourceRange().getBegin(), Lang.get(),
Tok.is(tok::l_brace) ? Tok.getLocation() : SourceLocation());
- ParsedAttributesWithRange attrs(AttrFactory);
- MaybeParseCXX11Attributes(attrs);
+ ParsedAttributes DeclAttrs(AttrFactory);
+ MaybeParseCXX11Attributes(DeclAttrs);
if (Tok.isNot(tok::l_brace)) {
// Reset the source range in DS, as the leading "extern"
@@ -354,7 +365,7 @@ Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context) {
DS.SetRangeEnd(SourceLocation());
// ... but anyway remember that such an "extern" was seen.
DS.setExternInLinkageSpec(true);
- ParseExternalDeclaration(attrs, &DS);
+ ParseExternalDeclaration(DeclAttrs, &DS);
return LinkageSpec ? Actions.ActOnFinishLinkageSpecification(
getCurScope(), LinkageSpec, SourceLocation())
: nullptr;
@@ -362,7 +373,7 @@ Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context) {
DS.abort();
- ProhibitAttributes(attrs);
+ ProhibitAttributes(DeclAttrs);
BalancedDelimiterTracker T(*this, tok::l_brace);
T.consumeOpen();
@@ -394,9 +405,9 @@ Decl *Parser::ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context) {
break;
LLVM_FALLTHROUGH;
default:
- ParsedAttributesWithRange attrs(AttrFactory);
- MaybeParseCXX11Attributes(attrs);
- ParseExternalDeclaration(attrs);
+ ParsedAttributes Attrs(AttrFactory);
+ MaybeParseCXX11Attributes(Attrs);
+ ParseExternalDeclaration(Attrs);
continue;
}
@@ -426,9 +437,8 @@ Decl *Parser::ParseExportDeclaration() {
if (Tok.isNot(tok::l_brace)) {
// FIXME: Factor out a ParseExternalDeclarationWithAttrs.
- ParsedAttributesWithRange Attrs(AttrFactory);
+ ParsedAttributes Attrs(AttrFactory);
MaybeParseCXX11Attributes(Attrs);
- MaybeParseMicrosoftAttributes(Attrs);
ParseExternalDeclaration(Attrs);
return Actions.ActOnFinishExportDecl(getCurScope(), ExportDecl,
SourceLocation());
@@ -446,9 +456,8 @@ Decl *Parser::ParseExportDeclaration() {
while (!tryParseMisplacedModuleImport() && Tok.isNot(tok::r_brace) &&
Tok.isNot(tok::eof)) {
- ParsedAttributesWithRange Attrs(AttrFactory);
+ ParsedAttributes Attrs(AttrFactory);
MaybeParseCXX11Attributes(Attrs);
- MaybeParseMicrosoftAttributes(Attrs);
ParseExternalDeclaration(Attrs);
}
@@ -459,11 +468,9 @@ Decl *Parser::ParseExportDeclaration() {
/// ParseUsingDirectiveOrDeclaration - Parse C++ using using-declaration or
/// using-directive. Assumes that current token is 'using'.
-Parser::DeclGroupPtrTy
-Parser::ParseUsingDirectiveOrDeclaration(DeclaratorContext Context,
- const ParsedTemplateInfo &TemplateInfo,
- SourceLocation &DeclEnd,
- ParsedAttributesWithRange &attrs) {
+Parser::DeclGroupPtrTy Parser::ParseUsingDirectiveOrDeclaration(
+ DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
+ SourceLocation &DeclEnd, ParsedAttributes &Attrs) {
assert(Tok.is(tok::kw_using) && "Not using token");
ObjCDeclContextSwitch ObjCDC(*this);
@@ -492,12 +499,12 @@ Parser::ParseUsingDirectiveOrDeclaration(DeclaratorContext Context,
<< 0 /* directive */ << R << FixItHint::CreateRemoval(R);
}
- Decl *UsingDir = ParseUsingDirective(Context, UsingLoc, DeclEnd, attrs);
+ Decl *UsingDir = ParseUsingDirective(Context, UsingLoc, DeclEnd, Attrs);
return Actions.ConvertDeclToDeclGroup(UsingDir);
}
// Otherwise, it must be a using-declaration or an alias-declaration.
- return ParseUsingDeclaration(Context, TemplateInfo, UsingLoc, DeclEnd, attrs,
+ return ParseUsingDeclaration(Context, TemplateInfo, UsingLoc, DeclEnd, Attrs,
AS_none);
}
@@ -672,11 +679,10 @@ bool Parser::ParseUsingDeclarator(DeclaratorContext Context,
///
/// elaborated-enum-specifier:
/// 'enum' nested-name-specifier[opt] identifier
-Parser::DeclGroupPtrTy
-Parser::ParseUsingDeclaration(
+Parser::DeclGroupPtrTy Parser::ParseUsingDeclaration(
DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo,
SourceLocation UsingLoc, SourceLocation &DeclEnd,
- ParsedAttributesWithRange &PrefixAttrs, AccessSpecifier AS) {
+ ParsedAttributes &PrefixAttrs, AccessSpecifier AS) {
SourceLocation UELoc;
bool InInitStatement = Context == DeclaratorContext::SelectionInit ||
Context == DeclaratorContext::ForInit;
@@ -714,7 +720,7 @@ Parser::ParseUsingDeclaration(
// Check for misplaced attributes before the identifier in an
// alias-declaration.
- ParsedAttributesWithRange MisplacedAttrs(AttrFactory);
+ ParsedAttributes MisplacedAttrs(AttrFactory);
MaybeParseCXX11Attributes(MisplacedAttrs);
if (InInitStatement && Tok.isNot(tok::identifier))
@@ -723,7 +729,7 @@ Parser::ParseUsingDeclaration(
UsingDeclarator D;
bool InvalidDeclarator = ParseUsingDeclarator(Context, D);
- ParsedAttributesWithRange Attrs(AttrFactory);
+ ParsedAttributes Attrs(AttrFactory);
MaybeParseAttributes(PAKM_GNU | PAKM_CXX11, Attrs);
// If we had any misplaced attributes from earlier, this is where they
@@ -1030,10 +1036,9 @@ SourceLocation Parser::ParseDecltypeSpecifier(DeclSpec &DS) {
}
// Check for C++1y 'decltype(auto)'.
- if (Tok.is(tok::kw_auto)) {
- // No need to disambiguate here: an expression can't start with 'auto',
- // because the typename-specifier in a function-style cast operation can't
- // be 'auto'.
+ if (Tok.is(tok::kw_auto) && NextToken().is(tok::r_paren)) {
+ // The typename-specifier in a function-style cast expression may
+ // be 'auto' since C++2b.
Diag(Tok.getLocation(),
getLangOpts().CPlusPlus14
? diag::warn_cxx11_compat_decltype_auto_type_specifier
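
The extra NextToken() check distinguishes the decltype(auto) type-specifier from a C++2b functional cast to 'auto' appearing inside decltype:

    decltype(auto) x = 0;     // type-specifier, as before
    decltype(auto(0)) y = 0;  // C++2b: decltype of a prvalue cast to 'auto' (int)
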
@@ -1212,7 +1217,8 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
EndLocation = ParseDecltypeSpecifier(DS);
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TypeName);
return Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
}
@@ -1304,7 +1310,8 @@ TypeResult Parser::ParseBaseTypeSpecifier(SourceLocation &BaseLoc,
DS.SetTypeSpecType(TST_typename, IdLoc, PrevSpec, DiagID, Type,
Actions.getASTContext().getPrintingPolicy());
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TypeName);
return Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
}
@@ -1462,9 +1469,9 @@ bool Parser::isValidAfterTypeSpecifier(bool CouldBeBitfield) {
void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
SourceLocation StartLoc, DeclSpec &DS,
const ParsedTemplateInfo &TemplateInfo,
- AccessSpecifier AS,
- bool EnteringContext, DeclSpecContext DSC,
- ParsedAttributesWithRange &Attributes) {
+ AccessSpecifier AS, bool EnteringContext,
+ DeclSpecContext DSC,
+ ParsedAttributes &Attributes) {
DeclSpec::TST TagType;
if (TagTokKind == tok::kw_struct)
TagType = DeclSpec::TST_struct;
@@ -1495,7 +1502,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
(TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate);
SuppressAccessChecks diagsFromTag(*this, shouldDelayDiagsInTag);
- ParsedAttributesWithRange attrs(AttrFactory);
+ ParsedAttributes attrs(AttrFactory);
// If attributes exist after tag, parse them.
MaybeParseAttributes(PAKM_CXX11 | PAKM_Declspec | PAKM_GNU, attrs);
@@ -1747,11 +1754,13 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
const PrintingPolicy &Policy = Actions.getASTContext().getPrintingPolicy();
Sema::TagUseKind TUK;
- if (isDefiningTypeSpecifierContext(DSC) == AllowDefiningTypeSpec::No ||
+ if (isDefiningTypeSpecifierContext(DSC, getLangOpts().CPlusPlus) ==
+ AllowDefiningTypeSpec::No ||
(getLangOpts().OpenMP && OpenMPDirectiveParsing))
TUK = Sema::TUK_Reference;
else if (Tok.is(tok::l_brace) ||
- (getLangOpts().CPlusPlus && Tok.is(tok::colon)) ||
+ (DSC != DeclSpecContext::DSC_association &&
+ getLangOpts().CPlusPlus && Tok.is(tok::colon)) ||
(isClassCompatibleKeyword() &&
(NextToken().is(tok::l_brace) || NextToken().is(tok::colon)))) {
if (DS.isFriendSpecified()) {
@@ -2043,8 +2052,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// Parse the definition body.
ParseStructUnionBody(StartLoc, TagType, cast<RecordDecl>(D));
if (SkipBody.CheckSameAsPrevious &&
- !Actions.ActOnDuplicateDefinition(DS, TagOrTempResult.get(),
- SkipBody)) {
+ !Actions.ActOnDuplicateDefinition(TagOrTempResult.get(), SkipBody)) {
DS.SetTypeSpecError();
return;
}
@@ -2154,7 +2162,7 @@ BaseResult Parser::ParseBaseSpecifier(Decl *ClassDecl) {
bool IsVirtual = false;
SourceLocation StartLoc = Tok.getLocation();
- ParsedAttributesWithRange Attributes(AttrFactory);
+ ParsedAttributes Attributes(AttrFactory);
MaybeParseCXX11Attributes(Attributes);
// Parse the 'virtual' keyword.
@@ -2165,8 +2173,11 @@ BaseResult Parser::ParseBaseSpecifier(Decl *ClassDecl) {
// Parse an (optional) access specifier.
AccessSpecifier Access = getAccessSpecifierIfPresent();
- if (Access != AS_none)
+ if (Access != AS_none) {
ConsumeToken();
+ if (getLangOpts().HLSL)
+ Diag(Tok.getLocation(), diag::ext_hlsl_access_specifiers);
+ }
CheckMisplacedCXX11Attribute(Attributes, StartLoc);
@@ -2671,23 +2682,15 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
TemplateInfo, TemplateDiags);
}
- ParsedAttributesWithRange attrs(AttrFactory);
- ParsedAttributesViewWithRange FnAttrs;
+ ParsedAttributes DeclAttrs(AttrFactory);
// Optional C++11 attribute-specifier
- MaybeParseCXX11Attributes(attrs);
+ MaybeParseCXX11Attributes(DeclAttrs);
// The next token may be an OpenMP pragma annotation token. That would
// normally be handled from ParseCXXClassMemberDeclarationWithPragmas, but in
// this case, it came from an *attribute* rather than a pragma. Handle it now.
if (Tok.is(tok::annot_attr_openmp))
- return ParseOpenMPDeclarativeDirectiveWithExtDecl(AS, attrs);
-
- // We need to keep these attributes for future diagnostic
- // before they are taken over by declaration specifier.
- FnAttrs.addAll(attrs.begin(), attrs.end());
- FnAttrs.Range = attrs.Range;
-
- MaybeParseMicrosoftAttributes(attrs);
+ return ParseOpenMPDeclarativeDirectiveWithExtDecl(AS, DeclAttrs);
if (Tok.is(tok::kw_using)) {
// Eat 'using'.
@@ -2708,16 +2711,20 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
SourceLocation DeclEnd;
// Otherwise, it must be a using-declaration or an alias-declaration.
return ParseUsingDeclaration(DeclaratorContext::Member, TemplateInfo,
- UsingLoc, DeclEnd, attrs, AS);
+ UsingLoc, DeclEnd, DeclAttrs, AS);
}
+ ParsedAttributes DeclSpecAttrs(AttrFactory);
+ MaybeParseMicrosoftAttributes(DeclSpecAttrs);
+
// Hold late-parsed attributes so we can attach a Decl to them later.
LateParsedAttrList CommonLateParsedAttrs;
// decl-specifier-seq:
// Parse the common declaration-specifiers piece.
ParsingDeclSpec DS(*this, TemplateDiags);
- DS.takeAttributesFrom(attrs);
+ DS.takeAttributesFrom(DeclSpecAttrs);
+
if (MalformedTypeSpec)
DS.SetTypeSpecError();
@@ -2755,11 +2762,11 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
if (TryConsumeToken(tok::semi)) {
if (DS.isFriendSpecified())
- ProhibitAttributes(FnAttrs);
+ ProhibitAttributes(DeclAttrs);
RecordDecl *AnonRecord = nullptr;
Decl *TheDecl = Actions.ParsedFreeStandingDeclSpec(
- getCurScope(), AS, DS, TemplateParams, false, AnonRecord);
+ getCurScope(), AS, DS, DeclAttrs, TemplateParams, false, AnonRecord);
DS.complete(TheDecl);
if (AnonRecord) {
Decl* decls[] = {AnonRecord, TheDecl};
@@ -2768,7 +2775,8 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
return Actions.ConvertDeclToDeclGroup(TheDecl);
}
- ParsingDeclarator DeclaratorInfo(*this, DS, DeclaratorContext::Member);
+ ParsingDeclarator DeclaratorInfo(*this, DS, DeclAttrs,
+ DeclaratorContext::Member);
if (TemplateInfo.TemplateParams)
DeclaratorInfo.setTemplateParameterLists(TemplateParams);
VirtSpecifiers VS;
@@ -2860,7 +2868,7 @@ Parser::ParseCXXClassMemberDeclaration(AccessSpecifier AS,
DS.isFriendSpecified()) {
// Diagnose attributes that appear before decl specifier:
// [[]] friend int foo();
- ProhibitAttributes(FnAttrs);
+ ProhibitAttributes(DeclAttrs);
}
if (DefinitionKind != FunctionDefinitionKind::Declaration) {
@@ -3167,7 +3175,7 @@ void Parser::SkipCXXMemberSpecification(SourceLocation RecordLoc,
// Diagnose any C++11 attributes after 'final' keyword.
// We deliberately discard these attributes.
- ParsedAttributesWithRange Attrs(AttrFactory);
+ ParsedAttributes Attrs(AttrFactory);
CheckMisplacedCXX11Attribute(Attrs, AttrFixitLoc);
// This can only happen if we had malformed misplaced attributes;
@@ -3207,14 +3215,15 @@ void Parser::SkipCXXMemberSpecification(SourceLocation RecordLoc,
T.skipToEnd();
// Parse and discard any trailing attributes.
- ParsedAttributes Attrs(AttrFactory);
- if (Tok.is(tok::kw___attribute))
+ if (Tok.is(tok::kw___attribute)) {
+ ParsedAttributes Attrs(AttrFactory);
MaybeParseGNUAttributes(Attrs);
+ }
}
Parser::DeclGroupPtrTy Parser::ParseCXXClassMemberDeclarationWithPragmas(
- AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs,
- DeclSpec::TST TagType, Decl *TagDecl) {
+ AccessSpecifier &AS, ParsedAttributes &AccessAttrs, DeclSpec::TST TagType,
+ Decl *TagDecl) {
ParenBraceBracketBalancer BalancerRAIIObj(*this);
switch (Tok.getKind()) {
@@ -3264,6 +3273,8 @@ Parser::DeclGroupPtrTy Parser::ParseCXXClassMemberDeclarationWithPragmas(
LLVM_FALLTHROUGH;
case tok::kw_public:
case tok::kw_protected: {
+ if (getLangOpts().HLSL)
+ Diag(Tok.getLocation(), diag::ext_hlsl_access_specifiers);
AccessSpecifier NewAS = getAccessSpecifierIfPresent();
assert(NewAS != AS_none);
// Current token is a C++ access specifier.
@@ -3324,7 +3335,7 @@ Parser::DeclGroupPtrTy Parser::ParseCXXClassMemberDeclarationWithPragmas(
///
void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
SourceLocation AttrFixitLoc,
- ParsedAttributesWithRange &Attrs,
+ ParsedAttributes &Attrs,
unsigned TagType, Decl *TagDecl) {
assert((TagType == DeclSpec::TST_struct ||
TagType == DeclSpec::TST_interface ||
@@ -3360,7 +3371,7 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
break;
}
- if ((S->getFlags() & Scope::FnScope))
+ if (S->isFunctionScope())
// If we're in a function or function template then this is a local
// class rather than a nested class.
break;
@@ -3503,12 +3514,13 @@ void Parser::ParseCXXMemberSpecification(SourceLocation RecordLoc,
// C++ 11p3: Members of a class defined with the keyword class are private
// by default. Members of a class defined with the keywords struct or union
// are public by default.
+  // HLSL: Members of a class are public by default.
AccessSpecifier CurAS;
- if (TagType == DeclSpec::TST_class)
+ if (TagType == DeclSpec::TST_class && !getLangOpts().HLSL)
CurAS = AS_private;
else
CurAS = AS_public;
- ParsedAttributesWithRange AccessAttrs(AttrFactory);
+ ParsedAttributes AccessAttrs(AttrFactory);
if (TagDecl) {
// While we still have something to read, read the member-declarations.
@@ -4296,10 +4308,20 @@ bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
ParsedAttr::Syntax Syntax =
LO.CPlusPlus ? ParsedAttr::AS_CXX11 : ParsedAttr::AS_C2x;
+  // Try parsing Microsoft attributes.
+ if (getLangOpts().MicrosoftExt || getLangOpts().HLSL) {
+ if (hasAttribute(AttributeCommonInfo::Syntax::AS_Microsoft, ScopeName,
+ AttrName, getTargetInfo(), getLangOpts()))
+ Syntax = ParsedAttr::AS_Microsoft;
+ }
+
// If the attribute isn't known, we will not attempt to parse any
// arguments.
- if (!hasAttribute(LO.CPlusPlus ? AttrSyntax::CXX : AttrSyntax::C, ScopeName,
- AttrName, getTargetInfo(), getLangOpts())) {
+ if (Syntax != ParsedAttr::AS_Microsoft &&
+ !hasAttribute(LO.CPlusPlus ? AttributeCommonInfo::Syntax::AS_CXX11
+ : AttributeCommonInfo::Syntax::AS_C2x,
+ ScopeName, AttrName, getTargetInfo(), getLangOpts())) {
+ if (getLangOpts().MicrosoftExt || getLangOpts().HLSL) {}
// Eat the left paren, then skip to the ending right paren.
ConsumeParen();
SkipUntil(tok::r_paren);
@@ -4361,7 +4383,7 @@ bool Parser::ParseCXX11AttributeArgs(IdentifierInfo *AttrName,
return true;
}
-/// ParseCXX11AttributeSpecifier - Parse a C++11 or C2x attribute-specifier.
+/// Parse a C++11 or C2x attribute-specifier.
///
/// [C++11] attribute-specifier:
/// '[' '[' attribute-list ']' ']'
@@ -4514,19 +4536,17 @@ void Parser::ParseCXX11AttributeSpecifierInternal(ParsedAttributes &Attrs,
///
/// attribute-specifier-seq:
/// attribute-specifier-seq[opt] attribute-specifier
-void Parser::ParseCXX11Attributes(ParsedAttributesWithRange &attrs,
- SourceLocation *endLoc) {
+void Parser::ParseCXX11Attributes(ParsedAttributes &Attrs) {
assert(standardAttributesAllowed());
- SourceLocation StartLoc = Tok.getLocation(), Loc;
- if (!endLoc)
- endLoc = &Loc;
+ SourceLocation StartLoc = Tok.getLocation();
+ SourceLocation EndLoc = StartLoc;
do {
- ParseCXX11AttributeSpecifier(attrs, endLoc);
+ ParseCXX11AttributeSpecifier(Attrs, &EndLoc);
} while (isCXX11AttributeSpecifier());
- attrs.Range = SourceRange(StartLoc, *endLoc);
+ Attrs.Range = SourceRange(StartLoc, EndLoc);
}
void Parser::DiagnoseAndSkipCXX11Attributes() {
@@ -4659,10 +4679,11 @@ void Parser::ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs) {
/// [MS] ms-attribute-seq:
/// ms-attribute[opt]
/// ms-attribute ms-attribute-seq
-void Parser::ParseMicrosoftAttributes(ParsedAttributes &attrs,
- SourceLocation *endLoc) {
+void Parser::ParseMicrosoftAttributes(ParsedAttributes &Attrs) {
assert(Tok.is(tok::l_square) && "Not a Microsoft attribute list");
+ SourceLocation StartLoc = Tok.getLocation();
+ SourceLocation EndLoc = StartLoc;
do {
// FIXME: If this is actually a C++11 attribute, parse it as one.
BalancedDelimiterTracker T(*this, tok::l_square);
@@ -4682,15 +4703,37 @@ void Parser::ParseMicrosoftAttributes(ParsedAttributes &attrs,
if (Tok.isNot(tok::identifier)) // ']', but also eof
break;
if (Tok.getIdentifierInfo()->getName() == "uuid")
- ParseMicrosoftUuidAttributeArgs(attrs);
- else
+ ParseMicrosoftUuidAttributeArgs(Attrs);
+ else {
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ SourceLocation NameLoc = Tok.getLocation();
ConsumeToken();
+ ParsedAttr::Kind AttrKind =
+ ParsedAttr::getParsedKind(II, nullptr, ParsedAttr::AS_Microsoft);
+ // For HLSL we want to handle all attributes, but for MSVC compat, we
+ // silently ignore unknown Microsoft attributes.
+ if (getLangOpts().HLSL || AttrKind != ParsedAttr::UnknownAttribute) {
+ bool AttrParsed = false;
+ if (Tok.is(tok::l_paren)) {
+ CachedTokens OpenMPTokens;
+ AttrParsed =
+ ParseCXX11AttributeArgs(II, NameLoc, Attrs, &EndLoc, nullptr,
+ SourceLocation(), OpenMPTokens);
+ ReplayOpenMPAttributeTokens(OpenMPTokens);
+ }
+ if (!AttrParsed) {
+ Attrs.addNew(II, NameLoc, nullptr, SourceLocation(), nullptr, 0,
+ ParsedAttr::AS_Microsoft);
+ }
+ }
+ }
}
T.consumeClose();
- if (endLoc)
- *endLoc = T.getCloseLocation();
+ EndLoc = T.getCloseLocation();
} while (Tok.is(tok::l_square));
+
+ Attrs.Range = SourceRange(StartLoc, EndLoc);
}
void Parser::ParseMicrosoftIfExistsClassDeclaration(
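Illustrative only: after the rewritten loop above, a known square-bracket Microsoft attribute gets its arguments parsed through ParseCXX11AttributeArgs, HLSL keeps every attribute, and unknown attributes are still dropped silently for MSVC compatibility. 'somevendor_thing' below is a made-up name:

    // Assumed -fms-extensions C++:
    [uuid("01234567-89ab-cdef-0123-456789abcdef")] class IThing;  // parsed as before
    [somevendor_thing(1)] class Widget;  // unknown: skipped without a diagnostic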
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp b/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp
index fbf79a0a8746..a6a946d7f31b 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseExpr.cpp
@@ -791,6 +791,7 @@ class CastExpressionIdValidator final : public CorrectionCandidateCallback {
/// [GNU] '__builtin_FUNCTION' '(' ')'
/// [GNU] '__builtin_LINE' '(' ')'
/// [CLANG] '__builtin_COLUMN' '(' ')'
+/// [GNU] '__builtin_source_location' '(' ')'
/// [GNU] '__builtin_types_compatible_p' '(' type-name ',' type-name ')'
/// [GNU] '__null'
/// [OBJC] '[' objc-message-expr ']'
@@ -1211,7 +1212,8 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
DS.SetTypeSpecType(TST_typename, ILoc, PrevSpec, DiagID, Typ,
Actions.getASTContext().getPrintingPolicy());
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TypeName);
TypeResult Ty = Actions.ActOnTypeName(getCurScope(),
DeclaratorInfo);
if (Ty.isInvalid())
@@ -1303,6 +1305,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
case tok::kw___builtin_FILE:
case tok::kw___builtin_FUNCTION:
case tok::kw___builtin_LINE:
+ case tok::kw___builtin_source_location:
if (NotPrimaryExpression)
*NotPrimaryExpression = true;
// This parses the complete suffix; we can return early.
@@ -1488,7 +1491,8 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
PrevSpec, DiagID, Type,
Actions.getASTContext().getPrintingPolicy());
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TypeName);
TypeResult Ty = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
if (Ty.isInvalid())
break;
@@ -1524,6 +1528,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
case tok::kw___float128:
case tok::kw___ibm128:
case tok::kw_void:
+ case tok::kw_auto:
case tok::kw_typename:
case tok::kw_typeof:
case tok::kw___vector:
@@ -1835,6 +1840,7 @@ ExprResult Parser::ParseCastExpression(CastParseKind ParseKind,
/// primary-expression
/// postfix-expression '[' expression ']'
/// postfix-expression '[' braced-init-list ']'
+/// postfix-expression '[' expression-list [opt] ']' [C++2b 12.4.5]
/// postfix-expression '(' argument-expression-list[opt] ')'
/// postfix-expression '.' identifier
/// postfix-expression '->' identifier
@@ -1898,30 +1904,58 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
(void)Actions.CorrectDelayedTyposInExpr(LHS);
return ExprError();
}
-
BalancedDelimiterTracker T(*this, tok::l_square);
T.consumeOpen();
Loc = T.getOpenLocation();
- ExprResult Idx, Length, Stride;
+ ExprResult Length, Stride;
SourceLocation ColonLocFirst, ColonLocSecond;
+ ExprVector ArgExprs;
+ bool HasError = false;
PreferredType.enterSubscript(Actions, Tok.getLocation(), LHS.get());
- if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) {
- Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
- Idx = ParseBraceInitializer();
- } else if (getLangOpts().OpenMP) {
- ColonProtectionRAIIObject RAII(*this);
- // Parse [: or [ expr or [ expr :
- if (!Tok.is(tok::colon)) {
- // [ expr
- Idx = ParseExpression();
+
+  // We try to parse a list of indexes in all language modes first and,
+  // if we find zero or one index, we try to parse an OpenMP array
+  // section. This allows us to support the C++2b multidimensional subscript
+  // and OpenMP sections in the same language mode.
+ if (!getLangOpts().OpenMP || Tok.isNot(tok::colon)) {
+ if (!getLangOpts().CPlusPlus2b) {
+ ExprResult Idx;
+ if (getLangOpts().CPlusPlus11 && Tok.is(tok::l_brace)) {
+ Diag(Tok, diag::warn_cxx98_compat_generalized_initializer_lists);
+ Idx = ParseBraceInitializer();
+ } else {
+ Idx = ParseExpression(); // May be a comma expression
+ }
+ LHS = Actions.CorrectDelayedTyposInExpr(LHS);
+ Idx = Actions.CorrectDelayedTyposInExpr(Idx);
+ if (Idx.isInvalid()) {
+ HasError = true;
+ } else {
+ ArgExprs.push_back(Idx.get());
+ }
+ } else if (Tok.isNot(tok::r_square)) {
+ CommaLocsTy CommaLocs;
+ if (ParseExpressionList(ArgExprs, CommaLocs)) {
+ LHS = Actions.CorrectDelayedTyposInExpr(LHS);
+ HasError = true;
+ }
+ assert(
+ (ArgExprs.empty() || ArgExprs.size() == CommaLocs.size() + 1) &&
+ "Unexpected number of commas!");
}
+ }
+
+ if (ArgExprs.size() <= 1 && getLangOpts().OpenMP) {
+ ColonProtectionRAIIObject RAII(*this);
if (Tok.is(tok::colon)) {
// Consume ':'
ColonLocFirst = ConsumeToken();
if (Tok.isNot(tok::r_square) &&
(getLangOpts().OpenMP < 50 ||
- ((Tok.isNot(tok::colon) && getLangOpts().OpenMP >= 50))))
+ ((Tok.isNot(tok::colon) && getLangOpts().OpenMP >= 50)))) {
Length = ParseExpression();
+ Length = Actions.CorrectDelayedTyposInExpr(Length);
+ }
}
if (getLangOpts().OpenMP >= 50 &&
(OMPClauseKind == llvm::omp::Clause::OMPC_to ||
@@ -1933,27 +1967,23 @@ Parser::ParsePostfixExpressionSuffix(ExprResult LHS) {
Stride = ParseExpression();
}
}
- } else
- Idx = ParseExpression();
+ }
SourceLocation RLoc = Tok.getLocation();
-
LHS = Actions.CorrectDelayedTyposInExpr(LHS);
- Idx = Actions.CorrectDelayedTyposInExpr(Idx);
- Length = Actions.CorrectDelayedTyposInExpr(Length);
- if (!LHS.isInvalid() && !Idx.isInvalid() && !Length.isInvalid() &&
+
+ if (!LHS.isInvalid() && !HasError && !Length.isInvalid() &&
!Stride.isInvalid() && Tok.is(tok::r_square)) {
if (ColonLocFirst.isValid() || ColonLocSecond.isValid()) {
LHS = Actions.ActOnOMPArraySectionExpr(
- LHS.get(), Loc, Idx.get(), ColonLocFirst, ColonLocSecond,
- Length.get(), Stride.get(), RLoc);
+ LHS.get(), Loc, ArgExprs.empty() ? nullptr : ArgExprs[0],
+ ColonLocFirst, ColonLocSecond, Length.get(), Stride.get(), RLoc);
} else {
LHS = Actions.ActOnArraySubscriptExpr(getCurScope(), LHS.get(), Loc,
- Idx.get(), RLoc);
+ ArgExprs, RLoc);
}
} else {
LHS = ExprError();
- Idx = ExprError();
}
// Match the ']'.
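A sketch of what the restructured subscript parsing accepts; Grid, buf, and n are placeholders:

    struct Grid { int &operator[](int i, int j); };  // C++2b multidimensional operator[]
    void use(Grid g, int *buf, int n) {
      g[1, 2] = 0;                     // parsed as an expression-list, not a comma expression
    #pragma omp target map(buf[0:n])   // the zero-or-one-index path still feeds array sections
      {}
    }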
@@ -2268,7 +2298,8 @@ Parser::ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok,
if (isTypeIdUnambiguously()) {
DeclSpec DS(AttrFactory);
ParseSpecifierQualifierList(DS);
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TypeName);
ParseDeclarator(DeclaratorInfo);
SourceLocation LParenLoc = PP.getLocForEndOfToken(OpTok.getLocation());
@@ -2482,6 +2513,7 @@ ExprResult Parser::ParseUnaryExprOrTypeTraitExpression() {
/// [GNU] '__builtin_FUNCTION' '(' ')'
/// [GNU] '__builtin_LINE' '(' ')'
/// [CLANG] '__builtin_COLUMN' '(' ')'
+/// [GNU] '__builtin_source_location' '(' ')'
/// [OCL] '__builtin_astype' '(' assignment-expression ',' type-name ')'
///
/// [GNU] offsetof-member-designator:
@@ -2704,7 +2736,8 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
case tok::kw___builtin_COLUMN:
case tok::kw___builtin_FILE:
case tok::kw___builtin_FUNCTION:
- case tok::kw___builtin_LINE: {
+ case tok::kw___builtin_LINE:
+ case tok::kw___builtin_source_location: {
// Attempt to consume the r-paren.
if (Tok.isNot(tok::r_paren)) {
Diag(Tok, diag::err_expected) << tok::r_paren;
@@ -2721,6 +2754,8 @@ ExprResult Parser::ParseBuiltinPrimaryExpression() {
return SourceLocExpr::Line;
case tok::kw___builtin_COLUMN:
return SourceLocExpr::Column;
+ case tok::kw___builtin_source_location:
+ return SourceLocExpr::SourceLocStruct;
default:
llvm_unreachable("invalid keyword");
}
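Usage sketch: like __builtin_LINE(), the new builtin captures its call site, but it evaluates to a pointer to the implementation-defined record backing std::source_location (in practice that struct must be visible, e.g. via <source_location>):

    void log_here() {
      // Points at the file/line/function data for this call site.
      const void *Where = __builtin_source_location();
      (void)Where;
    }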
@@ -2838,7 +2873,8 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
// None of these cases should fall through with an invalid Result
// unless they've already reported an error.
if (ExprType >= CompoundStmt && Tok.is(tok::l_brace)) {
- Diag(Tok, diag::ext_gnu_statement_expr);
+ Diag(Tok, OpenLoc.isMacroID() ? diag::ext_gnu_statement_expr_macro
+ : diag::ext_gnu_statement_expr);
checkCompoundToken(OpenLoc, tok::l_paren, CompoundToken::StmtExprBegin);
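Effect sketch (hypothetical input): a GNU statement expression whose opening paren comes from a macro expansion now gets the dedicated ext_gnu_statement_expr_macro diagnostic, so macro-heavy code can be filtered separately:

    #define ONCE(x) ({ int v_ = (x); v_; })  // '(' originates in a macro
    void f() {
      int a = ONCE(3);   // diagnosed as ext_gnu_statement_expr_macro
      int b = ({ 4; });  // diagnosed as ext_gnu_statement_expr, as before
    }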
@@ -2926,7 +2962,8 @@ Parser::ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr,
// Parse the type declarator.
DeclSpec DS(AttrFactory);
ParseSpecifierQualifierList(DS);
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TypeName);
ParseDeclarator(DeclaratorInfo);
// If our type is followed by an identifier and either ':' or ']', then
@@ -3243,7 +3280,7 @@ ExprResult Parser::ParseGenericSelectionExpression() {
Ty = nullptr;
} else {
ColonProtectionRAIIObject X(*this);
- TypeResult TR = ParseTypeName();
+ TypeResult TR = ParseTypeName(nullptr, DeclaratorContext::Association);
if (TR.isInvalid()) {
SkipUntil(tok::r_paren, StopAtSemi);
return ExprError();
@@ -3356,7 +3393,9 @@ ExprResult Parser::ParseFoldExpression(ExprResult LHS,
/// \endverbatim
bool Parser::ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
SmallVectorImpl<SourceLocation> &CommaLocs,
- llvm::function_ref<void()> ExpressionStarts) {
+ llvm::function_ref<void()> ExpressionStarts,
+ bool FailImmediatelyOnInvalidExpr,
+ bool EarlyTypoCorrection) {
bool SawError = false;
while (true) {
if (ExpressionStarts)
@@ -3369,6 +3408,9 @@ bool Parser::ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
} else
Expr = ParseAssignmentExpression();
+ if (EarlyTypoCorrection)
+ Expr = Actions.CorrectDelayedTyposInExpr(Expr);
+
if (Tok.is(tok::ellipsis))
Expr = Actions.ActOnPackExpansion(Expr.get(), ConsumeToken());
else if (Tok.is(tok::code_completion)) {
@@ -3382,8 +3424,10 @@ bool Parser::ParseExpressionList(SmallVectorImpl<Expr *> &Exprs,
break;
}
if (Expr.isInvalid()) {
- SkipUntil(tok::comma, tok::r_paren, StopBeforeMatch);
SawError = true;
+ if (FailImmediatelyOnInvalidExpr)
+ break;
+ SkipUntil(tok::comma, tok::r_paren, StopBeforeMatch);
} else {
Exprs.push_back(Expr.get());
}
@@ -3454,7 +3498,8 @@ void Parser::ParseBlockId(SourceLocation CaretLoc) {
ParseSpecifierQualifierList(DS);
// Parse the block-declarator.
- Declarator DeclaratorInfo(DS, DeclaratorContext::BlockLiteral);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::BlockLiteral);
DeclaratorInfo.setFunctionDefinitionKind(FunctionDefinitionKind::Definition);
ParseDeclarator(DeclaratorInfo);
@@ -3493,7 +3538,8 @@ ExprResult Parser::ParseBlockLiteralExpression() {
// Parse the return type if present.
DeclSpec DS(AttrFactory);
- Declarator ParamInfo(DS, DeclaratorContext::BlockLiteral);
+ Declarator ParamInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::BlockLiteral);
ParamInfo.setFunctionDefinitionKind(FunctionDefinitionKind::Definition);
// FIXME: Since the return type isn't actually parsed, it can't be used to
// fill ParamInfo with an initial valid range, so do it manually.
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp b/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp
index 2d38891c723f..9bd89eddb455 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseExprCXX.cpp
@@ -453,8 +453,8 @@ bool Parser::ParseOptionalCXXScopeSpecifier(
bool IsCorrectedToColon = false;
bool *CorrectionFlagPtr = ColonIsSacred ? &IsCorrectedToColon : nullptr;
if (Actions.ActOnCXXNestedNameSpecifier(
- getCurScope(), IdInfo, EnteringContext, SS, false,
- CorrectionFlagPtr, OnlyNamespace)) {
+ getCurScope(), IdInfo, EnteringContext, SS, CorrectionFlagPtr,
+ OnlyNamespace)) {
// Identifier is not recognized as a nested name, but we can have
// mistyped '::' instead of ':'.
if (CorrectionFlagPtr && IsCorrectedToColon) {
@@ -1248,7 +1248,7 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
// Parse lambda-declarator[opt].
DeclSpec DS(AttrFactory);
- Declarator D(DS, DeclaratorContext::LambdaExpr);
+ Declarator D(DS, ParsedAttributesView::none(), DeclaratorContext::LambdaExpr);
TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
Actions.PushLambdaScope();
@@ -1355,7 +1355,8 @@ ExprResult Parser::ParseLambdaExpressionAfterIntroducer(
DeclEndLoc = ESpecRange.getEnd();
// Parse attribute-specifier[opt].
- MaybeParseCXX11Attributes(Attr, &DeclEndLoc);
+ if (MaybeParseCXX11Attributes(Attr))
+ DeclEndLoc = Attr.Range.getEnd();
// Parse OpenCL addr space attribute.
if (Tok.isOneOf(tok::kw___private, tok::kw___global, tok::kw___local,
@@ -1522,7 +1523,8 @@ ExprResult Parser::ParseCXXCasts() {
ParseSpecifierQualifierList(DS);
// Parse the abstract-declarator, if present.
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TypeName);
ParseDeclarator(DeclaratorInfo);
SourceLocation RAngleBracketLoc = Tok.getLocation();
@@ -1848,7 +1850,8 @@ ExprResult Parser::ParseCXXThis() {
/// In C++1z onwards, the type specifier can also be a template-name.
ExprResult
Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
- Declarator DeclaratorInfo(DS, DeclaratorContext::FunctionalCast);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::FunctionalCast);
ParsedType TypeRep = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo).get();
assert((Tok.is(tok::l_paren) ||
@@ -1912,7 +1915,7 @@ Parser::ParseCXXTypeConstructExpression(const DeclSpec &DS) {
Parser::DeclGroupPtrTy
Parser::ParseAliasDeclarationInInitStatement(DeclaratorContext Context,
- ParsedAttributesWithRange &Attrs) {
+ ParsedAttributes &Attrs) {
assert(Tok.is(tok::kw_using) && "Expected using");
assert((Context == DeclaratorContext::ForInit ||
Context == DeclaratorContext::SelectionInit) &&
@@ -1991,7 +1994,7 @@ Parser::ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc,
return Sema::ConditionError();
}
- ParsedAttributesWithRange attrs(AttrFactory);
+ ParsedAttributes attrs(AttrFactory);
MaybeParseCXX11Attributes(attrs);
const auto WarnOnInit = [this, &CK] {
@@ -2047,9 +2050,11 @@ Parser::ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc,
if (Tok.is(tok::kw_using))
DG = ParseAliasDeclarationInInitStatement(
DeclaratorContext::SelectionInit, attrs);
- else
+ else {
+ ParsedAttributes DeclSpecAttrs(AttrFactory);
DG = ParseSimpleDeclaration(DeclaratorContext::SelectionInit, DeclEnd,
- attrs, /*RequireSemi=*/true);
+ attrs, DeclSpecAttrs, /*RequireSemi=*/true);
+ }
*InitStmt = Actions.ActOnDeclStmt(DG, DeclStart, DeclEnd);
return ParseCXXCondition(nullptr, Loc, CK, MissingOK);
}
@@ -2060,8 +2065,9 @@ Parser::ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc,
// permitted here.
assert(FRI && "should not parse a for range declaration here");
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
- DeclGroupPtrTy DG = ParseSimpleDeclaration(DeclaratorContext::ForInit,
- DeclEnd, attrs, false, FRI);
+ ParsedAttributes DeclSpecAttrs(AttrFactory);
+ DeclGroupPtrTy DG = ParseSimpleDeclaration(
+ DeclaratorContext::ForInit, DeclEnd, attrs, DeclSpecAttrs, false, FRI);
FRI->LoopVar = Actions.ActOnDeclStmt(DG, DeclStart, Tok.getLocation());
assert((FRI->ColonLoc.isValid() || !DG) &&
"cannot find for range declaration");
@@ -2078,11 +2084,10 @@ Parser::ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc,
// type-specifier-seq
DeclSpec DS(AttrFactory);
- DS.takeAttributesFrom(attrs);
ParseSpecifierQualifierList(DS, AS_none, DeclSpecContext::DSC_condition);
// declarator
- Declarator DeclaratorInfo(DS, DeclaratorContext::Condition);
+ Declarator DeclaratorInfo(DS, attrs, DeclaratorContext::Condition);
ParseDeclarator(DeclaratorInfo);
// simple-asm-expr[opt]
@@ -2231,6 +2236,9 @@ void Parser::ParseCXXSimpleTypeSpecifier(DeclSpec &DS) {
case tok::kw_void:
DS.SetTypeSpecType(DeclSpec::TST_void, Loc, PrevSpec, DiagID, Policy);
break;
+ case tok::kw_auto:
+ DS.SetTypeSpecType(DeclSpec::TST_auto, Loc, PrevSpec, DiagID, Policy);
+ break;
case tok::kw_char:
DS.SetTypeSpecType(DeclSpec::TST_char, Loc, PrevSpec, DiagID, Policy);
break;
@@ -2732,7 +2740,8 @@ bool Parser::ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext,
// Parse the conversion-declarator, which is merely a sequence of
// ptr-operators.
- Declarator D(DS, DeclaratorContext::ConversionId);
+ Declarator D(DS, ParsedAttributesView::none(),
+ DeclaratorContext::ConversionId);
ParseDeclaratorInternal(D, /*DirectDeclParser=*/nullptr);
// Finish up the type.
@@ -3090,7 +3099,8 @@ Parser::ParseCXXNewExpression(bool UseGlobal, SourceLocation Start) {
SourceRange TypeIdParens;
DeclSpec DS(AttrFactory);
- Declarator DeclaratorInfo(DS, DeclaratorContext::CXXNew);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::CXXNew);
if (Tok.is(tok::l_paren)) {
// If it turns out to be a placement, we change the type location.
BalancedDelimiterTracker T(*this, tok::l_paren);
@@ -3941,7 +3951,8 @@ Parser::ParseCXXAmbiguousParenExpression(ParenParseOption &ExprType,
if (ParseAs >= CompoundLiteral) {
// Parse the type declarator.
DeclSpec DS(AttrFactory);
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TypeName);
{
ColonProtectionRAIIObject InnerColonProtection(*this);
ParseSpecifierQualifierList(DS);
@@ -4019,7 +4030,8 @@ ExprResult Parser::ParseBuiltinBitCast() {
ParseSpecifierQualifierList(DS);
// Parse the abstract-declarator, if present.
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TypeName);
ParseDeclarator(DeclaratorInfo);
if (ExpectAndConsume(tok::comma)) {
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseHLSL.cpp b/contrib/llvm-project/clang/lib/Parse/ParseHLSL.cpp
new file mode 100644
index 000000000000..ed1f81dc4ee8
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Parse/ParseHLSL.cpp
@@ -0,0 +1,42 @@
+//===--- ParseHLSL.cpp - HLSL-specific parsing support --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the parsing logic for HLSL language features.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Basic/AttributeCommonInfo.h"
+#include "clang/Parse/ParseDiagnostic.h"
+#include "clang/Parse/Parser.h"
+
+using namespace clang;
+
+void Parser::ParseHLSLSemantics(ParsedAttributes &Attrs,
+ SourceLocation *EndLoc) {
+ assert(Tok.is(tok::colon) && "Not a HLSL Semantic");
+ ConsumeToken();
+
+ if (!Tok.is(tok::identifier)) {
+ Diag(Tok.getLocation(), diag::err_expected_semantic_identifier);
+ return;
+ }
+
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ SourceLocation Loc = ConsumeToken();
+ if (EndLoc)
+ *EndLoc = Tok.getLocation();
+ ParsedAttr::Kind AttrKind =
+ ParsedAttr::getParsedKind(II, nullptr, ParsedAttr::AS_HLSLSemantic);
+
+ if (AttrKind == ParsedAttr::UnknownAttribute) {
+ Diag(Loc, diag::err_unknown_hlsl_semantic) << II;
+ return;
+ }
+ Attrs.addNew(II, Loc, nullptr, SourceLocation(), nullptr, 0,
+ ParsedAttr::AS_HLSLSemantic);
+}
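What this new entry point consumes, sketched in HLSL-like syntax; the colon-prefixed annotations are the semantics:

    float4 main(float4 pos : SV_Position) : SV_Target { return pos; }
    // 'float4 f() : not_a_semantic' would draw err_unknown_hlsl_semantic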
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp b/contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp
index f493ac9b92ca..734c66f65dc2 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseObjc.cpp
@@ -45,8 +45,7 @@ void Parser::MaybeSkipAttributes(tok::ObjCKeywordKind Kind) {
/// [OBJC] objc-protocol-definition
/// [OBJC] objc-method-definition
/// [OBJC] '@' 'end'
-Parser::DeclGroupPtrTy
-Parser::ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs) {
+Parser::DeclGroupPtrTy Parser::ParseObjCAtDirectives(ParsedAttributes &Attrs) {
SourceLocation AtLoc = ConsumeToken(); // the "@"
if (Tok.is(tok::code_completion)) {
@@ -79,7 +78,8 @@ Parser::ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs) {
break;
case tok::objc_import:
if (getLangOpts().Modules || getLangOpts().DebuggerSupport) {
- SingleDecl = ParseModuleImport(AtLoc);
+ Sema::ModuleImportState IS = Sema::ModuleImportState::NotACXX20Module;
+ SingleDecl = ParseModuleImport(AtLoc, IS);
break;
}
Diag(AtLoc, diag::err_atimport);
@@ -283,7 +283,7 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
/*consumeLastToken=*/true))
return nullptr;
- Decl *CategoryType = Actions.ActOnStartCategoryInterface(
+ ObjCCategoryDecl *CategoryType = Actions.ActOnStartCategoryInterface(
AtLoc, nameId, nameLoc, typeParameterList, categoryId, categoryLoc,
ProtocolRefs.data(), ProtocolRefs.size(), ProtocolLocs.data(),
EndProtoLoc, attrs);
@@ -353,7 +353,7 @@ Decl *Parser::ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc,
Actions.ActOnTypedefedProtocols(protocols, protocolLocs,
superClassId, superClassLoc);
- Decl *ClsType = Actions.ActOnStartClassInterface(
+ ObjCInterfaceDecl *ClsType = Actions.ActOnStartClassInterface(
getCurScope(), AtLoc, nameId, nameLoc, typeParameterList, superClassId,
superClassLoc, typeArgs,
SourceRange(typeArgsLAngleLoc, typeArgsRAngleLoc), protocols.data(),
@@ -650,19 +650,21 @@ void Parser::ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey,
if (Tok.is(tok::r_brace))
break;
- ParsedAttributesWithRange attrs(AttrFactory);
+ ParsedAttributes EmptyAttrs(AttrFactory);
// Since we call ParseDeclarationOrFunctionDefinition() instead of
// ParseExternalDeclaration() below (so that this doesn't parse nested
// @interfaces), this needs to duplicate some code from the latter.
if (Tok.isOneOf(tok::kw_static_assert, tok::kw__Static_assert)) {
SourceLocation DeclEnd;
- allTUVariables.push_back(
- ParseDeclaration(DeclaratorContext::File, DeclEnd, attrs));
+ ParsedAttributes EmptyDeclSpecAttrs(AttrFactory);
+ allTUVariables.push_back(ParseDeclaration(
+ DeclaratorContext::File, DeclEnd, EmptyAttrs, EmptyDeclSpecAttrs));
continue;
}
- allTUVariables.push_back(ParseDeclarationOrFunctionDefinition(attrs));
+ allTUVariables.push_back(
+ ParseDeclarationOrFunctionDefinition(EmptyAttrs));
continue;
}
@@ -1225,6 +1227,10 @@ static void takeDeclAttributes(ParsedAttributesView &attrs,
/// declarator and add them to the given list.
static void takeDeclAttributes(ParsedAttributes &attrs,
Declarator &D) {
+ // This gets called only from Parser::ParseObjCTypeName(), and that should
+ // never add declaration attributes to the Declarator.
+ assert(D.getDeclarationAttributes().empty());
+
// First, take ownership of all attributes.
attrs.getPool().takeAllFrom(D.getAttributePool());
attrs.getPool().takeAllFrom(D.getDeclSpec().getAttributePool());
@@ -1268,7 +1274,7 @@ ParsedType Parser::ParseObjCTypeName(ObjCDeclSpec &DS,
if (context == DeclaratorContext::ObjCResult)
dsContext = DeclSpecContext::DSC_objc_method_result;
ParseSpecifierQualifierList(declSpec, AS_none, dsContext);
- Declarator declarator(declSpec, context);
+ Declarator declarator(declSpec, ParsedAttributesView::none(), context);
ParseDeclarator(declarator);
// If that's not invalid, extract a type.
@@ -1487,7 +1493,8 @@ Decl *Parser::ParseObjCMethodDecl(SourceLocation mLoc,
DeclSpec DS(AttrFactory);
ParseDeclarationSpecifiers(DS);
// Parse the declarator.
- Declarator ParmDecl(DS, DeclaratorContext::Prototype);
+ Declarator ParmDecl(DS, ParsedAttributesView::none(),
+ DeclaratorContext::Prototype);
ParseDeclarator(ParmDecl);
IdentifierInfo *ParmII = ParmDecl.getIdentifier();
Decl *Param = Actions.ActOnParamDeclarator(getCurScope(), ParmDecl);
@@ -1693,7 +1700,8 @@ void Parser::parseObjCTypeArgsOrProtocolQualifiers(
typeArg, Actions.getASTContext().getPrintingPolicy());
// Form a declarator to turn this into a type.
- Declarator D(DS, DeclaratorContext::TypeName);
+ Declarator D(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TypeName);
TypeResult fullTypeArg = Actions.ActOnTypeName(getCurScope(), D);
if (fullTypeArg.isUsable()) {
typeArgs.push_back(fullTypeArg.get());
@@ -1864,16 +1872,16 @@ TypeResult Parser::parseObjCTypeArgsAndProtocolQualifiers(
protocolRAngleLoc);
}
-void Parser::HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc,
- BalancedDelimiterTracker &T,
- SmallVectorImpl<Decl *> &AllIvarDecls,
- bool RBraceMissing) {
+void Parser::HelperActionsForIvarDeclarations(
+ ObjCContainerDecl *interfaceDecl, SourceLocation atLoc,
+ BalancedDelimiterTracker &T, SmallVectorImpl<Decl *> &AllIvarDecls,
+ bool RBraceMissing) {
if (!RBraceMissing)
T.consumeClose();
- Actions.ActOnObjCContainerStartDefinition(interfaceDecl);
+ assert(getObjCDeclContext() == interfaceDecl &&
+ "Ivars should have interfaceDecl as their decl context");
Actions.ActOnLastBitfield(T.getCloseLocation(), AllIvarDecls);
- Actions.ActOnObjCContainerFinishDefinition();
// Call ActOnFields() even if we don't have any decls. This is useful
// for code rewriting tools that need to be aware of the empty list.
Actions.ActOnFields(getCurScope(), atLoc, interfaceDecl, AllIvarDecls,
@@ -1902,14 +1910,13 @@ void Parser::HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocatio
/// objc-instance-variable-decl:
/// struct-declaration
///
-void Parser::ParseObjCClassInstanceVariables(Decl *interfaceDecl,
+void Parser::ParseObjCClassInstanceVariables(ObjCContainerDecl *interfaceDecl,
tok::ObjCKeywordKind visibility,
SourceLocation atLoc) {
assert(Tok.is(tok::l_brace) && "expected {");
SmallVector<Decl *, 32> AllIvarDecls;
- ParseScope ClassScope(this, Scope::DeclScope|Scope::ClassScope);
- ObjCDeclContextSwitch ObjCDC(*this);
+ ParseScope ClassScope(this, Scope::DeclScope | Scope::ClassScope);
BalancedDelimiterTracker T(*this, tok::l_brace);
T.consumeOpen();
@@ -1973,13 +1980,13 @@ void Parser::ParseObjCClassInstanceVariables(Decl *interfaceDecl,
}
auto ObjCIvarCallback = [&](ParsingFieldDeclarator &FD) {
- Actions.ActOnObjCContainerStartDefinition(interfaceDecl);
+ assert(getObjCDeclContext() == interfaceDecl &&
+ "Ivar should have interfaceDecl as its decl context");
// Install the declarator into the interface decl.
FD.D.setObjCIvar(true);
Decl *Field = Actions.ActOnIvar(
getCurScope(), FD.D.getDeclSpec().getSourceRange().getBegin(), FD.D,
FD.BitfieldSize, visibility);
- Actions.ActOnObjCContainerFinishDefinition();
if (Field)
AllIvarDecls.push_back(Field);
FD.complete(Field);
@@ -2121,7 +2128,7 @@ Parser::ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
// We have a class or category name - consume it.
IdentifierInfo *nameId = Tok.getIdentifierInfo();
SourceLocation nameLoc = ConsumeToken(); // consume class or category name
- Decl *ObjCImpDecl = nullptr;
+ ObjCImplDecl *ObjCImpDecl = nullptr;
// Neither a type parameter list nor a list of protocol references is
// permitted here. Parse and diagnose them.
@@ -2216,7 +2223,7 @@ Parser::ParseObjCAtImplementationDeclaration(SourceLocation AtLoc,
{
ObjCImplParsingDataRAII ObjCImplParsing(*this, ObjCImpDecl);
while (!ObjCImplParsing.isFinished() && !isEofOrEom()) {
- ParsedAttributesWithRange attrs(AttrFactory);
+ ParsedAttributes attrs(AttrFactory);
MaybeParseCXX11Attributes(attrs);
if (DeclGroupPtrTy DGP = ParseExternalDeclaration(attrs)) {
DeclGroupRef DG = DGP.get();
@@ -2539,7 +2546,8 @@ StmtResult Parser::ParseObjCTryStmt(SourceLocation atLoc) {
if (Tok.isNot(tok::ellipsis)) {
DeclSpec DS(AttrFactory);
ParseDeclarationSpecifiers(DS);
- Declarator ParmDecl(DS, DeclaratorContext::ObjCCatch);
+ Declarator ParmDecl(DS, ParsedAttributesView::none(),
+ DeclaratorContext::ObjCCatch);
ParseDeclarator(ParmDecl);
// Inform the actions module about the declarator, so it
@@ -2955,7 +2963,8 @@ bool Parser::ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr) {
// We have a class message. Turn the simple-type-specifier or
// typename-specifier we parsed into a type and parse the
// remainder of the class message.
- Declarator DeclaratorInfo(DS, DeclaratorContext::TypeName);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TypeName);
TypeResult Type = Actions.ActOnTypeName(getCurScope(), DeclaratorInfo);
if (Type.isInvalid())
return true;
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp b/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp
index 8ad5edb1bcd6..08dccf9e43f7 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseOpenMP.cpp
@@ -150,10 +150,12 @@ static OpenMPDirectiveKindExWrapper parseOpenMPDirectiveKind(Parser &P) {
{OMPD_for, OMPD_simd, OMPD_for_simd},
{OMPD_parallel, OMPD_for, OMPD_parallel_for},
{OMPD_parallel_for, OMPD_simd, OMPD_parallel_for_simd},
+ {OMPD_parallel, OMPD_loop, OMPD_parallel_loop},
{OMPD_parallel, OMPD_sections, OMPD_parallel_sections},
{OMPD_taskloop, OMPD_simd, OMPD_taskloop_simd},
{OMPD_target, OMPD_parallel, OMPD_target_parallel},
{OMPD_target, OMPD_simd, OMPD_target_simd},
+ {OMPD_target_parallel, OMPD_loop, OMPD_target_parallel_loop},
{OMPD_target_parallel, OMPD_for, OMPD_target_parallel_for},
{OMPD_target_parallel_for, OMPD_simd, OMPD_target_parallel_for_simd},
{OMPD_teams, OMPD_distribute, OMPD_teams_distribute},
@@ -163,8 +165,10 @@ static OpenMPDirectiveKindExWrapper parseOpenMPDirectiveKind(Parser &P) {
OMPD_teams_distribute_parallel_for},
{OMPD_teams_distribute_parallel_for, OMPD_simd,
OMPD_teams_distribute_parallel_for_simd},
+ {OMPD_teams, OMPD_loop, OMPD_teams_loop},
{OMPD_target, OMPD_teams, OMPD_target_teams},
{OMPD_target_teams, OMPD_distribute, OMPD_target_teams_distribute},
+ {OMPD_target_teams, OMPD_loop, OMPD_target_teams_loop},
{OMPD_target_teams_distribute, OMPD_parallel,
OMPD_target_teams_distribute_parallel},
{OMPD_target_teams_distribute, OMPD_simd,
@@ -174,11 +178,17 @@ static OpenMPDirectiveKindExWrapper parseOpenMPDirectiveKind(Parser &P) {
{OMPD_target_teams_distribute_parallel_for, OMPD_simd,
OMPD_target_teams_distribute_parallel_for_simd},
{OMPD_master, OMPD_taskloop, OMPD_master_taskloop},
+ {OMPD_masked, OMPD_taskloop, OMPD_masked_taskloop},
{OMPD_master_taskloop, OMPD_simd, OMPD_master_taskloop_simd},
+ {OMPD_masked_taskloop, OMPD_simd, OMPD_masked_taskloop_simd},
{OMPD_parallel, OMPD_master, OMPD_parallel_master},
+ {OMPD_parallel, OMPD_masked, OMPD_parallel_masked},
{OMPD_parallel_master, OMPD_taskloop, OMPD_parallel_master_taskloop},
+ {OMPD_parallel_masked, OMPD_taskloop, OMPD_parallel_masked_taskloop},
{OMPD_parallel_master_taskloop, OMPD_simd,
- OMPD_parallel_master_taskloop_simd}};
+ OMPD_parallel_master_taskloop_simd},
+ {OMPD_parallel_masked_taskloop, OMPD_simd,
+ OMPD_parallel_masked_taskloop_simd}};
enum { CancellationPoint = 0, DeclareReduction = 1, TargetData = 2 };
Token Tok = P.getCurToken();
OpenMPDirectiveKindExWrapper DKind =
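The table above folds adjacent directive tokens pairwise, so the new 'masked' and 'loop' rows make spellings like these resolve to single composite directive kinds (illustrative; n and work are placeholders):

    #pragma omp parallel masked taskloop simd  // -> OMPD_parallel_masked_taskloop_simd
    for (int i = 0; i < n; ++i)
      work(i);
    #pragma omp target teams loop              // -> OMPD_target_teams_loop
    for (int i = 0; i < n; ++i)
      work(i);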
@@ -635,7 +645,7 @@ TypeResult Parser::parseOpenMPDeclareMapperVarDecl(SourceRange &Range,
// Parse the declarator.
DeclaratorContext Context = DeclaratorContext::Prototype;
- Declarator DeclaratorInfo(DS, Context);
+ Declarator DeclaratorInfo(DS, ParsedAttributesView::none(), Context);
ParseDeclarator(DeclaratorInfo);
Range = DeclaratorInfo.getSourceRange();
if (DeclaratorInfo.getIdentifier() == nullptr) {
@@ -740,7 +750,7 @@ static bool parseDeclareSimdClauses(
OpenMPClauseKind CKind = getOpenMPClauseKind(ClauseName);
if (CKind == OMPC_uniform || CKind == OMPC_aligned ||
CKind == OMPC_linear) {
- Parser::OpenMPVarListDataTy Data;
+ Sema::OpenMPVarListDataTy Data;
SmallVectorImpl<Expr *> *Vars = &Uniforms;
if (CKind == OMPC_aligned) {
Vars = &Aligneds;
@@ -953,6 +963,10 @@ static bool checkExtensionProperty(Parser &P, SourceLocation Loc,
TraitProperty::implementation_extension_allow_templates)
return true;
+ if (TIProperty.Kind ==
+ TraitProperty::implementation_extension_bind_to_declaration)
+ return true;
+
auto IsMatchExtension = [](OMPTraitProperty &TP) {
return (TP.Kind ==
llvm::omp::TraitProperty::implementation_extension_match_all ||
@@ -1433,7 +1447,7 @@ void Parser::ParseOMPDeclareVariantClauses(Parser::DeclGroupPtrTy Ptr,
case OMPC_adjust_args: {
AdjustArgsLoc = Tok.getLocation();
ConsumeToken();
- Parser::OpenMPVarListDataTy Data;
+ Sema::OpenMPVarListDataTy Data;
SmallVector<Expr *> Vars;
IsError = ParseOpenMPVarList(OMPD_declare_variant, OMPC_adjust_args,
Vars, Data);
@@ -1557,7 +1571,7 @@ bool Parser::parseOpenMPAppendArgs(
// Parse the interop-types.
if (Optional<OMPDeclareVariantAttr::InteropType> IType =
parseInteropTypeList(*this))
- InterOpTypes.push_back(IType.getValue());
+ InterOpTypes.push_back(*IType);
else
HasError = true;
@@ -1764,7 +1778,7 @@ void Parser::ParseOpenMPEndAssumesDirective(SourceLocation Loc) {
/// Parsing of simple OpenMP clauses like 'default' or 'proc_bind'.
///
/// default-clause:
-/// 'default' '(' 'none' | 'shared' | 'firstprivate' ')
+/// 'default' '(' 'none' | 'shared' | 'private' | 'firstprivate' ')'
///
/// proc_bind-clause:
/// 'proc_bind' '(' 'master' | 'close' | 'spread' ')
@@ -1830,7 +1844,7 @@ void Parser::ParseOMPDeclareTargetClauses(
bool IsIndirectClause = getLangOpts().OpenMP >= 51 &&
getOpenMPClauseKind(ClauseName) == OMPC_indirect;
- if (DTCI.Indirect.hasValue() && IsIndirectClause) {
+ if (DTCI.Indirect && IsIndirectClause) {
Diag(Tok, diag::err_omp_more_one_clause)
<< getOpenMPDirectiveName(OMPD_declare_target)
<< getOpenMPClauseName(OMPC_indirect) << 0;
@@ -1867,7 +1881,7 @@ void Parser::ParseOMPDeclareTargetClauses(
if (IsDeviceTypeClause) {
Optional<SimpleClauseData> DevTypeData =
parseOpenMPSimpleClause(*this, OMPC_device_type);
- if (DevTypeData.hasValue()) {
+ if (DevTypeData) {
if (DeviceTypeLoc.isValid()) {
// We already saw another device_type clause, diagnose it.
Diag(DevTypeData.getValue().Loc,
@@ -1887,7 +1901,7 @@ void Parser::ParseOMPDeclareTargetClauses(
case OMPC_DEVICE_TYPE_unknown:
llvm_unreachable("Unexpected device_type");
}
- DeviceTypeLoc = DevTypeData.getValue().Loc;
+ DeviceTypeLoc = DevTypeData->Loc;
}
continue;
}
@@ -1928,7 +1942,7 @@ void Parser::ParseOMPDeclareTargetClauses(
ConsumeToken();
}
- if (DTCI.Indirect.hasValue() && DTCI.DT != OMPDeclareTargetDeclAttr::DT_Any)
+ if (DTCI.Indirect && DTCI.DT != OMPDeclareTargetDeclAttr::DT_Any)
Diag(DeviceTypeLoc, diag::err_omp_declare_target_indirect_device_type);
// For declare target require at least 'to' or 'link' to be present.
@@ -2022,7 +2036,7 @@ void Parser::ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind BeginDKind,
/// annot_pragma_openmp_end
///
Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
- AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, bool Delayed,
+ AccessSpecifier &AS, ParsedAttributes &Attrs, bool Delayed,
DeclSpec::TST TagType, Decl *Tag) {
assert(Tok.isOneOf(tok::annot_pragma_openmp, tok::annot_attr_openmp) &&
"Not an OpenMP directive!");
@@ -2307,9 +2321,9 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
Sema::DeclareTargetContextInfo DTCI(DKind, DTLoc);
if (HasClauses)
ParseOMPDeclareTargetClauses(DTCI);
- bool HasImplicitMappings =
- DKind == OMPD_begin_declare_target || !HasClauses ||
- (DTCI.ExplicitlyMapped.empty() && DTCI.Indirect.hasValue());
+ bool HasImplicitMappings = DKind == OMPD_begin_declare_target ||
+ !HasClauses ||
+ (DTCI.ExplicitlyMapped.empty() && DTCI.Indirect);
// Skip the last annot_pragma_openmp_end.
ConsumeAnyToken();
@@ -2363,6 +2377,7 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
case OMPD_parallel_for_simd:
case OMPD_parallel_sections:
case OMPD_parallel_master:
+ case OMPD_parallel_masked:
case OMPD_atomic:
case OMPD_target:
case OMPD_teams:
@@ -2379,6 +2394,10 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
case OMPD_master_taskloop_simd:
case OMPD_parallel_master_taskloop:
case OMPD_parallel_master_taskloop_simd:
+ case OMPD_masked_taskloop:
+ case OMPD_masked_taskloop_simd:
+ case OMPD_parallel_masked_taskloop:
+ case OMPD_parallel_masked_taskloop_simd:
case OMPD_distribute:
case OMPD_target_update:
case OMPD_distribute_parallel_for:
@@ -2399,6 +2418,10 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
case OMPD_masked:
case OMPD_metadirective:
case OMPD_loop:
+ case OMPD_teams_loop:
+ case OMPD_target_teams_loop:
+ case OMPD_parallel_loop:
+ case OMPD_target_parallel_loop:
Diag(Tok, diag::err_omp_unexpected_directive)
<< 1 << getOpenMPDirectiveName(DKind);
break;
@@ -2451,9 +2474,8 @@ Parser::DeclGroupPtrTy Parser::ParseOpenMPDeclarativeDirectiveWithExtDecl(
/// for simd' | 'target teams distribute simd' | 'masked' {clause}
/// annot_pragma_openmp_end
///
-StmtResult
-Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
- static bool ReadDirectiveWithinMetadirective = false;
+StmtResult Parser::ParseOpenMPDeclarativeOrExecutableDirective(
+ ParsedStmtContext StmtCtx, bool ReadDirectiveWithinMetadirective) {
if (!ReadDirectiveWithinMetadirective)
assert(Tok.isOneOf(tok::annot_pragma_openmp, tok::annot_attr_openmp) &&
"Not an OpenMP directive!");
@@ -2481,6 +2503,16 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
bool HasAssociatedStatement = true;
switch (DKind) {
+ case OMPD_nothing:
+ if ((StmtCtx & ParsedStmtContext::AllowStandaloneOpenMPDirectives) ==
+ ParsedStmtContext())
+ Diag(Tok, diag::err_omp_immediate_directive)
+ << getOpenMPDirectiveName(DKind) << 0;
+ ConsumeToken();
+ skipUntilPragmaOpenMPEnd(DKind);
+ if (Tok.is(tok::annot_pragma_openmp_end))
+ ConsumeAnnotationToken();
+ break;
case OMPD_metadirective: {
ConsumeToken();
SmallVector<VariantMatchInfo, 4> VMIs;
@@ -2615,9 +2647,9 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
}
// Parse Directive
- ReadDirectiveWithinMetadirective = true;
- Directive = ParseOpenMPDeclarativeOrExecutableDirective(StmtCtx);
- ReadDirectiveWithinMetadirective = false;
+ Directive = ParseOpenMPDeclarativeOrExecutableDirective(
+ StmtCtx,
+ /*ReadDirectiveWithinMetadirective=*/true);
break;
}
break;
@@ -2745,6 +2777,7 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
case OMPD_parallel_for_simd:
case OMPD_parallel_sections:
case OMPD_parallel_master:
+ case OMPD_parallel_masked:
case OMPD_task:
case OMPD_ordered:
case OMPD_atomic:
@@ -2755,12 +2788,20 @@ Parser::ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx) {
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_loop:
+ case OMPD_teams_loop:
+ case OMPD_target_teams_loop:
+ case OMPD_parallel_loop:
+ case OMPD_target_parallel_loop:
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
+ case OMPD_masked_taskloop:
case OMPD_master_taskloop_simd:
+ case OMPD_masked_taskloop_simd:
case OMPD_parallel_master_taskloop:
+ case OMPD_parallel_masked_taskloop:
case OMPD_parallel_master_taskloop_simd:
+ case OMPD_parallel_masked_taskloop_simd:
case OMPD_distribute:
case OMPD_distribute_parallel_for:
case OMPD_distribute_parallel_for_simd:
@@ -3097,7 +3138,8 @@ OMPClause *Parser::ParseOpenMPUsesAllocatorClause(OpenMPDirectiveKind DKind) {
/// in_reduction-clause | allocator-clause | allocate-clause |
/// acq_rel-clause | acquire-clause | release-clause | relaxed-clause |
/// depobj-clause | destroy-clause | detach-clause | inclusive-clause |
-/// exclusive-clause | uses_allocators-clause | use_device_addr-clause
+/// exclusive-clause | uses_allocators-clause | use_device_addr-clause |
+/// has_device_addr-clause
///
OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
OpenMPClauseKind CKind, bool FirstClause) {
@@ -3279,6 +3321,7 @@ OMPClause *Parser::ParseOpenMPClause(OpenMPDirectiveKind DKind,
case OMPC_use_device_ptr:
case OMPC_use_device_addr:
case OMPC_is_device_ptr:
+ case OMPC_has_device_addr:
case OMPC_allocate:
case OMPC_nontemporal:
case OMPC_inclusive:
@@ -3573,7 +3616,7 @@ OMPClause *Parser::ParseOpenMPInteropClause(OpenMPClauseKind Kind,
/// Parsing of simple OpenMP clauses like 'default' or 'proc_bind'.
///
/// default-clause:
-/// 'default' '(' 'none' | 'shared' | 'firstprivate' ')'
+/// 'default' '(' 'none' | 'shared' | 'private' | 'firstprivate' ')'
///
/// proc_bind-clause:
/// 'proc_bind' '(' 'master' | 'close' | 'spread' ')'
@@ -3582,7 +3625,8 @@ OMPClause *Parser::ParseOpenMPInteropClause(OpenMPClauseKind Kind,
/// 'bind' '(' 'teams' | 'parallel' | 'thread' ')'
///
/// update-clause:
-/// 'update' '(' 'in' | 'out' | 'inout' | 'mutexinoutset' ')'
+/// 'update' '(' 'in' | 'out' | 'inout' | 'mutexinoutset' |
+/// 'inoutset' ')'
///
OMPClause *Parser::ParseOpenMPSimpleClause(OpenMPClauseKind Kind,
bool ParseOnly) {
@@ -3590,10 +3634,14 @@ OMPClause *Parser::ParseOpenMPSimpleClause(OpenMPClauseKind Kind,
if (!Val || ParseOnly)
return nullptr;
if (getLangOpts().OpenMP < 51 && Kind == OMPC_default &&
- static_cast<DefaultKind>(Val.getValue().Type) ==
- OMP_DEFAULT_firstprivate) {
+ (static_cast<DefaultKind>(Val.getValue().Type) == OMP_DEFAULT_private ||
+ static_cast<DefaultKind>(Val.getValue().Type) ==
+ OMP_DEFAULT_firstprivate)) {
Diag(Val.getValue().LOpen, diag::err_omp_invalid_dsa)
- << getOpenMPClauseName(OMPC_firstprivate)
+ << getOpenMPClauseName(static_cast<DefaultKind>(Val.getValue().Type) ==
+ OMP_DEFAULT_private
+ ? OMPC_private
+ : OMPC_firstprivate)
<< getOpenMPClauseName(OMPC_default) << "5.1";
return nullptr;
}
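Sketch of the gating above: both spellings below parse only from OpenMP 5.1 (-fopenmp-version=51); older versions now get err_omp_invalid_dsa naming the exact offending clause:

    #pragma omp parallel default(private)       // OpenMP >= 5.1 only
    { }
    #pragma omp parallel default(firstprivate)  // likewise gated on 5.1
    { }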
@@ -3875,7 +3923,7 @@ static OpenMPMapModifierKind isMapModifier(Parser &P) {
}
/// Parse the mapper modifier in map, to, and from clauses.
-bool Parser::parseMapperModifier(OpenMPVarListDataTy &Data) {
+bool Parser::parseMapperModifier(Sema::OpenMPVarListDataTy &Data) {
// Parse '('.
BalancedDelimiterTracker T(*this, tok::l_paren, tok::colon);
if (T.expectAndConsume(diag::err_expected_lparen_after, "mapper")) {
@@ -3907,7 +3955,7 @@ bool Parser::parseMapperModifier(OpenMPVarListDataTy &Data) {
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type-modifier ::= always | close | mapper(mapper-identifier) |
/// present
-bool Parser::parseMapTypeModifiers(OpenMPVarListDataTy &Data) {
+bool Parser::parseMapTypeModifiers(Sema::OpenMPVarListDataTy &Data) {
while (getCurToken().isNot(tok::colon)) {
OpenMPMapModifierKind TypeModifier = isMapModifier(*this);
if (TypeModifier == OMPC_MAP_MODIFIER_always ||
@@ -3963,7 +4011,7 @@ static OpenMPMapClauseKind isMapType(Parser &P) {
/// Parse map-type in map clause.
/// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list)
/// where, map-type ::= to | from | tofrom | alloc | release | delete
-static void parseMapType(Parser &P, Parser::OpenMPVarListDataTy &Data) {
+static void parseMapType(Parser &P, Sema::OpenMPVarListDataTy &Data) {
Token Tok = P.getCurToken();
if (Tok.is(tok::colon)) {
P.Diag(Tok, diag::err_omp_map_type_missing);
@@ -4082,11 +4130,38 @@ ExprResult Parser::ParseOpenMPIteratorsExpr() {
Data);
}
+bool Parser::ParseOpenMPReservedLocator(OpenMPClauseKind Kind,
+ Sema::OpenMPVarListDataTy &Data,
+ const LangOptions &LangOpts) {
+ // Currently the only reserved locator is 'omp_all_memory' which is only
+ // allowed on a depend clause.
+ if (Kind != OMPC_depend || LangOpts.OpenMP < 51)
+ return false;
+
+ if (Tok.is(tok::identifier) &&
+ Tok.getIdentifierInfo()->isStr("omp_all_memory")) {
+
+ if (Data.ExtraModifier == OMPC_DEPEND_outallmemory ||
+ Data.ExtraModifier == OMPC_DEPEND_inoutallmemory)
+ Diag(Tok, diag::warn_omp_more_one_omp_all_memory);
+ else if (Data.ExtraModifier != OMPC_DEPEND_out &&
+ Data.ExtraModifier != OMPC_DEPEND_inout)
+ Diag(Tok, diag::err_omp_requires_out_inout_depend_type);
+ else
+ Data.ExtraModifier = Data.ExtraModifier == OMPC_DEPEND_out
+ ? OMPC_DEPEND_outallmemory
+ : OMPC_DEPEND_inoutallmemory;
+ ConsumeToken();
+ return true;
+ }
+ return false;
+}
+
/// Parses clauses with list.
bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
OpenMPClauseKind Kind,
SmallVectorImpl<Expr *> &Vars,
- OpenMPVarListDataTy &Data) {
+ Sema::OpenMPVarListDataTy &Data) {
UnqualifiedId UnqualifiedReductionId;
bool InvalidReductionId = false;
bool IsInvalidMapperModifier = false;
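A sketch of the reserved locator handled by ParseOpenMPReservedLocator above (OpenMP 5.1); work() is a placeholder. It is accepted only with the 'out' or 'inout' depend type, which it upgrades to the *allmemory form:

    #pragma omp task depend(inout : omp_all_memory)  // becomes inoutallmemory
    work();
    // depend(in : omp_all_memory) draws err_omp_requires_out_inout_depend_type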
@@ -4342,14 +4417,16 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
Tok.isNot(tok::annot_pragma_openmp_end))) {
ParseScope OMPListScope(this, Scope::OpenMPDirectiveScope);
ColonProtectionRAIIObject ColonRAII(*this, MayHaveTail);
- // Parse variable
- ExprResult VarExpr =
- Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression());
- if (VarExpr.isUsable()) {
- Vars.push_back(VarExpr.get());
- } else {
- SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
- StopBeforeMatch);
+ if (!ParseOpenMPReservedLocator(Kind, Data, getLangOpts())) {
+ // Parse variable
+ ExprResult VarExpr =
+ Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression());
+ if (VarExpr.isUsable()) {
+ Vars.push_back(VarExpr.get());
+ } else {
+ SkipUntil(tok::comma, tok::r_paren, tok::annot_pragma_openmp_end,
+ StopBeforeMatch);
+ }
}
// Skip ',' if any
IsComma = Tok.is(tok::comma);
@@ -4437,6 +4514,8 @@ bool Parser::ParseOpenMPVarList(OpenMPDirectiveKind DKind,
/// 'use_device_addr' '(' list ')'
/// is_device_ptr-clause:
/// 'is_device_ptr' '(' list ')'
+/// has_device_addr-clause:
+/// 'has_device_addr' '(' list ')'
/// allocate-clause:
/// 'allocate' '(' [ allocator ':' ] list ')'
/// nontemporal-clause:
@@ -4456,7 +4535,7 @@ OMPClause *Parser::ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
SourceLocation Loc = Tok.getLocation();
SourceLocation LOpen = ConsumeToken();
SmallVector<Expr *, 4> Vars;
- OpenMPVarListDataTy Data;
+ Sema::OpenMPVarListDataTy Data;
if (ParseOpenMPVarList(DKind, Kind, Vars, Data))
return nullptr;
@@ -4464,10 +4543,5 @@ OMPClause *Parser::ParseOpenMPVarListClause(OpenMPDirectiveKind DKind,
if (ParseOnly)
return nullptr;
OMPVarListLocTy Locs(Loc, LOpen, Data.RLoc);
- return Actions.ActOnOpenMPVarListClause(
- Kind, Vars, Data.DepModOrTailExpr, Locs, Data.ColonLoc,
- Data.ReductionOrMapperIdScopeSpec, Data.ReductionOrMapperId,
- Data.ExtraModifier, Data.MapTypeModifiers, Data.MapTypeModifiersLoc,
- Data.IsMapTypeImplicit, Data.ExtraModifierLoc, Data.MotionModifiers,
- Data.MotionModifiersLoc);
+ return Actions.ActOnOpenMPVarListClause(Kind, Vars, Locs, Data);
}
diff --git a/contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp b/contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp
index 27e850127862..6ca98876b8fc 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParsePragma.cpp
@@ -255,12 +255,6 @@ struct PragmaMSIntrinsicHandler : public PragmaHandler {
Token &FirstToken) override;
};
-struct PragmaMSOptimizeHandler : public PragmaHandler {
- PragmaMSOptimizeHandler() : PragmaHandler("optimize") {}
- void HandlePragma(Preprocessor &PP, PragmaIntroducer Introducer,
- Token &FirstToken) override;
-};
-
// "\#pragma fenv_access (on)".
struct PragmaMSFenvAccessHandler : public PragmaHandler {
PragmaMSFenvAccessHandler() : PragmaHandler("fenv_access") {}
@@ -445,12 +439,16 @@ void Parser::initializePragmaHandlers() {
PP.AddPragmaHandler(MSCodeSeg.get());
MSSection = std::make_unique<PragmaMSPragma>("section");
PP.AddPragmaHandler(MSSection.get());
+ MSFunction = std::make_unique<PragmaMSPragma>("function");
+ PP.AddPragmaHandler(MSFunction.get());
+ MSAllocText = std::make_unique<PragmaMSPragma>("alloc_text");
+ PP.AddPragmaHandler(MSAllocText.get());
+ MSOptimize = std::make_unique<PragmaMSPragma>("optimize");
+ PP.AddPragmaHandler(MSOptimize.get());
MSRuntimeChecks = std::make_unique<PragmaMSRuntimeChecksHandler>();
PP.AddPragmaHandler(MSRuntimeChecks.get());
MSIntrinsic = std::make_unique<PragmaMSIntrinsicHandler>();
PP.AddPragmaHandler(MSIntrinsic.get());
- MSOptimize = std::make_unique<PragmaMSOptimizeHandler>();
- PP.AddPragmaHandler(MSOptimize.get());
MSFenvAccess = std::make_unique<PragmaMSFenvAccessHandler>();
PP.AddPragmaHandler(MSFenvAccess.get());
}
@@ -554,6 +552,10 @@ void Parser::resetPragmaHandlers() {
MSCodeSeg.reset();
PP.RemovePragmaHandler(MSSection.get());
MSSection.reset();
+ PP.RemovePragmaHandler(MSFunction.get());
+ MSFunction.reset();
+ PP.RemovePragmaHandler(MSAllocText.get());
+ MSAllocText.reset();
PP.RemovePragmaHandler(MSRuntimeChecks.get());
MSRuntimeChecks.reset();
PP.RemovePragmaHandler(MSIntrinsic.get());
@@ -796,7 +798,7 @@ void Parser::HandlePragmaFEnvRound() {
reinterpret_cast<uintptr_t>(Tok.getAnnotationValue()));
SourceLocation PragmaLoc = ConsumeAnnotationToken();
- Actions.setRoundingMode(PragmaLoc, RM);
+ Actions.ActOnPragmaFEnvRound(PragmaLoc, RM);
}
StmtResult Parser::HandlePragmaCaptured()
@@ -906,13 +908,17 @@ void Parser::HandlePragmaMSPragma() {
// Figure out which #pragma we're dealing with. The switch has no default
// because lex shouldn't emit the annotation token for unrecognized pragmas.
typedef bool (Parser::*PragmaHandler)(StringRef, SourceLocation);
- PragmaHandler Handler = llvm::StringSwitch<PragmaHandler>(PragmaName)
- .Case("data_seg", &Parser::HandlePragmaMSSegment)
- .Case("bss_seg", &Parser::HandlePragmaMSSegment)
- .Case("const_seg", &Parser::HandlePragmaMSSegment)
- .Case("code_seg", &Parser::HandlePragmaMSSegment)
- .Case("section", &Parser::HandlePragmaMSSection)
- .Case("init_seg", &Parser::HandlePragmaMSInitSeg);
+ PragmaHandler Handler =
+ llvm::StringSwitch<PragmaHandler>(PragmaName)
+ .Case("data_seg", &Parser::HandlePragmaMSSegment)
+ .Case("bss_seg", &Parser::HandlePragmaMSSegment)
+ .Case("const_seg", &Parser::HandlePragmaMSSegment)
+ .Case("code_seg", &Parser::HandlePragmaMSSegment)
+ .Case("section", &Parser::HandlePragmaMSSection)
+ .Case("init_seg", &Parser::HandlePragmaMSInitSeg)
+ .Case("function", &Parser::HandlePragmaMSFunction)
+ .Case("alloc_text", &Parser::HandlePragmaMSAllocText)
+ .Case("optimize", &Parser::HandlePragmaMSOptimize);
if (!(this->*Handler)(PragmaName, PragmaLocation)) {
// Pragma handling failed, and has been diagnosed. Slurp up the tokens
@@ -1149,6 +1155,65 @@ bool Parser::HandlePragmaMSInitSeg(StringRef PragmaName,
return true;
}
+bool Parser::HandlePragmaMSAllocText(StringRef PragmaName,
+ SourceLocation PragmaLocation) {
+ Token FirstTok = Tok;
+ if (ExpectAndConsume(tok::l_paren, diag::warn_pragma_expected_lparen,
+ PragmaName))
+ return false;
+
+ StringRef Section;
+ if (Tok.is(tok::string_literal)) {
+ ExprResult StringResult = ParseStringLiteralExpression();
+ if (StringResult.isInvalid())
+ return false; // Already diagnosed.
+ StringLiteral *SegmentName = cast<StringLiteral>(StringResult.get());
+ if (SegmentName->getCharByteWidth() != 1) {
+ PP.Diag(PragmaLocation, diag::warn_pragma_expected_non_wide_string)
+ << PragmaName;
+ return false;
+ }
+ Section = SegmentName->getString();
+ } else if (Tok.is(tok::identifier)) {
+ Section = Tok.getIdentifierInfo()->getName();
+ PP.Lex(Tok);
+ } else {
+ PP.Diag(PragmaLocation, diag::warn_pragma_expected_section_name)
+ << PragmaName;
+ return false;
+ }
+
+ if (ExpectAndConsume(tok::comma, diag::warn_pragma_expected_comma,
+ PragmaName))
+ return false;
+
+ SmallVector<std::tuple<IdentifierInfo *, SourceLocation>> Functions;
+ while (true) {
+ if (Tok.isNot(tok::identifier)) {
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_identifier)
+ << PragmaName;
+ return false;
+ }
+
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ Functions.emplace_back(II, Tok.getLocation());
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::comma))
+ break;
+ PP.Lex(Tok);
+ }
+
+ if (ExpectAndConsume(tok::r_paren, diag::warn_pragma_expected_rparen,
+ PragmaName) ||
+ ExpectAndConsume(tok::eof, diag::warn_pragma_extra_tokens_at_eol,
+ PragmaName))
+ return false;
+
+ Actions.ActOnPragmaMSAllocText(FirstTok.getLocation(), Section, Functions);
+ return true;
+}
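// Illustrative inputs the new handler accepts (assuming an MSVC-compatible
// target such as clang-cl); the section name and function list are handed
// to Sema::ActOnPragmaMSAllocText:
//
//   #pragma alloc_text("mycode", Func1, Func2)  // string-literal section
//   #pragma alloc_text(mycode, Func3)           // bare-identifier section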
+
namespace {
struct PragmaLoopHintInfo {
Token PragmaName;
@@ -1560,7 +1625,7 @@ getAttributeSubjectRulesRecoveryPointForToken(const Token &Tok) {
/// suggests the possible attribute subject rules in a fix-it together with
/// any other missing tokens.
DiagnosticBuilder createExpectedAttributeSubjectRulesTokenDiagnostic(
- unsigned DiagID, ParsedAttr &Attribute,
+ unsigned DiagID, ParsedAttributes &Attrs,
MissingAttributeSubjectRulesRecoveryPoint Point, Parser &PRef) {
SourceLocation Loc = PRef.getEndOfPreviousToken();
if (Loc.isInvalid())
@@ -1580,25 +1645,38 @@ DiagnosticBuilder createExpectedAttributeSubjectRulesTokenDiagnostic(
SourceRange FixItRange(Loc);
if (EndPoint == MissingAttributeSubjectRulesRecoveryPoint::None) {
// Gather the subject match rules that are supported by the attribute.
- SmallVector<std::pair<attr::SubjectMatchRule, bool>, 4> SubjectMatchRuleSet;
- Attribute.getMatchRules(PRef.getLangOpts(), SubjectMatchRuleSet);
- if (SubjectMatchRuleSet.empty()) {
+ // Add all the possible rules initially.
+ llvm::BitVector IsMatchRuleAvailable(attr::SubjectMatchRule_Last + 1, true);
+ // Remove the ones that are not supported by any of the attributes.
+ for (const ParsedAttr &Attribute : Attrs) {
+ SmallVector<std::pair<attr::SubjectMatchRule, bool>, 4> MatchRules;
+ Attribute.getMatchRules(PRef.getLangOpts(), MatchRules);
+ llvm::BitVector IsSupported(attr::SubjectMatchRule_Last + 1);
+ for (const auto &Rule : MatchRules) {
+ // Ensure that the missing rule is reported in the fix-it only when it's
+ // supported in the current language mode.
+ if (!Rule.second)
+ continue;
+ IsSupported[Rule.first] = true;
+ }
+ IsMatchRuleAvailable &= IsSupported;
+ }
+ if (IsMatchRuleAvailable.count() == 0) {
// FIXME: We can emit a "fix-it" with a subject list placeholder when
// placeholders will be supported by the fix-its.
return Diagnostic;
}
FixIt += "any(";
bool NeedsComma = false;
- for (const auto &I : SubjectMatchRuleSet) {
- // Ensure that the missing rule is reported in the fix-it only when it's
- // supported in the current language mode.
- if (!I.second)
+ for (unsigned I = 0; I <= attr::SubjectMatchRule_Last; I++) {
+ if (!IsMatchRuleAvailable[I])
continue;
if (NeedsComma)
FixIt += ", ";
else
NeedsComma = true;
- FixIt += attr::getSubjectMatchRuleSpelling(I.first);
+ FixIt += attr::getSubjectMatchRuleSpelling(
+ static_cast<attr::SubjectMatchRule>(I));
}
FixIt += ")";
// Check if we need to remove the range
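// Worked example of the intersection above, with two hypothetical
// attributes: if attribute A supports {function, variable} and attribute B
// supports only {function}, just the common bit survives in
// IsMatchRuleAvailable, so the suggested subject list is "any(function)".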
@@ -1668,22 +1746,25 @@ void Parser::HandlePragmaAttribute() {
return SkipToEnd();
}
- if (Tok.isNot(tok::identifier)) {
- Diag(Tok, diag::err_pragma_attribute_expected_attribute_name);
- SkipToEnd();
- return;
- }
- IdentifierInfo *AttrName = Tok.getIdentifierInfo();
- SourceLocation AttrNameLoc = ConsumeToken();
+ // Parse the comma-separated list of attributes.
+ do {
+ if (Tok.isNot(tok::identifier)) {
+ Diag(Tok, diag::err_pragma_attribute_expected_attribute_name);
+ SkipToEnd();
+ return;
+ }
+ IdentifierInfo *AttrName = Tok.getIdentifierInfo();
+ SourceLocation AttrNameLoc = ConsumeToken();
- if (Tok.isNot(tok::l_paren))
- Attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
- ParsedAttr::AS_GNU);
- else
- ParseGNUAttributeArgs(AttrName, AttrNameLoc, Attrs, /*EndLoc=*/nullptr,
- /*ScopeName=*/nullptr,
- /*ScopeLoc=*/SourceLocation(), ParsedAttr::AS_GNU,
- /*Declarator=*/nullptr);
+ if (Tok.isNot(tok::l_paren))
+ Attrs.addNew(AttrName, AttrNameLoc, nullptr, AttrNameLoc, nullptr, 0,
+ ParsedAttr::AS_GNU);
+ else
+ ParseGNUAttributeArgs(AttrName, AttrNameLoc, Attrs, /*EndLoc=*/nullptr,
+ /*ScopeName=*/nullptr,
+ /*ScopeLoc=*/SourceLocation(), ParsedAttr::AS_GNU,
+ /*Declarator=*/nullptr);
+ } while (TryConsumeToken(tok::comma));
if (ExpectAndConsume(tok::r_paren))
return SkipToEnd();
@@ -1721,26 +1802,19 @@ void Parser::HandlePragmaAttribute() {
return;
}
- // Ensure that we don't have more than one attribute.
- if (Attrs.size() > 1) {
- SourceLocation Loc = Attrs[1].getLoc();
- Diag(Loc, diag::err_pragma_attribute_multiple_attributes);
- SkipToEnd();
- return;
- }
-
- ParsedAttr &Attribute = *Attrs.begin();
- if (!Attribute.isSupportedByPragmaAttribute()) {
- Diag(PragmaLoc, diag::err_pragma_attribute_unsupported_attribute)
- << Attribute;
- SkipToEnd();
- return;
+ for (const ParsedAttr &Attribute : Attrs) {
+ if (!Attribute.isSupportedByPragmaAttribute()) {
+ Diag(PragmaLoc, diag::err_pragma_attribute_unsupported_attribute)
+ << Attribute;
+ SkipToEnd();
+ return;
+ }
}
// Parse the subject-list.
if (!TryConsumeToken(tok::comma)) {
createExpectedAttributeSubjectRulesTokenDiagnostic(
- diag::err_expected, Attribute,
+ diag::err_expected, Attrs,
MissingAttributeSubjectRulesRecoveryPoint::Comma, *this)
<< tok::comma;
SkipToEnd();
@@ -1749,7 +1823,7 @@ void Parser::HandlePragmaAttribute() {
if (Tok.isNot(tok::identifier)) {
createExpectedAttributeSubjectRulesTokenDiagnostic(
- diag::err_pragma_attribute_invalid_subject_set_specifier, Attribute,
+ diag::err_pragma_attribute_invalid_subject_set_specifier, Attrs,
MissingAttributeSubjectRulesRecoveryPoint::ApplyTo, *this);
SkipToEnd();
return;
@@ -1757,7 +1831,7 @@ void Parser::HandlePragmaAttribute() {
const IdentifierInfo *II = Tok.getIdentifierInfo();
if (!II->isStr("apply_to")) {
createExpectedAttributeSubjectRulesTokenDiagnostic(
- diag::err_pragma_attribute_invalid_subject_set_specifier, Attribute,
+ diag::err_pragma_attribute_invalid_subject_set_specifier, Attrs,
MissingAttributeSubjectRulesRecoveryPoint::ApplyTo, *this);
SkipToEnd();
return;
@@ -1766,7 +1840,7 @@ void Parser::HandlePragmaAttribute() {
if (!TryConsumeToken(tok::equal)) {
createExpectedAttributeSubjectRulesTokenDiagnostic(
- diag::err_expected, Attribute,
+ diag::err_expected, Attrs,
MissingAttributeSubjectRulesRecoveryPoint::Equals, *this)
<< tok::equal;
SkipToEnd();
@@ -1796,8 +1870,10 @@ void Parser::HandlePragmaAttribute() {
if (Info->Action == PragmaAttributeInfo::Push)
Actions.ActOnPragmaAttributeEmptyPush(PragmaLoc, Info->Namespace);
- Actions.ActOnPragmaAttributeAttribute(Attribute, PragmaLoc,
- std::move(SubjectMatchRules));
+ for (ParsedAttr &Attribute : Attrs) {
+ Actions.ActOnPragmaAttributeAttribute(Attribute, PragmaLoc,
+ SubjectMatchRules);
+ }
}
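// With the loop above, one push can now carry a comma-separated attribute
// list; a sketch using the existing annotate attribute:
//
//   #pragma clang attribute push (annotate("a"), annotate("b"), apply_to = function)
//   void f();  // receives both annotate attributes
//   #pragma clang attribute pop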
// #pragma GCC visibility comes in two variants:
@@ -2946,14 +3022,6 @@ void PragmaCommentHandler::HandlePragma(Preprocessor &PP,
return;
}
- // On PS4, issue a warning about any pragma comments other than
- // #pragma comment lib.
- if (PP.getTargetInfo().getTriple().isPS4() && Kind != PCK_Lib) {
- PP.Diag(Tok.getLocation(), diag::warn_pragma_comment_ignored)
- << II->getName();
- return;
- }
-
// Read the optional string if present.
PP.Lex(Tok);
std::string ArgumentString;
@@ -3028,12 +3096,13 @@ void PragmaOptimizeHandler::HandlePragma(Preprocessor &PP,
namespace {
/// Used as the annotation value for tok::annot_pragma_fp.
struct TokFPAnnotValue {
- enum FlagKinds { Contract, Reassociate, Exceptions };
+ enum FlagKinds { Contract, Reassociate, Exceptions, EvalMethod };
enum FlagValues { On, Off, Fast };
llvm::Optional<LangOptions::FPModeKind> ContractValue;
llvm::Optional<LangOptions::FPModeKind> ReassociateValue;
llvm::Optional<LangOptions::FPExceptionModeKind> ExceptionsValue;
+ llvm::Optional<LangOptions::FPEvalMethodKind> EvalMethodValue;
};
} // end anonymous namespace
@@ -3060,6 +3129,7 @@ void PragmaFPHandler::HandlePragma(Preprocessor &PP,
.Case("contract", TokFPAnnotValue::Contract)
.Case("reassociate", TokFPAnnotValue::Reassociate)
.Case("exceptions", TokFPAnnotValue::Exceptions)
+ .Case("eval_method", TokFPAnnotValue::EvalMethod)
.Default(None);
if (!FlagKind) {
PP.Diag(Tok.getLocation(), diag::err_pragma_fp_invalid_option)
@@ -3074,8 +3144,11 @@ void PragmaFPHandler::HandlePragma(Preprocessor &PP,
return;
}
PP.Lex(Tok);
+ bool isEvalMethodDouble =
+ Tok.is(tok::kw_double) && FlagKind == TokFPAnnotValue::EvalMethod;
- if (Tok.isNot(tok::identifier)) {
+ // Don't diagnose if we have an eval_method pragma with "double" kind.
+ if (Tok.isNot(tok::identifier) && !isEvalMethodDouble) {
PP.Diag(Tok.getLocation(), diag::err_pragma_fp_invalid_argument)
<< PP.getSpelling(Tok) << OptionInfo->getName()
<< static_cast<int>(*FlagKind);
@@ -3121,6 +3194,19 @@ void PragmaFPHandler::HandlePragma(Preprocessor &PP,
<< PP.getSpelling(Tok) << OptionInfo->getName() << *FlagKind;
return;
}
+ } else if (FlagKind == TokFPAnnotValue::EvalMethod) {
+ AnnotValue->EvalMethodValue =
+ llvm::StringSwitch<llvm::Optional<LangOptions::FPEvalMethodKind>>(
+ II->getName())
+ .Case("source", LangOptions::FPEvalMethodKind::FEM_Source)
+ .Case("double", LangOptions::FPEvalMethodKind::FEM_Double)
+ .Case("extended", LangOptions::FPEvalMethodKind::FEM_Extended)
+ .Default(llvm::None);
+ if (!AnnotValue->EvalMethodValue) {
+ PP.Diag(Tok.getLocation(), diag::err_pragma_fp_invalid_argument)
+ << PP.getSpelling(Tok) << OptionInfo->getName() << *FlagKind;
+ return;
+ }
}
PP.Lex(Tok);
@@ -3223,6 +3309,9 @@ void Parser::HandlePragmaFP() {
if (AnnotValue->ExceptionsValue)
Actions.ActOnPragmaFPExceptions(Tok.getLocation(),
*AnnotValue->ExceptionsValue);
+ if (AnnotValue->EvalMethodValue)
+ Actions.ActOnPragmaFPEvalMethod(Tok.getLocation(),
+ *AnnotValue->EvalMethodValue);
ConsumeAnnotationToken();
}
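// Illustrative spellings for the new eval_method option; the values mirror
// the StringSwitch in PragmaFPHandler above:
//
//   #pragma clang fp eval_method(source)    // FEM_Source
//   #pragma clang fp eval_method(double)    // FEM_Double
//   #pragma clang fp eval_method(extended)  // FEM_Extended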
@@ -3515,58 +3604,100 @@ void PragmaMSIntrinsicHandler::HandlePragma(Preprocessor &PP,
<< "intrinsic";
}
-// #pragma optimize("gsty", on|off)
-void PragmaMSOptimizeHandler::HandlePragma(Preprocessor &PP,
- PragmaIntroducer Introducer,
- Token &Tok) {
- SourceLocation StartLoc = Tok.getLocation();
- PP.Lex(Tok);
+bool Parser::HandlePragmaMSFunction(StringRef PragmaName,
+ SourceLocation PragmaLocation) {
+ Token FirstTok = Tok;
- if (Tok.isNot(tok::l_paren)) {
- PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_lparen) << "optimize";
- return;
+ if (ExpectAndConsume(tok::l_paren, diag::warn_pragma_expected_lparen,
+ PragmaName))
+ return false;
+
+ bool SuggestIntrinH = !PP.isMacroDefined("__INTRIN_H");
+
+ llvm::SmallVector<StringRef> NoBuiltins;
+ while (Tok.is(tok::identifier)) {
+ IdentifierInfo *II = Tok.getIdentifierInfo();
+ if (!II->getBuiltinID())
+ PP.Diag(Tok.getLocation(), diag::warn_pragma_intrinsic_builtin)
+ << II << SuggestIntrinH;
+ else
+ NoBuiltins.emplace_back(II->getName());
+
+ PP.Lex(Tok);
+ if (Tok.isNot(tok::comma))
+ break;
+ PP.Lex(Tok); // ,
}
- PP.Lex(Tok);
+
+ if (ExpectAndConsume(tok::r_paren, diag::warn_pragma_expected_rparen,
+ PragmaName) ||
+ ExpectAndConsume(tok::eof, diag::warn_pragma_extra_tokens_at_eol,
+ PragmaName))
+ return false;
+
+ Actions.ActOnPragmaMSFunction(FirstTok.getLocation(), NoBuiltins);
+ return true;
+}
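// Illustrative input (MSVC-compatible mode): each listed name must be a
// recognized builtin, and matching calls are then emitted as library calls
// instead of being expanded as intrinsics:
//
//   #pragma function(memset, strlen)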
+
+// #pragma optimize("gsty", on|off)
+bool Parser::HandlePragmaMSOptimize(StringRef PragmaName,
+ SourceLocation PragmaLocation) {
+ Token FirstTok = Tok;
+ if (ExpectAndConsume(tok::l_paren, diag::warn_pragma_expected_lparen,
+ PragmaName))
+ return false;
if (Tok.isNot(tok::string_literal)) {
- PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_string) << "optimize";
- return;
+ PP.Diag(PragmaLocation, diag::warn_pragma_expected_string) << PragmaName;
+ return false;
}
- // We could syntax check the string but it's probably not worth the effort.
- PP.Lex(Tok);
-
- if (Tok.isNot(tok::comma)) {
- PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_comma) << "optimize";
- return;
+ ExprResult StringResult = ParseStringLiteralExpression();
+ if (StringResult.isInvalid())
+ return false; // Already diagnosed.
+ StringLiteral *OptimizationList = cast<StringLiteral>(StringResult.get());
+ if (OptimizationList->getCharByteWidth() != 1) {
+ PP.Diag(PragmaLocation, diag::warn_pragma_expected_non_wide_string)
+ << PragmaName;
+ return false;
}
- PP.Lex(Tok);
- if (Tok.is(tok::eod) || Tok.is(tok::r_paren)) {
- PP.Diag(Tok.getLocation(), diag::warn_pragma_missing_argument)
- << "optimize" << /*Expected=*/true << "'on' or 'off'";
- return;
+ if (ExpectAndConsume(tok::comma, diag::warn_pragma_expected_comma,
+ PragmaName))
+ return false;
+
+ if (Tok.is(tok::eof) || Tok.is(tok::r_paren)) {
+ PP.Diag(PragmaLocation, diag::warn_pragma_missing_argument)
+ << PragmaName << /*Expected=*/true << "'on' or 'off'";
+ return false;
}
IdentifierInfo *II = Tok.getIdentifierInfo();
if (!II || (!II->isStr("on") && !II->isStr("off"))) {
- PP.Diag(Tok.getLocation(), diag::warn_pragma_invalid_argument)
- << PP.getSpelling(Tok) << "optimize" << /*Expected=*/true
+ PP.Diag(PragmaLocation, diag::warn_pragma_invalid_argument)
+ << PP.getSpelling(Tok) << PragmaName << /*Expected=*/true
<< "'on' or 'off'";
- return;
+ return false;
}
+ bool IsOn = II->isStr("on");
PP.Lex(Tok);
- if (Tok.isNot(tok::r_paren)) {
- PP.Diag(Tok.getLocation(), diag::warn_pragma_expected_rparen) << "optimize";
- return;
- }
- PP.Lex(Tok);
+ if (ExpectAndConsume(tok::r_paren, diag::warn_pragma_expected_rparen,
+ PragmaName))
+ return false;
- if (Tok.isNot(tok::eod)) {
- PP.Diag(Tok.getLocation(), diag::warn_pragma_extra_tokens_at_eol)
- << "optimize";
- return;
+ // TODO: Add support for "gsty"
+ if (!OptimizationList->getString().empty()) {
+ PP.Diag(PragmaLocation, diag::warn_pragma_invalid_argument)
+ << OptimizationList->getString() << PragmaName << /*Expected=*/true
+ << "\"\"";
+ return false;
}
- PP.Diag(StartLoc, diag::warn_pragma_optimize);
+
+ if (ExpectAndConsume(tok::eof, diag::warn_pragma_extra_tokens_at_eol,
+ PragmaName))
+ return false;
+
+ Actions.ActOnPragmaMSOptimize(FirstTok.getLocation(), IsOn);
+ return true;
}
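// Only the empty optimization list is accepted so far (see the TODO above);
// illustrative inputs:
//
//   #pragma optimize("", off)     // accepted, IsOn == false
//   #pragma optimize("gsty", on)  // rejected: warn_pragma_invalid_argument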
void PragmaForceCUDAHostDeviceHandler::HandlePragma(
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp b/contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp
index ee07775b6346..1f6c74aeae7e 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseStmt.cpp
@@ -105,15 +105,21 @@ Parser::ParseStatementOrDeclaration(StmtVector &Stmts,
// statement are different from [[]] attributes that follow an __attribute__
// at the start of the statement. Thus, we're not using MaybeParseAttributes
// here because we don't want to allow arbitrary orderings.
- ParsedAttributesWithRange Attrs(AttrFactory);
- MaybeParseCXX11Attributes(Attrs, nullptr, /*MightBeObjCMessageSend*/ true);
+ ParsedAttributes CXX11Attrs(AttrFactory);
+ MaybeParseCXX11Attributes(CXX11Attrs, /*MightBeObjCMessageSend*/ true);
+ ParsedAttributes GNUAttrs(AttrFactory);
if (getLangOpts().OpenCL)
- MaybeParseGNUAttributes(Attrs);
+ MaybeParseGNUAttributes(GNUAttrs);
StmtResult Res = ParseStatementOrDeclarationAfterAttributes(
- Stmts, StmtCtx, TrailingElseLoc, Attrs);
+ Stmts, StmtCtx, TrailingElseLoc, CXX11Attrs, GNUAttrs);
MaybeDestroyTemplateIds();
+ // Attributes that are left should all go on the statement, so concatenate the
+ // two lists.
+ ParsedAttributes Attrs(AttrFactory);
+ takeAndConcatenateAttrs(CXX11Attrs, GNUAttrs, Attrs);
+
assert((Attrs.empty() || Res.isInvalid() || Res.isUsable()) &&
"attributes on empty statement");
@@ -158,7 +164,8 @@ private:
StmtResult Parser::ParseStatementOrDeclarationAfterAttributes(
StmtVector &Stmts, ParsedStmtContext StmtCtx,
- SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs) {
+ SourceLocation *TrailingElseLoc, ParsedAttributes &CXX11Attrs,
+ ParsedAttributes &GNUAttrs) {
const char *SemiError = nullptr;
StmtResult Res;
SourceLocation GNUAttributeLoc;
@@ -184,6 +191,12 @@ Retry:
case tok::identifier: {
Token Next = NextToken();
if (Next.is(tok::colon)) { // C99 6.8.1: labeled-statement
+ // Both C++11 and GNU attributes preceding the label appertain to the
+ // label, so put them in a single list to pass on to
+ // ParseLabeledStatement().
+ ParsedAttributes Attrs(AttrFactory);
+ takeAndConcatenateAttrs(CXX11Attrs, GNUAttrs, Attrs);
+
// identifier ':' statement
return ParseLabeledStatement(Attrs, StmtCtx);
}
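// Illustrative case for the concatenation above, using the GNU spelling
// reached through the kw___attribute path below:
//
//   __attribute__((unused)) done:  // both lists land on the LabelDecl
//     return;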
@@ -213,25 +226,33 @@ Retry:
}
default: {
+ bool HaveAttrs = !CXX11Attrs.empty() || !GNUAttrs.empty();
+ auto IsStmtAttr = [](ParsedAttr &Attr) { return Attr.isStmtAttr(); };
+ bool AllAttrsAreStmtAttrs = llvm::all_of(CXX11Attrs, IsStmtAttr) &&
+ llvm::all_of(GNUAttrs, IsStmtAttr);
if ((getLangOpts().CPlusPlus || getLangOpts().MicrosoftExt ||
(StmtCtx & ParsedStmtContext::AllowDeclarationsInC) !=
ParsedStmtContext()) &&
- ((GNUAttributeLoc.isValid() &&
- !(!Attrs.empty() &&
- llvm::all_of(
- Attrs, [](ParsedAttr &Attr) { return Attr.isStmtAttr(); }))) ||
+ ((GNUAttributeLoc.isValid() && !(HaveAttrs && AllAttrsAreStmtAttrs)) ||
isDeclarationStatement())) {
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
DeclGroupPtrTy Decl;
if (GNUAttributeLoc.isValid()) {
DeclStart = GNUAttributeLoc;
- Decl = ParseDeclaration(DeclaratorContext::Block, DeclEnd, Attrs,
- &GNUAttributeLoc);
+ Decl = ParseDeclaration(DeclaratorContext::Block, DeclEnd, CXX11Attrs,
+ GNUAttrs, &GNUAttributeLoc);
} else {
- Decl = ParseDeclaration(DeclaratorContext::Block, DeclEnd, Attrs);
+ Decl = ParseDeclaration(DeclaratorContext::Block, DeclEnd, CXX11Attrs,
+ GNUAttrs);
}
- if (Attrs.Range.getBegin().isValid())
- DeclStart = Attrs.Range.getBegin();
+ if (CXX11Attrs.Range.getBegin().isValid()) {
+ // The caller must guarantee that the CXX11Attrs appear before the
+ // GNUAttrs, and we rely on that here.
+ assert(GNUAttrs.Range.getBegin().isInvalid() ||
+ GNUAttrs.Range.getBegin() > CXX11Attrs.Range.getBegin());
+ DeclStart = CXX11Attrs.Range.getBegin();
+ } else if (GNUAttrs.Range.getBegin().isValid())
+ DeclStart = GNUAttrs.Range.getBegin();
return Actions.ActOnDeclStmt(Decl, DeclStart, DeclEnd);
}
@@ -245,7 +266,7 @@ Retry:
case tok::kw___attribute: {
GNUAttributeLoc = Tok.getLocation();
- ParseGNUAttributes(Attrs);
+ ParseGNUAttributes(GNUAttrs);
goto Retry;
}
@@ -297,10 +318,13 @@ Retry:
break;
case tok::kw_asm: {
- ProhibitAttributes(Attrs);
+ for (const ParsedAttr &AL : CXX11Attrs)
+ Diag(AL.getRange().getBegin(), diag::warn_attribute_ignored) << AL;
+ // Prevent these from being interpreted as statement attributes later on.
+ CXX11Attrs.clear();
+ ProhibitAttributes(GNUAttrs);
bool msAsm = false;
Res = ParseAsmStatement(msAsm);
- Res = Actions.ActOnFinishFullStmt(Res.get());
if (msAsm) return Res;
SemiError = "asm";
break;
@@ -308,7 +332,8 @@ Retry:
case tok::kw___if_exists:
case tok::kw___if_not_exists:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
ParseMicrosoftIfExistsStatement(Stmts);
// An __if_exists block is like a compound statement, but it doesn't create
// a new scope.
@@ -318,7 +343,8 @@ Retry:
return ParseCXXTryBlock();
case tok::kw___try:
- ProhibitAttributes(Attrs); // TODO: is it correct?
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
return ParseSEHTryBlock();
case tok::kw___leave:
@@ -327,55 +353,65 @@ Retry:
break;
case tok::annot_pragma_vis:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
HandlePragmaVisibility();
return StmtEmpty();
case tok::annot_pragma_pack:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
HandlePragmaPack();
return StmtEmpty();
case tok::annot_pragma_msstruct:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
HandlePragmaMSStruct();
return StmtEmpty();
case tok::annot_pragma_align:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
HandlePragmaAlign();
return StmtEmpty();
case tok::annot_pragma_weak:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
HandlePragmaWeak();
return StmtEmpty();
case tok::annot_pragma_weakalias:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
HandlePragmaWeakAlias();
return StmtEmpty();
case tok::annot_pragma_redefine_extname:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
HandlePragmaRedefineExtname();
return StmtEmpty();
case tok::annot_pragma_fp_contract:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
Diag(Tok, diag::err_pragma_file_or_compound_scope) << "fp_contract";
ConsumeAnnotationToken();
return StmtError();
case tok::annot_pragma_fp:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
Diag(Tok, diag::err_pragma_file_or_compound_scope) << "clang fp";
ConsumeAnnotationToken();
return StmtError();
case tok::annot_pragma_fenv_access:
case tok::annot_pragma_fenv_access_ms:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
Diag(Tok, diag::err_pragma_file_or_compound_scope)
<< (Kind == tok::annot_pragma_fenv_access ? "STDC FENV_ACCESS"
: "fenv_access");
@@ -383,53 +419,62 @@ Retry:
return StmtEmpty();
case tok::annot_pragma_fenv_round:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
Diag(Tok, diag::err_pragma_file_or_compound_scope) << "STDC FENV_ROUND";
ConsumeAnnotationToken();
return StmtError();
case tok::annot_pragma_float_control:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
Diag(Tok, diag::err_pragma_file_or_compound_scope) << "float_control";
ConsumeAnnotationToken();
return StmtError();
case tok::annot_pragma_opencl_extension:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
HandlePragmaOpenCLExtension();
return StmtEmpty();
case tok::annot_pragma_captured:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
return HandlePragmaCaptured();
case tok::annot_pragma_openmp:
// Prohibit attributes that are not OpenMP attributes, but only before
// processing a #pragma omp clause.
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
LLVM_FALLTHROUGH;
case tok::annot_attr_openmp:
// Do not prohibit attributes if they were OpenMP attributes.
return ParseOpenMPDeclarativeOrExecutableDirective(StmtCtx);
case tok::annot_pragma_ms_pointers_to_members:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
HandlePragmaMSPointersToMembers();
return StmtEmpty();
case tok::annot_pragma_ms_pragma:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
HandlePragmaMSPragma();
return StmtEmpty();
case tok::annot_pragma_ms_vtordisp:
- ProhibitAttributes(Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
HandlePragmaMSVtorDisp();
return StmtEmpty();
case tok::annot_pragma_loop_hint:
- ProhibitAttributes(Attrs);
- return ParsePragmaLoopHint(Stmts, StmtCtx, TrailingElseLoc, Attrs);
+ ProhibitAttributes(CXX11Attrs);
+ ProhibitAttributes(GNUAttrs);
+ return ParsePragmaLoopHint(Stmts, StmtCtx, TrailingElseLoc, CXX11Attrs);
case tok::annot_pragma_dump:
HandlePragmaDump();
@@ -624,7 +669,7 @@ StmtResult Parser::ParseSEHLeaveStatement() {
/// identifier ':' statement
/// [GNU] identifier ':' attributes[opt] statement
///
-StmtResult Parser::ParseLabeledStatement(ParsedAttributesWithRange &attrs,
+StmtResult Parser::ParseLabeledStatement(ParsedAttributes &Attrs,
ParsedStmtContext StmtCtx) {
assert(Tok.is(tok::identifier) && Tok.getIdentifierInfo() &&
"Not an identifier!");
@@ -644,7 +689,7 @@ StmtResult Parser::ParseLabeledStatement(ParsedAttributesWithRange &attrs,
// Read label attributes, if present.
StmtResult SubStmt;
if (Tok.is(tok::kw___attribute)) {
- ParsedAttributesWithRange TempAttrs(AttrFactory);
+ ParsedAttributes TempAttrs(AttrFactory);
ParseGNUAttributes(TempAttrs);
// In C++, GNU attributes only apply to the label if they are followed by a
@@ -655,11 +700,12 @@ StmtResult Parser::ParseLabeledStatement(ParsedAttributesWithRange &attrs,
// and followed by a semicolon, GCC will reject (it appears to parse the
// attributes as part of a statement in that case). That looks like a bug.
if (!getLangOpts().CPlusPlus || Tok.is(tok::semi))
- attrs.takeAllFrom(TempAttrs);
+ Attrs.takeAllFrom(TempAttrs);
else {
StmtVector Stmts;
- SubStmt = ParseStatementOrDeclarationAfterAttributes(Stmts, StmtCtx,
- nullptr, TempAttrs);
+ ParsedAttributes EmptyCXX11Attrs(AttrFactory);
+ SubStmt = ParseStatementOrDeclarationAfterAttributes(
+ Stmts, StmtCtx, nullptr, EmptyCXX11Attrs, TempAttrs);
if (!TempAttrs.empty() && !SubStmt.isInvalid())
SubStmt = Actions.ActOnAttributedStmt(TempAttrs, SubStmt.get());
}
@@ -675,8 +721,8 @@ StmtResult Parser::ParseLabeledStatement(ParsedAttributesWithRange &attrs,
LabelDecl *LD = Actions.LookupOrCreateLabel(IdentTok.getIdentifierInfo(),
IdentTok.getLocation());
- Actions.ProcessDeclAttributeList(Actions.CurScope, LD, attrs);
- attrs.clear();
+ Actions.ProcessDeclAttributeList(Actions.CurScope, LD, Attrs);
+ Attrs.clear();
return Actions.ActOnLabelStmt(IdentTok.getLocation(), LD, ColonLoc,
SubStmt.get());
@@ -1118,9 +1164,8 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
while (Tok.is(tok::kw___extension__))
ConsumeToken();
- ParsedAttributesWithRange attrs(AttrFactory);
- MaybeParseCXX11Attributes(attrs, nullptr,
- /*MightBeObjCMessageSend*/ true);
+ ParsedAttributes attrs(AttrFactory);
+ MaybeParseCXX11Attributes(attrs, /*MightBeObjCMessageSend*/ true);
// If this is the start of a declaration, parse it as such.
if (isDeclarationStatement()) {
@@ -1129,8 +1174,9 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
ExtensionRAIIObject O(Diags);
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
- DeclGroupPtrTy Res =
- ParseDeclaration(DeclaratorContext::Block, DeclEnd, attrs);
+ ParsedAttributes DeclSpecAttrs(AttrFactory);
+ DeclGroupPtrTy Res = ParseDeclaration(DeclaratorContext::Block, DeclEnd,
+ attrs, DeclSpecAttrs);
R = Actions.ActOnDeclStmt(Res, DeclStart, DeclEnd);
} else {
// Otherwise this was a unary __extension__ marker.
@@ -1153,6 +1199,16 @@ StmtResult Parser::ParseCompoundStatementBody(bool isStmtExpr) {
if (R.isUsable())
Stmts.push_back(R.get());
}
+ // Warn the user that `-ffp-eval-method=source` (or
+ // `#pragma clang fp eval_method(source)`) is not supported on a
+ // 32-bit target when the `sse` feature is disabled.
+ if (!PP.getTargetInfo().supportSourceEvalMethod() &&
+ (PP.getLastFPEvalPragmaLocation().isValid() ||
+ PP.getCurrentFPEvalMethod() ==
+ LangOptions::FPEvalMethodKind::FEM_Source))
+ Diag(Tok.getLocation(),
+ diag::warn_no_support_for_eval_method_source_on_m32);
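// Illustrative trigger for the diagnostic above (hypothetical invocation):
//   clang -m32 -mno-sse -ffp-eval-method=source test.c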
SourceLocation CloseLoc = Tok.getLocation();
@@ -1915,7 +1971,7 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
return StmtError();
}
- ParsedAttributesWithRange attrs(AttrFactory);
+ ParsedAttributes attrs(AttrFactory);
MaybeParseCXX11Attributes(attrs);
SourceLocation EmptyInitStmtSemiLoc;
@@ -1946,8 +2002,8 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
? FixItHint::CreateInsertion(Loc, "auto &&")
: FixItHint());
- ForRangeInfo.LoopVar = Actions.ActOnCXXForRangeIdentifier(
- getCurScope(), Loc, Name, attrs, attrs.Range.getEnd());
+ ForRangeInfo.LoopVar =
+ Actions.ActOnCXXForRangeIdentifier(getCurScope(), Loc, Name, attrs);
} else if (isForInitDeclaration()) { // for (int X = 4;
ParenBraceBracketBalancer BalancerRAIIObj(*this);
@@ -1966,8 +2022,9 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
ColonProtectionRAIIObject ColonProtection(*this, MightBeForRangeStmt);
SourceLocation DeclStart = Tok.getLocation(), DeclEnd;
+ ParsedAttributes DeclSpecAttrs(AttrFactory);
DG = ParseSimpleDeclaration(
- DeclaratorContext::ForInit, DeclEnd, attrs, false,
+ DeclaratorContext::ForInit, DeclEnd, attrs, DeclSpecAttrs, false,
MightBeForRangeStmt ? &ForRangeInfo : nullptr);
FirstPart = Actions.ActOnDeclStmt(DG, DeclStart, Tok.getLocation());
if (ForRangeInfo.ParsedForRangeDecl()) {
@@ -2096,7 +2153,7 @@ StmtResult Parser::ParseForStatement(SourceLocation *TrailingElseLoc) {
// Enter a break / continue scope, if we didn't already enter one while
// parsing the second part.
- if (!(getCurScope()->getFlags() & Scope::ContinueScope))
+ if (!getCurScope()->isContinueScope())
getCurScope()->AddFlags(Scope::BreakScope | Scope::ContinueScope);
// Parse the third part of the for statement.
@@ -2316,9 +2373,9 @@ StmtResult Parser::ParseReturnStatement() {
StmtResult Parser::ParsePragmaLoopHint(StmtVector &Stmts,
ParsedStmtContext StmtCtx,
SourceLocation *TrailingElseLoc,
- ParsedAttributesWithRange &Attrs) {
+ ParsedAttributes &Attrs) {
// Create temporary attribute list.
- ParsedAttributesWithRange TempAttrs(AttrFactory);
+ ParsedAttributes TempAttrs(AttrFactory);
SourceLocation StartLoc = Tok.getLocation();
@@ -2338,8 +2395,9 @@ StmtResult Parser::ParsePragmaLoopHint(StmtVector &Stmts,
// Get the next statement.
MaybeParseCXX11Attributes(Attrs);
+ ParsedAttributes EmptyDeclSpecAttrs(AttrFactory);
StmtResult S = ParseStatementOrDeclarationAfterAttributes(
- Stmts, StmtCtx, TrailingElseLoc, Attrs);
+ Stmts, StmtCtx, TrailingElseLoc, Attrs, EmptyDeclSpecAttrs);
Attrs.takeAllFrom(TempAttrs);
@@ -2571,16 +2629,15 @@ StmtResult Parser::ParseCXXCatchBlock(bool FnCatch) {
// without default arguments.
Decl *ExceptionDecl = nullptr;
if (Tok.isNot(tok::ellipsis)) {
- ParsedAttributesWithRange Attributes(AttrFactory);
+ ParsedAttributes Attributes(AttrFactory);
MaybeParseCXX11Attributes(Attributes);
DeclSpec DS(AttrFactory);
- DS.takeAttributesFrom(Attributes);
if (ParseCXXTypeSpecifierSeq(DS))
return StmtError();
- Declarator ExDecl(DS, DeclaratorContext::CXXCatch);
+ Declarator ExDecl(DS, Attributes, DeclaratorContext::CXXCatch);
ParseDeclarator(ExDecl);
ExceptionDecl = Actions.ActOnExceptionDeclarator(getCurScope(), ExDecl);
} else
diff --git a/contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp b/contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp
index f875e3bf43e8..e32ea6003b84 100644
--- a/contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/ParseTemplate.cpp
@@ -19,6 +19,7 @@
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
+#include "clang/Sema/SemaDiagnostic.h"
#include "llvm/Support/TimeProfiler.h"
using namespace clang;
@@ -199,12 +200,15 @@ Decl *Parser::ParseSingleDeclarationAfterTemplate(
if (Context == DeclaratorContext::Member) {
// We are parsing a member template.
- ParseCXXClassMemberDeclaration(AS, AccessAttrs, TemplateInfo,
- &DiagsFromTParams);
- return nullptr;
+ DeclGroupPtrTy D = ParseCXXClassMemberDeclaration(
+ AS, AccessAttrs, TemplateInfo, &DiagsFromTParams);
+
+ if (!D || !D.get().isSingleDecl())
+ return nullptr;
+ return D.get().getSingleDecl();
}
- ParsedAttributesWithRange prefixAttrs(AttrFactory);
+ ParsedAttributes prefixAttrs(AttrFactory);
MaybeParseCXX11Attributes(prefixAttrs);
if (Tok.is(tok::kw_using)) {
@@ -227,7 +231,7 @@ Decl *Parser::ParseSingleDeclarationAfterTemplate(
DeclEnd = ConsumeToken();
RecordDecl *AnonRecord = nullptr;
Decl *Decl = Actions.ParsedFreeStandingDeclSpec(
- getCurScope(), AS, DS,
+ getCurScope(), AS, DS, ParsedAttributesView::none(),
TemplateInfo.TemplateParams ? *TemplateInfo.TemplateParams
: MultiTemplateParamsArg(),
TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation,
@@ -241,11 +245,10 @@ Decl *Parser::ParseSingleDeclarationAfterTemplate(
// Move the attributes from the prefix into the DS.
if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation)
ProhibitAttributes(prefixAttrs);
- else
- DS.takeAttributesFrom(prefixAttrs);
// Parse the declarator.
- ParsingDeclarator DeclaratorInfo(*this, DS, (DeclaratorContext)Context);
+ ParsingDeclarator DeclaratorInfo(*this, DS, prefixAttrs,
+ (DeclaratorContext)Context);
if (TemplateInfo.TemplateParams)
DeclaratorInfo.setTemplateParameterLists(*TemplateInfo.TemplateParams);
@@ -665,7 +668,8 @@ NamedDecl *Parser::ParseTemplateParameter(unsigned Depth, unsigned Position) {
// probably meant to write the type of a NTTP.
DeclSpec DS(getAttrFactory());
DS.SetTypeSpecError();
- Declarator D(DS, DeclaratorContext::TemplateParam);
+ Declarator D(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TemplateParam);
D.SetIdentifier(nullptr, Tok.getLocation());
D.setInvalidType(true);
NamedDecl *ErrorParam = Actions.ActOnNonTypeTemplateParameter(
@@ -989,7 +993,8 @@ Parser::ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position) {
DeclSpecContext::DSC_template_param);
// Parse this as a typename.
- Declarator ParamDecl(DS, DeclaratorContext::TemplateParam);
+ Declarator ParamDecl(DS, ParsedAttributesView::none(),
+ DeclaratorContext::TemplateParam);
ParseDeclarator(ParamDecl);
if (DS.getTypeSpecType() == DeclSpec::TST_unspecified) {
Diag(Tok.getLocation(), diag::err_expected_template_parameter);
@@ -1007,18 +1012,23 @@ Parser::ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position) {
SourceLocation EqualLoc;
ExprResult DefaultArg;
if (TryConsumeToken(tok::equal, EqualLoc)) {
- // C++ [temp.param]p15:
- // When parsing a default template-argument for a non-type
- // template-parameter, the first non-nested > is taken as the
- // end of the template-parameter-list rather than a greater-than
- // operator.
- GreaterThanIsOperatorScope G(GreaterThanIsOperator, false);
- EnterExpressionEvaluationContext ConstantEvaluated(
- Actions, Sema::ExpressionEvaluationContext::ConstantEvaluated);
-
- DefaultArg = Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression());
- if (DefaultArg.isInvalid())
+ if (Tok.is(tok::l_paren) && NextToken().is(tok::l_brace)) {
+ Diag(Tok.getLocation(), diag::err_stmt_expr_in_default_arg) << 1;
SkipUntil(tok::comma, tok::greater, StopAtSemi | StopBeforeMatch);
+ } else {
+ // C++ [temp.param]p15:
+ // When parsing a default template-argument for a non-type
+ // template-parameter, the first non-nested > is taken as the
+ // end of the template-parameter-list rather than a greater-than
+ // operator.
+ GreaterThanIsOperatorScope G(GreaterThanIsOperator, false);
+ EnterExpressionEvaluationContext ConstantEvaluated(
+ Actions, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+ DefaultArg =
+ Actions.CorrectDelayedTyposInExpr(ParseAssignmentExpression());
+ if (DefaultArg.isInvalid())
+ SkipUntil(tok::comma, tok::greater, StopAtSemi | StopBeforeMatch);
+ }
}
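// Illustrative code the new branch rejects (statement expressions, a GNU
// extension, are not allowed in default arguments):
//
//   template <int N = ({ 1; })> struct S;  // err_stmt_expr_in_default_arg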
// Create the parameter.
@@ -1233,8 +1243,6 @@ bool Parser::ParseGreaterThanInTemplateList(SourceLocation LAngleLoc,
/// token that forms the template-id. Otherwise, we will leave the
/// last token in the stream (e.g., so that it can be replaced with an
/// annotation token).
-///
-/// \param NameHint is not required, and merely affects code completion.
bool Parser::ParseTemplateIdAfterTemplateName(bool ConsumeLastToken,
SourceLocation &LAngleLoc,
TemplateArgList &TemplateArgs,
diff --git a/contrib/llvm-project/clang/lib/Parse/Parser.cpp b/contrib/llvm-project/clang/lib/Parse/Parser.cpp
index ffa1e0f027f1..6f63d01bc8ad 100644
--- a/contrib/llvm-project/clang/lib/Parse/Parser.cpp
+++ b/contrib/llvm-project/clang/lib/Parse/Parser.cpp
@@ -581,15 +581,20 @@ void Parser::DestroyTemplateIds() {
/// top-level-declaration-seq[opt] private-module-fragment[opt]
///
/// Note that in C, it is an error if there is no first declaration.
-bool Parser::ParseFirstTopLevelDecl(DeclGroupPtrTy &Result) {
+bool Parser::ParseFirstTopLevelDecl(DeclGroupPtrTy &Result,
+ Sema::ModuleImportState &ImportState) {
Actions.ActOnStartOfTranslationUnit();
+ // For C++20 modules, a module decl must be the first in the TU. We also
+ // need to track module imports.
+ ImportState = Sema::ModuleImportState::FirstDecl;
+ bool NoTopLevelDecls = ParseTopLevelDecl(Result, ImportState);
+
// C11 6.9p1 says translation units must have at least one top-level
// declaration. C++ doesn't have this restriction. We also don't want to
// complain if we have a precompiled header, although technically if the PCH
// is empty we should still emit the (pedantic) diagnostic.
// If the main file is a header, we're only pretending it's a TU; don't warn.
- bool NoTopLevelDecls = ParseTopLevelDecl(Result, true);
if (NoTopLevelDecls && !Actions.getASTContext().getExternalSource() &&
!getLangOpts().CPlusPlus && !getLangOpts().IsHeaderFile)
Diag(diag::ext_empty_translation_unit);
@@ -603,7 +608,8 @@ bool Parser::ParseFirstTopLevelDecl(DeclGroupPtrTy &Result) {
/// top-level-declaration:
/// declaration
/// [C++20] module-import-declaration
-bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl) {
+bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result,
+ Sema::ModuleImportState &ImportState) {
DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(*this);
// Skip over the EOF token, flagging end of previous input for incremental
@@ -647,13 +653,12 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl) {
case tok::kw_module:
module_decl:
- Result = ParseModuleDecl(IsFirstDecl);
+ Result = ParseModuleDecl(ImportState);
return false;
- // tok::kw_import is handled by ParseExternalDeclaration. (Under the Modules
- // TS, an import can occur within an export block.)
+ case tok::kw_import:
import_decl: {
- Decl *ImportDecl = ParseModuleImport(SourceLocation());
+ Decl *ImportDecl = ParseModuleImport(SourceLocation(), ImportState);
Result = Actions.ConvertDeclToDeclGroup(ImportDecl);
return false;
}
@@ -669,12 +674,14 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl) {
Actions.ActOnModuleBegin(Tok.getLocation(), reinterpret_cast<Module *>(
Tok.getAnnotationValue()));
ConsumeAnnotationToken();
+ ImportState = Sema::ModuleImportState::NotACXX20Module;
return false;
case tok::annot_module_end:
Actions.ActOnModuleEnd(Tok.getLocation(), reinterpret_cast<Module *>(
Tok.getAnnotationValue()));
ConsumeAnnotationToken();
+ ImportState = Sema::ModuleImportState::NotACXX20Module;
return false;
case tok::eof:
@@ -714,15 +721,28 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl) {
break;
}
- ParsedAttributesWithRange attrs(AttrFactory);
+ ParsedAttributes attrs(AttrFactory);
MaybeParseCXX11Attributes(attrs);
Result = ParseExternalDeclaration(attrs);
+ // An empty Result might mean a line with ';' or some parsing error; ignore
+ // it.
+ if (Result) {
+ if (ImportState == Sema::ModuleImportState::FirstDecl)
+ // First decl was not modular.
+ ImportState = Sema::ModuleImportState::NotACXX20Module;
+ else if (ImportState == Sema::ModuleImportState::ImportAllowed)
+ // Non-imports disallow further imports.
+ ImportState = Sema::ModuleImportState::ImportFinished;
+ }
return false;
}
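// Illustrative state transitions over a C++20 module unit (a sketch; the
// module and import declarations themselves update ImportState in Sema):
//
//   module;           // FirstDecl   -> GlobalFragment
//   export module M;  //             -> imports allowed
//   import Other;     //             still ImportAllowed
//   int x;            // non-import  -> ImportFinished
//   import Late;      // now diagnosed: imports must precede other decls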
/// ParseExternalDeclaration:
///
+/// The `Attrs` that are passed in are C++11 attributes and appertain to the
+/// declaration.
+///
/// external-declaration: [C99 6.9], declaration: [C++ dcl.dcl]
/// function-definition
/// declaration
@@ -747,9 +767,8 @@ bool Parser::ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl) {
///
/// [Modules-TS] module-import-declaration
///
-Parser::DeclGroupPtrTy
-Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
- ParsingDeclSpec *DS) {
+Parser::DeclGroupPtrTy Parser::ParseExternalDeclaration(ParsedAttributes &Attrs,
+ ParsingDeclSpec *DS) {
DestroyTemplateIdAnnotationsRAIIObj CleanupRAII(*this);
ParenBraceBracketBalancer BalancerRAIIObj(*this);
@@ -803,7 +822,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
case tok::annot_attr_openmp:
case tok::annot_pragma_openmp: {
AccessSpecifier AS = AS_none;
- return ParseOpenMPDeclarativeDirectiveWithExtDecl(AS, attrs);
+ return ParseOpenMPDeclarativeDirectiveWithExtDecl(AS, Attrs);
}
case tok::annot_pragma_ms_pointers_to_members:
HandlePragmaMSPointersToMembers();
@@ -823,7 +842,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
case tok::semi:
// Either a C++11 empty-declaration or attribute-declaration.
SingleDecl =
- Actions.ActOnEmptyDeclaration(getCurScope(), attrs, Tok.getLocation());
+ Actions.ActOnEmptyDeclaration(getCurScope(), Attrs, Tok.getLocation());
ConsumeExtraSemi(OutsideFunction);
break;
case tok::r_brace:
@@ -837,10 +856,10 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
// __extension__ silences extension warnings in the subexpression.
ExtensionRAIIObject O(Diags); // Use RAII to do this.
ConsumeToken();
- return ParseExternalDeclaration(attrs);
+ return ParseExternalDeclaration(Attrs);
}
case tok::kw_asm: {
- ProhibitAttributes(attrs);
+ ProhibitAttributes(Attrs);
SourceLocation StartLoc = Tok.getLocation();
SourceLocation EndLoc;
@@ -865,7 +884,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
break;
}
case tok::at:
- return ParseObjCAtDirectives(attrs);
+ return ParseObjCAtDirectives(Attrs);
case tok::minus:
case tok::plus:
if (!getLangOpts().ObjC) {
@@ -887,11 +906,17 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
getCurScope(),
CurParsedObjCImpl ? Sema::PCC_ObjCImplementation : Sema::PCC_Namespace);
return nullptr;
- case tok::kw_import:
- SingleDecl = ParseModuleImport(SourceLocation());
- break;
+ case tok::kw_import: {
+ Sema::ModuleImportState IS = Sema::ModuleImportState::NotACXX20Module;
+ if (getLangOpts().CPlusPlusModules) {
+ llvm_unreachable("not expecting a c++20 import here");
+ ProhibitAttributes(Attrs);
+ }
+ SingleDecl = ParseModuleImport(SourceLocation(), IS);
+ } break;
case tok::kw_export:
if (getLangOpts().CPlusPlusModules || getLangOpts().ModulesTS) {
+ ProhibitAttributes(Attrs);
SingleDecl = ParseExportDeclaration();
break;
}
@@ -907,7 +932,9 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
// A function definition cannot start with any of these keywords.
{
SourceLocation DeclEnd;
- return ParseDeclaration(DeclaratorContext::File, DeclEnd, attrs);
+ ParsedAttributes EmptyDeclSpecAttrs(AttrFactory);
+ return ParseDeclaration(DeclaratorContext::File, DeclEnd, Attrs,
+ EmptyDeclSpecAttrs);
}
case tok::kw_static:
@@ -917,7 +944,9 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
Diag(ConsumeToken(), diag::warn_static_inline_explicit_inst_ignored)
<< 0;
SourceLocation DeclEnd;
- return ParseDeclaration(DeclaratorContext::File, DeclEnd, attrs);
+ ParsedAttributes EmptyDeclSpecAttrs(AttrFactory);
+ return ParseDeclaration(DeclaratorContext::File, DeclEnd, Attrs,
+ EmptyDeclSpecAttrs);
}
goto dont_know;
@@ -928,7 +957,9 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
// Inline namespaces. Allowed as an extension even in C++03.
if (NextKind == tok::kw_namespace) {
SourceLocation DeclEnd;
- return ParseDeclaration(DeclaratorContext::File, DeclEnd, attrs);
+ ParsedAttributes EmptyDeclSpecAttrs(AttrFactory);
+ return ParseDeclaration(DeclaratorContext::File, DeclEnd, Attrs,
+ EmptyDeclSpecAttrs);
}
// Parse (then ignore) 'inline' prior to a template instantiation. This is
@@ -937,7 +968,9 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
Diag(ConsumeToken(), diag::warn_static_inline_explicit_inst_ignored)
<< 1;
SourceLocation DeclEnd;
- return ParseDeclaration(DeclaratorContext::File, DeclEnd, attrs);
+ ParsedAttributes EmptyDeclSpecAttrs(AttrFactory);
+ return ParseDeclaration(DeclaratorContext::File, DeclEnd, Attrs,
+ EmptyDeclSpecAttrs);
}
}
goto dont_know;
@@ -952,7 +985,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
diag::ext_extern_template) << SourceRange(ExternLoc, TemplateLoc);
SourceLocation DeclEnd;
return Actions.ConvertDeclToDeclGroup(ParseExplicitInstantiation(
- DeclaratorContext::File, ExternLoc, TemplateLoc, DeclEnd, attrs));
+ DeclaratorContext::File, ExternLoc, TemplateLoc, DeclEnd, Attrs));
}
goto dont_know;
@@ -973,7 +1006,7 @@ Parser::ParseExternalDeclaration(ParsedAttributesWithRange &attrs,
return nullptr;
}
// We can't tell whether this is a function-definition or declaration yet.
- return ParseDeclarationOrFunctionDefinition(attrs, DS);
+ return ParseDeclarationOrFunctionDefinition(Attrs, DS);
}
// This routine returns a DeclGroup, if the thing we parsed only contains a
@@ -1037,10 +1070,8 @@ bool Parser::isStartOfFunctionDefinition(const ParsingDeclarator &Declarator) {
/// [OMP] threadprivate-directive
/// [OMP] allocate-directive [TODO]
///
-Parser::DeclGroupPtrTy
-Parser::ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
- ParsingDeclSpec &DS,
- AccessSpecifier AS) {
+Parser::DeclGroupPtrTy Parser::ParseDeclOrFunctionDefInternal(
+ ParsedAttributes &Attrs, ParsingDeclSpec &DS, AccessSpecifier AS) {
MaybeParseMicrosoftAttributes(DS.getAttributes());
// Parse the common declaration-specifiers piece.
ParseDeclarationSpecifiers(DS, ParsedTemplateInfo(), AS,
@@ -1079,11 +1110,11 @@ Parser::ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
? DS.getTypeSpecTypeLoc().getLocWithOffset(
LengthOfTSTToken(DS.getTypeSpecType()))
: SourceLocation();
- ProhibitAttributes(attrs, CorrectLocationForAttributes);
+ ProhibitAttributes(Attrs, CorrectLocationForAttributes);
ConsumeToken();
RecordDecl *AnonRecord = nullptr;
- Decl *TheDecl = Actions.ParsedFreeStandingDeclSpec(getCurScope(), AS_none,
- DS, AnonRecord);
+ Decl *TheDecl = Actions.ParsedFreeStandingDeclSpec(
+ getCurScope(), AS_none, DS, ParsedAttributesView::none(), AnonRecord);
DS.complete(TheDecl);
if (AnonRecord) {
Decl* decls[] = {AnonRecord, TheDecl};
@@ -1092,8 +1123,6 @@ Parser::ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
return Actions.ConvertDeclToDeclGroup(TheDecl);
}
- DS.takeAttributesFrom(attrs);
-
// ObjC2 allows prefix attributes on class interfaces and protocols.
// FIXME: This still needs better diagnostics. We should only accept
// attributes here, no types, etc.
@@ -1108,6 +1137,7 @@ Parser::ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
}
DS.abort();
+ DS.takeAttributesFrom(Attrs);
const char *PrevSpec = nullptr;
unsigned DiagID;
@@ -1131,19 +1161,18 @@ Parser::ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs,
if (getLangOpts().CPlusPlus && isTokenStringLiteral() &&
DS.getStorageClassSpec() == DeclSpec::SCS_extern &&
DS.getParsedSpecifiers() == DeclSpec::PQ_StorageClassSpecifier) {
+ ProhibitAttributes(Attrs);
Decl *TheDecl = ParseLinkage(DS, DeclaratorContext::File);
return Actions.ConvertDeclToDeclGroup(TheDecl);
}
- return ParseDeclGroup(DS, DeclaratorContext::File);
+ return ParseDeclGroup(DS, DeclaratorContext::File, Attrs);
}
-Parser::DeclGroupPtrTy
-Parser::ParseDeclarationOrFunctionDefinition(ParsedAttributesWithRange &attrs,
- ParsingDeclSpec *DS,
- AccessSpecifier AS) {
+Parser::DeclGroupPtrTy Parser::ParseDeclarationOrFunctionDefinition(
+ ParsedAttributes &Attrs, ParsingDeclSpec *DS, AccessSpecifier AS) {
if (DS) {
- return ParseDeclOrFunctionDefInternal(attrs, *DS, AS);
+ return ParseDeclOrFunctionDefInternal(Attrs, *DS, AS);
} else {
ParsingDeclSpec PDS(*this);
// Must temporarily exit the objective-c container scope for
@@ -1151,7 +1180,7 @@ Parser::ParseDeclarationOrFunctionDefinition(ParsedAttributesWithRange &attrs,
// afterwards.
ObjCDeclContextSwitch ObjCDC(*this);
- return ParseDeclOrFunctionDefInternal(attrs, PDS, AS);
+ return ParseDeclOrFunctionDefInternal(Attrs, PDS, AS);
}
}
@@ -1177,10 +1206,12 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
const DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
TemplateParameterDepthRAII CurTemplateDepthTracker(TemplateParameterDepth);
- // If this is C90 and the declspecs were completely missing, fudge in an
+ // If this is C89 and the declspecs were completely missing, fudge in an
// implicit int. We do this here because this is the only place where
// declaration-specifiers are completely optional in the grammar.
- if (getLangOpts().ImplicitInt && D.getDeclSpec().isEmpty()) {
+ if (getLangOpts().isImplicitIntRequired() && D.getDeclSpec().isEmpty()) {
+ Diag(D.getIdentifierLoc(), diag::warn_missing_type_specifier)
+ << D.getDeclSpec().getSourceRange();
const char *PrevSpec;
unsigned DiagID;
const PrintingPolicy &Policy = Actions.getASTContext().getPrintingPolicy();
@@ -1281,6 +1312,41 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
ParseScope BodyScope(this, Scope::FnScope | Scope::DeclScope |
Scope::CompoundStmtScope);
+ // Parse function body eagerly if it is either '= delete;' or '= default;' as
+ // ActOnStartOfFunctionDef needs to know whether the function is deleted.
+ Sema::FnBodyKind BodyKind = Sema::FnBodyKind::Other;
+ SourceLocation KWLoc;
+ if (TryConsumeToken(tok::equal)) {
+ assert(getLangOpts().CPlusPlus && "Only C++ function definitions have '='");
+
+ if (TryConsumeToken(tok::kw_delete, KWLoc)) {
+ Diag(KWLoc, getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_defaulted_deleted_function
+ : diag::ext_defaulted_deleted_function)
+ << 1 /* deleted */;
+ BodyKind = Sema::FnBodyKind::Delete;
+ } else if (TryConsumeToken(tok::kw_default, KWLoc)) {
+ Diag(KWLoc, getLangOpts().CPlusPlus11
+ ? diag::warn_cxx98_compat_defaulted_deleted_function
+ : diag::ext_defaulted_deleted_function)
+ << 0 /* defaulted */;
+ BodyKind = Sema::FnBodyKind::Default;
+ } else {
+ llvm_unreachable("function definition after = not 'delete' or 'default'");
+ }
+
+ if (Tok.is(tok::comma)) {
+ Diag(KWLoc, diag::err_default_delete_in_multiple_declaration)
+ << (BodyKind == Sema::FnBodyKind::Delete);
+ SkipUntil(tok::semi);
+ } else if (ExpectAndConsume(tok::semi, diag::err_expected_after,
+ BodyKind == Sema::FnBodyKind::Delete
+ ? "delete"
+ : "default")) {
+ SkipUntil(tok::semi);
+ }
+ }
+
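// Illustrative definitions now consumed up front so that BodyKind is known
// before ActOnStartOfFunctionDef:
//
//   struct S { S(); };
//   S::S() = default;   // BodyKind == Sema::FnBodyKind::Default
//   void g() = delete;  // BodyKind == Sema::FnBodyKind::Delete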
// Tell the actions module that we have entered a function definition with the
// specified Declarator for the function.
Sema::SkipBodyInfo SkipBody;
@@ -1288,10 +1354,13 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
TemplateInfo.TemplateParams
? *TemplateInfo.TemplateParams
: MultiTemplateParamsArg(),
- &SkipBody);
+ &SkipBody, BodyKind);
if (SkipBody.ShouldSkip) {
- SkipFunctionBody();
+ // Do NOT enter SkipFunctionBody if we already consumed the tokens.
+ if (BodyKind == Sema::FnBodyKind::Other)
+ SkipFunctionBody();
+
return Res;
}
@@ -1302,6 +1371,13 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
// safe because we're always the sole owner.
D.getMutableDeclSpec().abort();
+ if (BodyKind != Sema::FnBodyKind::Other) {
+ Actions.SetFunctionBodyKind(Res, KWLoc, BodyKind);
+ Stmt *GeneratedBody = Res ? Res->getBody() : nullptr;
+ Actions.ActOnFinishFunctionBody(Res, GeneratedBody, false);
+ return Res;
+ }
+
// With abbreviated function templates - we need to explicitly add depth to
// account for the implicit template parameter list induced by the template.
if (auto *Template = dyn_cast_or_null<FunctionTemplateDecl>(Res))
@@ -1311,42 +1387,6 @@ Decl *Parser::ParseFunctionDefinition(ParsingDeclarator &D,
// parameter list was specified.
CurTemplateDepthTracker.addDepth(1);
- if (TryConsumeToken(tok::equal)) {
- assert(getLangOpts().CPlusPlus && "Only C++ function definitions have '='");
-
- bool Delete = false;
- SourceLocation KWLoc;
- if (TryConsumeToken(tok::kw_delete, KWLoc)) {
- Diag(KWLoc, getLangOpts().CPlusPlus11
- ? diag::warn_cxx98_compat_defaulted_deleted_function
- : diag::ext_defaulted_deleted_function)
- << 1 /* deleted */;
- Actions.SetDeclDeleted(Res, KWLoc);
- Delete = true;
- } else if (TryConsumeToken(tok::kw_default, KWLoc)) {
- Diag(KWLoc, getLangOpts().CPlusPlus11
- ? diag::warn_cxx98_compat_defaulted_deleted_function
- : diag::ext_defaulted_deleted_function)
- << 0 /* defaulted */;
- Actions.SetDeclDefaulted(Res, KWLoc);
- } else {
- llvm_unreachable("function definition after = not 'delete' or 'default'");
- }
-
- if (Tok.is(tok::comma)) {
- Diag(KWLoc, diag::err_default_delete_in_multiple_declaration)
- << Delete;
- SkipUntil(tok::semi);
- } else if (ExpectAndConsume(tok::semi, diag::err_expected_after,
- Delete ? "delete" : "default")) {
- SkipUntil(tok::semi);
- }
-
- Stmt *GeneratedBody = Res ? Res->getBody() : nullptr;
- Actions.ActOnFinishFunctionBody(Res, GeneratedBody, false);
- return Res;
- }
-
if (SkipFunctionBodies && (!Res || Actions.canSkipFunctionBody(Res)) &&
trySkippingFunctionBody()) {
BodyScope.Exit();
@@ -1444,7 +1484,8 @@ void Parser::ParseKNRParamDeclarations(Declarator &D) {
}
// Parse the first declarator attached to this declspec.
- Declarator ParmDeclarator(DS, DeclaratorContext::KNRTypeList);
+ Declarator ParmDeclarator(DS, ParsedAttributesView::none(),
+ DeclaratorContext::KNRTypeList);
ParseDeclarator(ParmDeclarator);
// Handle the full declarator list.
@@ -1533,7 +1574,7 @@ ExprResult Parser::ParseAsmStringLiteral(bool ForAsmLabel) {
ExprResult AsmString(ParseStringLiteralExpression());
if (!AsmString.isInvalid()) {
const auto *SL = cast<StringLiteral>(AsmString.get());
- if (!SL->isAscii()) {
+ if (!SL->isOrdinary()) {
Diag(Tok, diag::err_asm_operand_wide_string_literal)
<< SL->isWide()
<< SL->getSourceRange();
@@ -2127,14 +2168,14 @@ SourceLocation Parser::handleUnexpectedCodeCompletionToken() {
PrevTokLocation = Tok.getLocation();
for (Scope *S = getCurScope(); S; S = S->getParent()) {
- if (S->getFlags() & Scope::FnScope) {
+ if (S->isFunctionScope()) {
cutOffParsing();
Actions.CodeCompleteOrdinaryName(getCurScope(),
Sema::PCC_RecoveryInFunction);
return PrevTokLocation;
}
- if (S->getFlags() & Scope::ClassScope) {
+ if (S->isClassScope()) {
cutOffParsing();
Actions.CodeCompleteOrdinaryName(getCurScope(), Sema::PCC_Class);
return PrevTokLocation;
@@ -2269,9 +2310,9 @@ void Parser::ParseMicrosoftIfExistsExternalDeclaration() {
// Parse the declarations.
// FIXME: Support module import within __if_exists?
while (Tok.isNot(tok::r_brace) && !isEofOrEom()) {
- ParsedAttributesWithRange attrs(AttrFactory);
- MaybeParseCXX11Attributes(attrs);
- DeclGroupPtrTy Result = ParseExternalDeclaration(attrs);
+ ParsedAttributes Attrs(AttrFactory);
+ MaybeParseCXX11Attributes(Attrs);
+ DeclGroupPtrTy Result = ParseExternalDeclaration(Attrs);
if (Result && !getCurScope()->getParent())
Actions.getASTConsumer().HandleTopLevelDecl(Result.get());
}
@@ -2291,7 +2332,8 @@ void Parser::ParseMicrosoftIfExistsExternalDeclaration() {
/// attribute-specifier-seq[opt] ';'
/// private-module-fragment: [C++2a]
/// 'module' ':' 'private' ';' top-level-declaration-seq[opt]
-Parser::DeclGroupPtrTy Parser::ParseModuleDecl(bool IsFirstDecl) {
+Parser::DeclGroupPtrTy
+Parser::ParseModuleDecl(Sema::ModuleImportState &ImportState) {
SourceLocation StartLoc = Tok.getLocation();
Sema::ModuleDeclKind MDK = TryConsumeToken(tok::kw_export)
@@ -2311,7 +2353,7 @@ Parser::DeclGroupPtrTy Parser::ParseModuleDecl(bool IsFirstDecl) {
// Parse a global-module-fragment, if present.
if (getLangOpts().CPlusPlusModules && Tok.is(tok::semi)) {
SourceLocation SemiLoc = ConsumeToken();
- if (!IsFirstDecl) {
+ if (ImportState != Sema::ModuleImportState::FirstDecl) {
Diag(StartLoc, diag::err_global_module_introducer_not_at_start)
<< SourceRange(StartLoc, SemiLoc);
return nullptr;
@@ -2320,6 +2362,7 @@ Parser::DeclGroupPtrTy Parser::ParseModuleDecl(bool IsFirstDecl) {
Diag(StartLoc, diag::err_module_fragment_exported)
<< /*global*/0 << FixItHint::CreateRemoval(StartLoc);
}
+ ImportState = Sema::ModuleImportState::GlobalFragment;
return Actions.ActOnGlobalModuleFragmentDecl(ModuleLoc);
}
@@ -2334,52 +2377,56 @@ Parser::DeclGroupPtrTy Parser::ParseModuleDecl(bool IsFirstDecl) {
SourceLocation PrivateLoc = ConsumeToken();
DiagnoseAndSkipCXX11Attributes();
ExpectAndConsumeSemi(diag::err_private_module_fragment_expected_semi);
+ ImportState = Sema::ModuleImportState::PrivateFragment;
return Actions.ActOnPrivateModuleFragmentDecl(ModuleLoc, PrivateLoc);
}
SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> Path;
- if (ParseModuleName(ModuleLoc, Path, /*IsImport*/false))
+ if (ParseModuleName(ModuleLoc, Path, /*IsImport*/ false))
return nullptr;
// Parse the optional module-partition.
+ SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> Partition;
if (Tok.is(tok::colon)) {
SourceLocation ColonLoc = ConsumeToken();
- SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> Partition;
- if (ParseModuleName(ModuleLoc, Partition, /*IsImport*/false))
+ if (!getLangOpts().CPlusPlusModules)
+ Diag(ColonLoc, diag::err_unsupported_module_partition)
+ << SourceRange(ColonLoc, Partition.back().second);
+ // Recover by ignoring the partition name.
+ else if (ParseModuleName(ModuleLoc, Partition, /*IsImport*/ false))
return nullptr;
-
- // FIXME: Support module partition declarations.
- Diag(ColonLoc, diag::err_unsupported_module_partition)
- << SourceRange(ColonLoc, Partition.back().second);
- // Recover by parsing as a non-partition.
}
// We don't support any module attributes yet; just parse them and diagnose.
- ParsedAttributesWithRange Attrs(AttrFactory);
+ ParsedAttributes Attrs(AttrFactory);
MaybeParseCXX11Attributes(Attrs);
- ProhibitCXX11Attributes(Attrs, diag::err_attribute_not_module_attr);
+ ProhibitCXX11Attributes(Attrs, diag::err_attribute_not_module_attr,
+ /*DiagnoseEmptyAttrs=*/false,
+ /*WarnOnUnknownAttrs=*/true);
ExpectAndConsumeSemi(diag::err_module_expected_semi);
- return Actions.ActOnModuleDecl(StartLoc, ModuleLoc, MDK, Path, IsFirstDecl);
+ return Actions.ActOnModuleDecl(StartLoc, ModuleLoc, MDK, Path, Partition,
+ ImportState);
}
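For orientation, a minimal sketch of the module-declaration forms this hunk now parses (illustrative C++20 source, not part of the patch):

    // primary module interface unit
    module;                  // global module fragment: preprocessor only
    #include <cstddef>
    export module M;         // module-declaration
    module :private;         // private module fragment

    // a partition interface unit (separate file)
    export module M:Part;    // module-declaration with a module-partition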
/// Parse a module import declaration. This is essentially the same for
-/// Objective-C and the C++ Modules TS, except for the leading '@' (in ObjC)
-/// and the trailing optional attributes (in C++).
+/// Objective-C and C++20 except for the leading '@' (in ObjC) and the
+/// trailing optional attributes (in C++).
///
/// [ObjC] @import declaration:
/// '@' 'import' module-name ';'
/// [ModTS] module-import-declaration:
/// 'import' module-name attribute-specifier-seq[opt] ';'
-/// [C++2a] module-import-declaration:
+/// [C++20] module-import-declaration:
/// 'export'[opt] 'import' module-name
/// attribute-specifier-seq[opt] ';'
/// 'export'[opt] 'import' module-partition
/// attribute-specifier-seq[opt] ';'
/// 'export'[opt] 'import' header-name
/// attribute-specifier-seq[opt] ';'
-Decl *Parser::ParseModuleImport(SourceLocation AtLoc) {
+Decl *Parser::ParseModuleImport(SourceLocation AtLoc,
+ Sema::ModuleImportState &ImportState) {
SourceLocation StartLoc = AtLoc.isInvalid() ? Tok.getLocation() : AtLoc;
SourceLocation ExportLoc;
@@ -2391,9 +2438,10 @@ Decl *Parser::ParseModuleImport(SourceLocation AtLoc) {
bool IsObjCAtImport = Tok.isObjCAtKeyword(tok::objc_import);
SourceLocation ImportLoc = ConsumeToken();
+ // For C++20 modules, valid input here is either "name" or ":partition-name".
SmallVector<std::pair<IdentifierInfo *, SourceLocation>, 2> Path;
+ bool IsPartition = false;
Module *HeaderUnit = nullptr;
-
if (Tok.is(tok::header_name)) {
// This is a header import that the preprocessor decided we should skip
// because it was malformed in some way. Parse and ignore it; it's already
@@ -2403,24 +2451,27 @@ Decl *Parser::ParseModuleImport(SourceLocation AtLoc) {
// This is a header import that the preprocessor mapped to a module import.
HeaderUnit = reinterpret_cast<Module *>(Tok.getAnnotationValue());
ConsumeAnnotationToken();
- } else if (getLangOpts().CPlusPlusModules && Tok.is(tok::colon)) {
+ } else if (Tok.is(tok::colon)) {
SourceLocation ColonLoc = ConsumeToken();
- if (ParseModuleName(ImportLoc, Path, /*IsImport*/true))
+ if (!getLangOpts().CPlusPlusModules)
+ Diag(ColonLoc, diag::err_unsupported_module_partition)
+ << SourceRange(ColonLoc, Path.back().second);
+ // Recover by leaving partition empty.
+ else if (ParseModuleName(ColonLoc, Path, /*IsImport*/ true))
return nullptr;
-
- // FIXME: Support module partition import.
- Diag(ColonLoc, diag::err_unsupported_module_partition)
- << SourceRange(ColonLoc, Path.back().second);
- return nullptr;
+ else
+ IsPartition = true;
} else {
- if (ParseModuleName(ImportLoc, Path, /*IsImport*/true))
+ if (ParseModuleName(ImportLoc, Path, /*IsImport*/ true))
return nullptr;
}
- ParsedAttributesWithRange Attrs(AttrFactory);
+ ParsedAttributes Attrs(AttrFactory);
MaybeParseCXX11Attributes(Attrs);
// We don't support any module import attributes yet.
- ProhibitCXX11Attributes(Attrs, diag::err_attribute_not_import_attr);
+ ProhibitCXX11Attributes(Attrs, diag::err_attribute_not_import_attr,
+ /*DiagnoseEmptyAttrs=*/false,
+ /*WarnOnUnknownAttrs=*/true);
if (PP.hadModuleLoaderFatalFailure()) {
// With a fatal failure in the module loader, we abort parsing.
@@ -2428,12 +2479,51 @@ Decl *Parser::ParseModuleImport(SourceLocation AtLoc) {
return nullptr;
}
+ // Diagnose mis-imports.
+ bool SeenError = true;
+ switch (ImportState) {
+ case Sema::ModuleImportState::ImportAllowed:
+ SeenError = false;
+ break;
+ case Sema::ModuleImportState::FirstDecl:
+ case Sema::ModuleImportState::NotACXX20Module:
+ // We can only import a partition within a module purview.
+ if (IsPartition)
+ Diag(ImportLoc, diag::err_partition_import_outside_module);
+ else
+ SeenError = false;
+ break;
+ case Sema::ModuleImportState::GlobalFragment:
+ // We can only have pre-processor directives in the global module
+ // fragment. We cannot import a named module here; however, we can have
+ // a header unit import.
+ if (!HeaderUnit || HeaderUnit->Kind != Module::ModuleKind::ModuleHeaderUnit)
+ Diag(ImportLoc, diag::err_import_in_wrong_fragment) << IsPartition << 0;
+ else
+ SeenError = false;
+ break;
+ case Sema::ModuleImportState::ImportFinished:
+ if (getLangOpts().CPlusPlusModules)
+ Diag(ImportLoc, diag::err_import_not_allowed_here);
+ else
+ SeenError = false;
+ break;
+ case Sema::ModuleImportState::PrivateFragment:
+ Diag(ImportLoc, diag::err_import_in_wrong_fragment) << IsPartition << 1;
+ break;
+ }
+ if (SeenError) {
+ ExpectAndConsumeSemi(diag::err_module_expected_semi);
+ return nullptr;
+ }
+
DeclResult Import;
if (HeaderUnit)
Import =
Actions.ActOnModuleImport(StartLoc, ExportLoc, ImportLoc, HeaderUnit);
else if (!Path.empty())
- Import = Actions.ActOnModuleImport(StartLoc, ExportLoc, ImportLoc, Path);
+ Import = Actions.ActOnModuleImport(StartLoc, ExportLoc, ImportLoc, Path,
+ IsPartition);
ExpectAndConsumeSemi(diag::err_module_expected_semi);
if (Import.isInvalid())
return nullptr;
@@ -2442,8 +2532,8 @@ Decl *Parser::ParseModuleImport(SourceLocation AtLoc) {
// the header is parseable. Emit a warning to make the user aware.
if (IsObjCAtImport && AtLoc.isValid()) {
auto &SrcMgr = PP.getSourceManager();
- auto *FE = SrcMgr.getFileEntryForID(SrcMgr.getFileID(AtLoc));
- if (FE && llvm::sys::path::parent_path(FE->getDir()->getName())
+ auto FE = SrcMgr.getFileEntryRefForID(SrcMgr.getFileID(AtLoc));
+ if (FE && llvm::sys::path::parent_path(FE->getDir().getName())
.endswith(".framework"))
Diags.Report(AtLoc, diag::warn_atimport_in_framework_header);
}
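A hedged sketch of where the new ImportState checks fire (the state transitions themselves are driven from Sema; the diagnostic names match the switch above):

    module;
    import <cstddef>;   // OK: header-unit import in the global fragment
    import N;           // err_import_in_wrong_fragment: named module here
    export module M;
    import :Part;       // OK: partition import inside the module purview
    int x;              // first non-import declaration in the purview ...
    import O;           // ... err_import_not_allowed_here afterwards
    module :private;
    import P;           // err_import_in_wrong_fragment: private fragment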
diff --git a/contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp b/contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp
index ac5ad52c0b1d..164fea6a449b 100644
--- a/contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/AnalysisBasedWarnings.cpp
@@ -1272,7 +1272,7 @@ static void DiagnoseSwitchLabelsFallthrough(Sema &S, AnalysisDeclContext &AC,
for (const CFGBlock *B : llvm::reverse(*Cfg)) {
const Stmt *Label = B->getLabel();
- if (!Label || !isa<SwitchCase>(Label))
+ if (!isa_and_nonnull<SwitchCase>(Label))
continue;
int AnnotatedCnt;
@@ -1844,7 +1844,7 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
}
}
- void handleInvalidLockExp(StringRef Kind, SourceLocation Loc) override {
+ void handleInvalidLockExp(SourceLocation Loc) override {
PartialDiagnosticAt Warning(Loc, S.PDiag(diag::warn_cannot_resolve_lock)
<< Loc);
Warnings.emplace_back(std::move(Warning), getNotes());
@@ -1922,9 +1922,8 @@ class ThreadSafetyReporter : public clang::threadSafety::ThreadSafetyHandler {
Warnings.emplace_back(std::move(Warning), getNotes(Note));
}
- void handleNoMutexHeld(StringRef Kind, const NamedDecl *D,
- ProtectedOperationKind POK, AccessKind AK,
- SourceLocation Loc) override {
+ void handleNoMutexHeld(const NamedDecl *D, ProtectedOperationKind POK,
+ AccessKind AK, SourceLocation Loc) override {
assert((POK == POK_VarAccess || POK == POK_VarDereference) &&
"Only works for variables");
unsigned DiagID = POK == POK_VarAccess?
diff --git a/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp b/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp
index fefe20941f17..8e8a1be38c0f 100644
--- a/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/CodeCompleteConsumer.cpp
@@ -346,6 +346,15 @@ const char *CodeCompletionString::getTypedText() const {
return nullptr;
}
+std::string CodeCompletionString::getAllTypedText() const {
+ std::string Res;
+ for (const Chunk &C : *this)
+ if (C.Kind == CK_TypedText)
+ Res += C.Text;
+
+ return Res;
+}
+
const char *CodeCompletionAllocator::CopyString(const Twine &String) {
SmallString<128> Data;
StringRef Ref = String.toStringRef(Data);
@@ -621,8 +630,7 @@ void PrintingCodeCompleteConsumer::ProcessCodeCompleteResults(
std::stable_sort(Results, Results + NumResults);
if (!Context.getPreferredType().isNull())
- OS << "PREFERRED-TYPE: " << Context.getPreferredType().getAsString()
- << "\n";
+ OS << "PREFERRED-TYPE: " << Context.getPreferredType() << '\n';
StringRef Filter = SemaRef.getPreprocessor().getCodeCompletionFilter();
// Print the completions.
diff --git a/contrib/llvm-project/clang/lib/Sema/IdentifierResolver.cpp b/contrib/llvm-project/clang/lib/Sema/IdentifierResolver.cpp
index 333f4d70986a..9081714c893f 100644
--- a/contrib/llvm-project/clang/lib/Sema/IdentifierResolver.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/IdentifierResolver.cpp
@@ -121,12 +121,14 @@ bool IdentifierResolver::isDeclInScope(Decl *D, DeclContext *Ctx, Scope *S,
// of the controlled statement.
//
assert(S->getParent() && "No TUScope?");
- if (S->getParent()->getFlags() & Scope::ControlScope) {
+ // If the current decl is in a lambda, we shouldn't consider this a
+ // redefinition, since the lambda has its own scope.
+ if (S->getParent()->isControlScope() && !S->isFunctionScope()) {
S = S->getParent();
if (S->isDeclScope(D))
return true;
}
- if (S->getFlags() & Scope::FnTryCatchScope)
+ if (S->isFnTryCatchScope())
return S->getParent()->isDeclScope(D);
}
return false;
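A small example of the case the new lambda check carves out (illustrative):

    int get();
    void f() {
      if (int x = get()) {
        auto g = [](int x) { return x; };  // OK: the lambda parameter lives
                                           // in its own function scope and
                                           // does not redeclare the
                                           // condition's `x`
        (void)g;
      }
    }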
diff --git a/contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td b/contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td
index ab3055300572..dc158454556a 100644
--- a/contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td
+++ b/contrib/llvm-project/clang/lib/Sema/OpenCLBuiltins.td
@@ -78,10 +78,12 @@ class concatExtension<FunctionExtension Base, string NewExts> {
def NoTypeExt : TypeExtension<"">;
def Fp16TypeExt : TypeExtension<"cl_khr_fp16">;
def Fp64TypeExt : TypeExtension<"cl_khr_fp64">;
+def Atomic64TypeExt : TypeExtension<"cl_khr_int64_base_atomics cl_khr_int64_extended_atomics">;
+def AtomicFp64TypeExt : TypeExtension<"cl_khr_int64_base_atomics cl_khr_int64_extended_atomics cl_khr_fp64">;
// FunctionExtension definitions.
def FuncExtNone : FunctionExtension<"">;
-def FuncExtKhrSubgroups : FunctionExtension<"cl_khr_subgroups">;
+def FuncExtKhrSubgroups : FunctionExtension<"__opencl_subgroup_builtins">;
def FuncExtKhrSubgroupExtendedTypes : FunctionExtension<"cl_khr_subgroup_extended_types">;
def FuncExtKhrSubgroupNonUniformVote : FunctionExtension<"cl_khr_subgroup_non_uniform_vote">;
def FuncExtKhrSubgroupBallot : FunctionExtension<"cl_khr_subgroup_ballot">;
@@ -97,11 +99,10 @@ def FuncExtKhrLocalInt32ExtendedAtomics : FunctionExtension<"cl_khr_local_int32
def FuncExtKhrInt64BaseAtomics : FunctionExtension<"cl_khr_int64_base_atomics">;
def FuncExtKhrInt64ExtendedAtomics : FunctionExtension<"cl_khr_int64_extended_atomics">;
def FuncExtKhrMipmapImage : FunctionExtension<"cl_khr_mipmap_image">;
-def FuncExtKhrMipmapImageReadWrite : FunctionExtension<"cl_khr_mipmap_image __opencl_c_read_write_images">;
def FuncExtKhrMipmapImageWrites : FunctionExtension<"cl_khr_mipmap_image_writes">;
def FuncExtKhrGlMsaaSharing : FunctionExtension<"cl_khr_gl_msaa_sharing">;
-def FuncExtKhrGlMsaaSharingReadWrite : FunctionExtension<"cl_khr_gl_msaa_sharing __opencl_c_read_write_images">;
+def FuncExtOpenCLCDeviceEnqueue : FunctionExtension<"__opencl_c_device_enqueue">;
def FuncExtOpenCLCGenericAddressSpace : FunctionExtension<"__opencl_c_generic_address_space">;
def FuncExtOpenCLCNamedAddressSpaceBuiltins : FunctionExtension<"__opencl_c_named_address_space_builtins">;
def FuncExtOpenCLCPipes : FunctionExtension<"__opencl_c_pipes">;
@@ -132,9 +133,6 @@ def FuncExtFloatAtomicsFp64GenericASMinMax : FunctionExtension<"cl_ext_float_
// Not a real extension, but a workaround to add C++ for OpenCL specific builtins.
def FuncExtOpenCLCxx : FunctionExtension<"__cplusplus">;
-// Multiple extensions
-def FuncExtKhrMipmapWritesAndWrite3d : FunctionExtension<"cl_khr_mipmap_image_writes cl_khr_3d_image_writes">;
-
// Arm extensions.
def ArmIntegerDotProductInt8 : FunctionExtension<"cl_arm_integer_dot_product_int8">;
def ArmIntegerDotProductAccumulateInt8 : FunctionExtension<"cl_arm_integer_dot_product_accumulate_int8">;
@@ -244,7 +242,12 @@ class ImageType<Type _Ty, string _AccessQualifier> :
let IsConst = _Ty.IsConst;
let IsVolatile = _Ty.IsVolatile;
let AddrSpace = _Ty.AddrSpace;
- let Extension = _Ty.Extension;
+ // Add TypeExtensions for writable "image3d_t" and "read_write" image types.
+ let Extension = !cond(
+ !and(!eq(_Ty.Name, "image3d_t"), !eq(_AccessQualifier, "WO")) : TypeExtension<"cl_khr_3d_image_writes">,
+ !and(!eq(_Ty.Name, "image3d_t"), !eq(_AccessQualifier, "RW")) : TypeExtension<"cl_khr_3d_image_writes __opencl_c_read_write_images">,
+ !eq(_AccessQualifier, "RW") : TypeExtension<"__opencl_c_read_write_images">,
+ true : _Ty.Extension);
}
// OpenCL enum type (e.g. memory_scope).
@@ -349,9 +352,22 @@ def Float : Type<"float", QualType<"Context.FloatTy">>;
let Extension = Fp64TypeExt in {
def Double : Type<"double", QualType<"Context.DoubleTy">>;
}
+
+// The half type for builtins that require the cl_khr_fp16 extension.
let Extension = Fp16TypeExt in {
def Half : Type<"half", QualType<"Context.HalfTy">>;
}
+
+// Without the cl_khr_fp16 extension, the half type can only be used to declare
+// a pointer. Define const and non-const pointer types in all address spaces.
+// Use the "__half" alias to allow the TableGen emitter to distinguish the
+// (extensionless) pointee type of these pointer-to-half types from the "half"
+// type defined above that already carries the cl_khr_fp16 extension.
+foreach AS = [PrivateAS, GlobalAS, ConstantAS, LocalAS, GenericAS] in {
+ def "HalfPtr" # AS : PointerType<Type<"__half", QualType<"Context.HalfTy">>, AS>;
+ def "HalfPtrConst" # AS : PointerType<ConstType<Type<"__half", QualType<"Context.HalfTy">>>, AS>;
+}
+
def Size : Type<"size_t", QualType<"Context.getSizeType()">>;
def PtrDiff : Type<"ptrdiff_t", QualType<"Context.getPointerDiffType()">>;
def IntPtr : Type<"intptr_t", QualType<"Context.getIntPtrType()">>;
@@ -389,10 +405,14 @@ def NDRange : TypedefType<"ndrange_t">;
// OpenCL v2.0 s6.13.11: Atomic integer and floating-point types.
def AtomicInt : Type<"atomic_int", QualType<"Context.getAtomicType(Context.IntTy)">>;
def AtomicUInt : Type<"atomic_uint", QualType<"Context.getAtomicType(Context.UnsignedIntTy)">>;
-def AtomicLong : Type<"atomic_long", QualType<"Context.getAtomicType(Context.LongTy)">>;
-def AtomicULong : Type<"atomic_ulong", QualType<"Context.getAtomicType(Context.UnsignedLongTy)">>;
+let Extension = Atomic64TypeExt in {
+ def AtomicLong : Type<"atomic_long", QualType<"Context.getAtomicType(Context.LongTy)">>;
+ def AtomicULong : Type<"atomic_ulong", QualType<"Context.getAtomicType(Context.UnsignedLongTy)">>;
+}
def AtomicFloat : Type<"atomic_float", QualType<"Context.getAtomicType(Context.FloatTy)">>;
-def AtomicDouble : Type<"atomic_double", QualType<"Context.getAtomicType(Context.DoubleTy)">>;
+let Extension = AtomicFp64TypeExt in {
+ def AtomicDouble : Type<"atomic_double", QualType<"Context.getAtomicType(Context.DoubleTy)">>;
+}
def AtomicHalf : Type<"atomic_half", QualType<"Context.getAtomicType(Context.HalfTy)">>;
def AtomicIntPtr : Type<"atomic_intptr_t", QualType<"Context.getAtomicType(Context.getIntPtrType())">>;
def AtomicUIntPtr : Type<"atomic_uintptr_t", QualType<"Context.getAtomicType(Context.getUIntPtrType())">>;
@@ -870,22 +890,22 @@ defm : VloadVstore<[ConstantAS], 0>;
multiclass VloadVstoreHalf<list<AddressSpace> addrspaces, bit defStores> {
foreach AS = addrspaces in {
- def : Builtin<"vload_half", [Float, Size, PointerType<ConstType<Half>, AS>], Attr.Pure>;
+ def : Builtin<"vload_half", [Float, Size, !cast<Type>("HalfPtrConst" # AS)], Attr.Pure>;
foreach VSize = [2, 3, 4, 8, 16] in {
foreach name = ["vload_half" # VSize, "vloada_half" # VSize] in {
- def : Builtin<name, [VectorType<Float, VSize>, Size, PointerType<ConstType<Half>, AS>], Attr.Pure>;
+ def : Builtin<name, [VectorType<Float, VSize>, Size, !cast<Type>("HalfPtrConst" # AS)], Attr.Pure>;
}
}
if defStores then {
foreach rnd = ["", "_rte", "_rtz", "_rtp", "_rtn"] in {
foreach name = ["vstore_half" # rnd] in {
- def : Builtin<name, [Void, Float, Size, PointerType<Half, AS>]>;
- def : Builtin<name, [Void, Double, Size, PointerType<Half, AS>]>;
+ def : Builtin<name, [Void, Float, Size, !cast<Type>("HalfPtr" # AS)]>;
+ def : Builtin<name, [Void, Double, Size, !cast<Type>("HalfPtr" # AS)]>;
}
foreach VSize = [2, 3, 4, 8, 16] in {
foreach name = ["vstore_half" # VSize # rnd, "vstorea_half" # VSize # rnd] in {
- def : Builtin<name, [Void, VectorType<Float, VSize>, Size, PointerType<Half, AS>]>;
- def : Builtin<name, [Void, VectorType<Double, VSize>, Size, PointerType<Half, AS>]>;
+ def : Builtin<name, [Void, VectorType<Float, VSize>, Size, !cast<Type>("HalfPtr" # AS)]>;
+ def : Builtin<name, [Void, VectorType<Double, VSize>, Size, !cast<Type>("HalfPtr" # AS)]>;
}
}
}
@@ -917,7 +937,7 @@ def : Builtin<"write_mem_fence", [Void, MemFenceFlags]>;
// OpenCL v3.0 s6.15.10 - Address Space Qualifier Functions.
// to_global, to_local, to_private are declared in Builtins.def.
-let MinVersion = CL20 in {
+let Extension = FuncExtOpenCLCGenericAddressSpace in {
// The OpenCL 3.0 specification defines these with a "gentype" argument indicating any builtin
// type or user-defined type, which cannot be represented currently. Hence we slightly diverge
// by providing only the following overloads with a void pointer.
@@ -1082,8 +1102,10 @@ multiclass OpenCL2Atomics<AddressSpace addrspace, FunctionExtension BaseExt> {
foreach TypePair = [[AtomicInt, Int], [AtomicUInt, UInt],
[AtomicLong, Long], [AtomicULong, ULong],
[AtomicFloat, Float], [AtomicDouble, Double]] in {
- def : Builtin<"atomic_init",
- [Void, PointerType<VolatileType<TypePair[0]>, addrspace>, TypePair[1]]>;
+ let Extension = BaseExt in {
+ def : Builtin<"atomic_init",
+ [Void, PointerType<VolatileType<TypePair[0]>, addrspace>, TypePair[1]]>;
+ }
defm : BuiltinAtomicExplicit<"atomic_store",
[Void, PointerType<VolatileType<TypePair[0]>, addrspace>, TypePair[1]], BaseExt>;
defm : BuiltinAtomicExplicit<"atomic_load",
@@ -1163,7 +1185,7 @@ let MinVersion = CL20 in {
defvar extension_fp64 = !cast<FunctionExtension>("FuncExtFloatAtomicsFp64" # addrspace # "Add");
defm : BuiltinAtomicExplicit<"atomic_fetch_" # ModOp,
- [Half, PointerType<VolatileType<AtomicFloat>, addrspace>, Half], extension_fp16>;
+ [Half, PointerType<VolatileType<AtomicHalf>, addrspace>, Half], extension_fp16>;
defm : BuiltinAtomicExplicit<"atomic_fetch_" # ModOp,
[Float, PointerType<VolatileType<AtomicFloat>, addrspace>, Float], extension_fp32>;
defm : BuiltinAtomicExplicit<"atomic_fetch_" # ModOp,
@@ -1446,7 +1468,7 @@ let Extension = FuncExtOpenCLCPipes in {
// Defined in Builtins.def
// --- Table 33 ---
-let MinVersion = CL20 in {
+let Extension = FuncExtOpenCLCDeviceEnqueue in {
def : Builtin<"enqueue_marker",
[Int, Queue, UInt, PointerType<ConstType<ClkEvent>, GenericAS>, PointerType<ClkEvent, GenericAS>]>;
@@ -1585,9 +1607,6 @@ multiclass ImageQueryNumMipLevels<string aQual> {
let Extension = FuncExtKhrMipmapImage in {
defm : ImageQueryNumMipLevels<"RO">;
defm : ImageQueryNumMipLevels<"WO">;
-}
-
-let Extension = FuncExtKhrMipmapImageReadWrite in {
defm : ImageQueryNumMipLevels<"RW">;
}
@@ -1617,12 +1636,10 @@ let Extension = FuncExtKhrMipmapImageWrites in {
def : Builtin<"write_imageui", [Void, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int, VectorType<UInt, 4>]>;
}
def : Builtin<"write_imagef", [Void, ImageType<Image2dArrayDepth, aQual>, VectorType<Int, 4>, Int, Float]>;
- let Extension = FuncExtKhrMipmapWritesAndWrite3d in {
- foreach imgTy = [Image3d] in {
- def : Builtin<"write_imagef", [Void, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int, VectorType<Float, 4>]>;
- def : Builtin<"write_imagei", [Void, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int, VectorType<Int, 4>]>;
- def : Builtin<"write_imageui", [Void, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int, VectorType<UInt, 4>]>;
- }
+ foreach imgTy = [Image3d] in {
+ def : Builtin<"write_imagef", [Void, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int, VectorType<Float, 4>]>;
+ def : Builtin<"write_imagei", [Void, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int, VectorType<Int, 4>]>;
+ def : Builtin<"write_imageui", [Void, ImageType<imgTy, aQual>, VectorType<Int, 4>, Int, VectorType<UInt, 4>]>;
}
}
}
@@ -1666,9 +1683,6 @@ let Extension = FuncExtKhrGlMsaaSharing in {
defm : ImageReadMsaa<"RO">;
defm : ImageQueryMsaa<"RO">;
defm : ImageQueryMsaa<"WO">;
-}
-
-let Extension = FuncExtKhrGlMsaaSharingReadWrite in {
defm : ImageReadMsaa<"RW">;
defm : ImageQueryMsaa<"RW">;
}
@@ -1692,7 +1706,9 @@ let Extension = FuncExtKhrSubgroups in {
// --- Table 28.2.2 ---
let Extension = FuncExtKhrSubgroups in {
def : Builtin<"sub_group_barrier", [Void, MemFenceFlags], Attr.Convergent>;
- def : Builtin<"sub_group_barrier", [Void, MemFenceFlags, MemoryScope], Attr.Convergent>;
+ let MinVersion = CL20 in {
+ def : Builtin<"sub_group_barrier", [Void, MemFenceFlags, MemoryScope], Attr.Convergent>;
+ }
}
// --- Table 28.2.4 ---
@@ -1829,6 +1845,12 @@ let Extension = FunctionExtension<"__opencl_c_integer_dot_product_input_4x8bit_p
def : Builtin<"dot_acc_sat_4x8packed_su_int", [Int, UInt, UInt, Int], Attr.Const>;
}
+// Section 48.3 - cl_khr_subgroup_rotate
+let Extension = FunctionExtension<"cl_khr_subgroup_rotate"> in {
+ def : Builtin<"sub_group_rotate", [AGenType1, AGenType1, Int], Attr.Convergent>;
+ def : Builtin<"sub_group_clustered_rotate", [AGenType1, AGenType1, Int, UInt], Attr.Convergent>;
+}
+
//--------------------------------------------------------------------
// Arm extensions.
let Extension = ArmIntegerDotProductInt8 in {
diff --git a/contrib/llvm-project/clang/lib/Sema/ParsedAttr.cpp b/contrib/llvm-project/clang/lib/Sema/ParsedAttr.cpp
index 045847d0ce0f..4b9a694270c5 100644
--- a/contrib/llvm-project/clang/lib/Sema/ParsedAttr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/ParsedAttr.cpp
@@ -155,6 +155,10 @@ unsigned ParsedAttr::getMaxArgs() const {
return getMinArgs() + getInfo().OptArgs;
}
+unsigned ParsedAttr::getNumArgMembers() const {
+ return getInfo().NumArgMembers;
+}
+
bool ParsedAttr::hasCustomParsing() const {
return getInfo().HasCustomParsing;
}
@@ -208,6 +212,41 @@ bool ParsedAttr::isSupportedByPragmaAttribute() const {
return getInfo().IsSupportedByPragmaAttribute;
}
+bool ParsedAttr::slidesFromDeclToDeclSpecLegacyBehavior() const {
+ assert(isStandardAttributeSyntax());
+
+ // We have historically allowed some type attributes with standard attribute
+ // syntax to slide to the decl-specifier-seq, so we have to keep supporting
+ // it. This property is consciously not defined as a flag in Attr.td because
+ // we don't want new attributes to specify it.
+ //
+ // Note: No new entries should be added to this list. Entries should be
+ // removed from this list after a suitable deprecation period, provided that
+ // there are no compatibility considerations with other compilers. If
+ // possible, we would like this list to go away entirely.
+ switch (getParsedKind()) {
+ case AT_AddressSpace:
+ case AT_OpenCLPrivateAddressSpace:
+ case AT_OpenCLGlobalAddressSpace:
+ case AT_OpenCLGlobalDeviceAddressSpace:
+ case AT_OpenCLGlobalHostAddressSpace:
+ case AT_OpenCLLocalAddressSpace:
+ case AT_OpenCLConstantAddressSpace:
+ case AT_OpenCLGenericAddressSpace:
+ case AT_NeonPolyVectorType:
+ case AT_NeonVectorType:
+ case AT_ArmMveStrictPolymorphism:
+ case AT_BTFTypeTag:
+ case AT_ObjCGC:
+ case AT_MatrixType:
+ return true;
+ default:
+ return false;
+ }
+}
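For illustration, one of the listed kinds in user code (a sketch; `clang::address_space` is the standard-syntax spelling of the first entry):

    // Written at the start of the declaration, the attribute historically
    // slides onto the decl-specifier-seq, i.e. onto the type `int`:
    [[clang::address_space(1)]] int *p;   // pointer to address-space-1 int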
+
+bool ParsedAttr::acceptsExprPack() const { return getInfo().AcceptsExprPack; }
+
unsigned ParsedAttr::getSemanticSpelling() const {
return getInfo().spellingIndexToSemanticSpelling(*this);
}
@@ -220,6 +259,14 @@ bool ParsedAttr::hasVariadicArg() const {
return getInfo().OptArgs == 15;
}
+bool ParsedAttr::isParamExpr(size_t N) const {
+ return getInfo().isParamExpr(N);
+}
+
+void ParsedAttr::handleAttrWithDelayedArgs(Sema &S, Decl *D) const {
+ ::handleAttrWithDelayedArgs(S, D, *this);
+}
+
static unsigned getNumAttributeArgs(const ParsedAttr &AL) {
// FIXME: Include the type in the argument list.
return AL.getNumArgs() + AL.hasParsedType();
@@ -251,3 +298,20 @@ bool ParsedAttr::checkAtMostNumArgs(Sema &S, unsigned Num) const {
diag::err_attribute_too_many_arguments,
std::greater<unsigned>());
}
+
+void clang::takeAndConcatenateAttrs(ParsedAttributes &First,
+ ParsedAttributes &Second,
+ ParsedAttributes &Result) {
+ // Note that takeAllFrom() puts the attributes at the beginning of the list,
+ // so to obtain the correct ordering, we add `Second`, then `First`.
+ Result.takeAllFrom(Second);
+ Result.takeAllFrom(First);
+ if (First.Range.getBegin().isValid())
+ Result.Range.setBegin(First.Range.getBegin());
+ else
+ Result.Range.setBegin(Second.Range.getBegin());
+ if (Second.Range.getEnd().isValid())
+ Result.Range.setEnd(Second.Range.getEnd());
+ else
+ Result.Range.setEnd(First.Range.getEnd());
+}
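A minimal usage sketch under the ordering described in the comment (hypothetical call site; `AttrFactory` stands in for the parser's attribute factory):

    ParsedAttributes Leading(AttrFactory), Trailing(AttrFactory);
    ParsedAttributes All(AttrFactory);
    // ... populate Leading and Trailing while parsing ...
    takeAndConcatenateAttrs(Leading, Trailing, All);
    // All now lists Leading's attributes followed by Trailing's, and
    // All.Range spans Leading's begin through Trailing's end when both
    // locations are valid.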
diff --git a/contrib/llvm-project/clang/lib/Sema/Sema.cpp b/contrib/llvm-project/clang/lib/Sema/Sema.cpp
index 7b57c8da4e9c..326010d4d93f 100644
--- a/contrib/llvm-project/clang/lib/Sema/Sema.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/Sema.cpp
@@ -114,6 +114,9 @@ PrintingPolicy Sema::getPrintingPolicy(const ASTContext &Context,
}
}
+ // Shorten the data output if needed
+ Policy.EntireContentsOfLargeArray = false;
+
return Policy;
}
@@ -199,8 +202,9 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
LateTemplateParserCleanup(nullptr), OpaqueParser(nullptr), IdResolver(pp),
StdExperimentalNamespaceCache(nullptr), StdInitializerList(nullptr),
StdCoroutineTraitsCache(nullptr), CXXTypeInfoDecl(nullptr),
- MSVCGuidDecl(nullptr), NSNumberDecl(nullptr), NSValueDecl(nullptr),
- NSStringDecl(nullptr), StringWithUTF8StringMethod(nullptr),
+ MSVCGuidDecl(nullptr), StdSourceLocationImplDecl(nullptr),
+ NSNumberDecl(nullptr), NSValueDecl(nullptr), NSStringDecl(nullptr),
+ StringWithUTF8StringMethod(nullptr),
ValueWithBytesObjCTypeMethod(nullptr), NSArrayDecl(nullptr),
ArrayWithObjectsMethod(nullptr), NSDictionaryDecl(nullptr),
DictionaryWithObjectsMethod(nullptr), GlobalNewDeleteDeclared(false),
@@ -230,6 +234,9 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
// Tell diagnostics how to render things from the AST library.
Diags.SetArgToStringFn(&FormatASTNodeDiagnosticArgument, &Context);
+ // This evaluation context exists to ensure that there's always at least one
+ // valid evaluation context available. It is never removed from the
+ // evaluation stack.
ExprEvalContexts.emplace_back(
ExpressionEvaluationContext::PotentiallyEvaluated, 0, CleanupInfo{},
nullptr, ExpressionEvaluationContextRecord::EK_Other);
@@ -242,6 +249,8 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
SemaPPCallbackHandler = Callbacks.get();
PP.addPPCallbacks(std::move(Callbacks));
SemaPPCallbackHandler->set(*this);
+
+ CurFPFeatures.setFPEvalMethod(PP.getCurrentFPEvalMethod());
}
// Anchor Sema's type info to this TU.
@@ -908,7 +917,7 @@ void Sema::LoadExternalWeakUndeclaredIdentifiers() {
SmallVector<std::pair<IdentifierInfo *, WeakInfo>, 4> WeakIDs;
ExternalSource->ReadWeakUndeclaredIdentifiers(WeakIDs);
for (auto &WeakID : WeakIDs)
- WeakUndeclaredIdentifiers.insert(WeakID);
+ (void)WeakUndeclaredIdentifiers[WeakID.first].insert(WeakID.second);
}
@@ -1009,9 +1018,13 @@ void Sema::emitAndClearUnusedLocalTypedefWarnings() {
/// is parsed. Note that the ASTContext may have already injected some
/// declarations.
void Sema::ActOnStartOfTranslationUnit() {
- if (getLangOpts().ModulesTS &&
- (getLangOpts().getCompilingModule() == LangOptions::CMK_ModuleInterface ||
- getLangOpts().getCompilingModule() == LangOptions::CMK_None)) {
+ if (getLangOpts().CPlusPlusModules &&
+ getLangOpts().getCompilingModule() == LangOptions::CMK_HeaderUnit)
+ HandleStartOfHeaderUnit();
+ else if (getLangOpts().ModulesTS &&
+ (getLangOpts().getCompilingModule() ==
+ LangOptions::CMK_ModuleInterface ||
+ getLangOpts().getCompilingModule() == LangOptions::CMK_None)) {
// We start in an implied global module fragment.
SourceLocation StartOfTU =
SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID());
@@ -1131,6 +1144,7 @@ void Sema::ActOnEndOfTranslationUnit() {
DiagnoseUnterminatedPragmaAlignPack();
DiagnoseUnterminatedPragmaAttribute();
+ DiagnoseUnterminatedOpenMPDeclareTarget();
// All delayed member exception specs should be checked or we end up accepting
// incompatible declarations.
@@ -1159,19 +1173,21 @@ void Sema::ActOnEndOfTranslationUnit() {
// Check for #pragma weak identifiers that were never declared
LoadExternalWeakUndeclaredIdentifiers();
- for (auto WeakID : WeakUndeclaredIdentifiers) {
- if (WeakID.second.getUsed())
+ for (const auto &WeakIDs : WeakUndeclaredIdentifiers) {
+ if (WeakIDs.second.empty())
continue;
- Decl *PrevDecl = LookupSingleName(TUScope, WeakID.first, SourceLocation(),
+ Decl *PrevDecl = LookupSingleName(TUScope, WeakIDs.first, SourceLocation(),
LookupOrdinaryName);
if (PrevDecl != nullptr &&
!(isa<FunctionDecl>(PrevDecl) || isa<VarDecl>(PrevDecl)))
- Diag(WeakID.second.getLocation(), diag::warn_attribute_wrong_decl_type)
- << "'weak'" << ExpectedVariableOrFunction;
+ for (const auto &WI : WeakIDs.second)
+ Diag(WI.getLocation(), diag::warn_attribute_wrong_decl_type)
+ << "'weak'" << ExpectedVariableOrFunction;
else
- Diag(WeakID.second.getLocation(), diag::warn_weak_identifier_undeclared)
- << WeakID.first;
+ for (const auto &WI : WeakIDs.second)
+ Diag(WI.getLocation(), diag::warn_weak_identifier_undeclared)
+ << WeakIDs.first;
}
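The restructured loop still drives the usual `#pragma weak` diagnostics; for example (illustrative):

    #pragma weak undeclared_fn   // -> warn_weak_identifier_undeclared
    struct S {};
    #pragma weak S               // -> warn_attribute_wrong_decl_type:
                                 //    'weak' wants a variable or function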
if (LangOpts.CPlusPlus11 &&
@@ -1400,19 +1416,18 @@ void Sema::ActOnEndOfTranslationUnit() {
// Helper functions.
//===----------------------------------------------------------------------===//
-DeclContext *Sema::getFunctionLevelDeclContext() {
+DeclContext *Sema::getFunctionLevelDeclContext(bool AllowLambda) {
DeclContext *DC = CurContext;
while (true) {
if (isa<BlockDecl>(DC) || isa<EnumDecl>(DC) || isa<CapturedDecl>(DC) ||
isa<RequiresExprBodyDecl>(DC)) {
DC = DC->getParent();
- } else if (isa<CXXMethodDecl>(DC) &&
+ } else if (!AllowLambda && isa<CXXMethodDecl>(DC) &&
cast<CXXMethodDecl>(DC)->getOverloadedOperator() == OO_Call &&
cast<CXXRecordDecl>(DC->getParent())->isLambda()) {
DC = DC->getParent()->getParent();
- }
- else break;
+ } else break;
}
return DC;
@@ -1421,8 +1436,8 @@ DeclContext *Sema::getFunctionLevelDeclContext() {
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
-FunctionDecl *Sema::getCurFunctionDecl() {
- DeclContext *DC = getFunctionLevelDeclContext();
+FunctionDecl *Sema::getCurFunctionDecl(bool AllowLambda) {
+ DeclContext *DC = getFunctionLevelDeclContext(AllowLambda);
return dyn_cast<FunctionDecl>(DC);
}
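A sketch of what the new flag changes, with the expected results in comments (illustrative):

    void outer() {
      auto l = [] {
        // While Sema processes the lambda body:
        //   getCurFunctionDecl(/*AllowLambda=*/false) -> outer
        //   getCurFunctionDecl(/*AllowLambda=*/true)  -> the lambda's
        //                                                operator()
      };
      l();
    }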
@@ -2204,7 +2219,8 @@ operator()(sema::FunctionScopeInfo *Scope) const {
}
void Sema::PushCompoundScope(bool IsStmtExpr) {
- getCurFunction()->CompoundScopes.push_back(CompoundScopeInfo(IsStmtExpr));
+ getCurFunction()->CompoundScopes.push_back(
+ CompoundScopeInfo(IsStmtExpr, getCurFPFeatures()));
}
void Sema::PopCompoundScope() {
@@ -2630,3 +2646,15 @@ const llvm::MapVector<FieldDecl *, Sema::DeleteLocs> &
Sema::getMismatchingDeleteExpressions() const {
return DeleteExprs;
}
+
+Sema::FPFeaturesStateRAII::FPFeaturesStateRAII(Sema &S)
+ : S(S), OldFPFeaturesState(S.CurFPFeatures),
+ OldOverrides(S.FpPragmaStack.CurrentValue),
+ OldEvalMethod(S.PP.getCurrentFPEvalMethod()),
+ OldFPPragmaLocation(S.PP.getLastFPEvalPragmaLocation()) {}
+
+Sema::FPFeaturesStateRAII::~FPFeaturesStateRAII() {
+ S.CurFPFeatures = OldFPFeaturesState;
+ S.FpPragmaStack.CurrentValue = OldOverrides;
+ S.PP.setCurrentFPEvalMethod(OldFPPragmaLocation, OldEvalMethod);
+}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaAccess.cpp b/contrib/llvm-project/clang/lib/Sema/SemaAccess.cpp
index 3f7b387ec925..00d3efd19d7a 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaAccess.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaAccess.cpp
@@ -1761,14 +1761,11 @@ Sema::CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
return CheckAccess(*this, UseLoc, Entity);
}
-/// Checks access to an overloaded member operator, including
-/// conversion operators.
Sema::AccessResult Sema::CheckMemberOperatorAccess(SourceLocation OpLoc,
Expr *ObjectExpr,
- Expr *ArgExpr,
+ const SourceRange &Range,
DeclAccessPair Found) {
- if (!getLangOpts().AccessControl ||
- Found.getAccess() == AS_public)
+ if (!getLangOpts().AccessControl || Found.getAccess() == AS_public)
return AR_accessible;
const RecordType *RT = ObjectExpr->getType()->castAs<RecordType>();
@@ -1776,13 +1773,35 @@ Sema::AccessResult Sema::CheckMemberOperatorAccess(SourceLocation OpLoc,
AccessTarget Entity(Context, AccessTarget::Member, NamingClass, Found,
ObjectExpr->getType());
- Entity.setDiag(diag::err_access)
- << ObjectExpr->getSourceRange()
- << (ArgExpr ? ArgExpr->getSourceRange() : SourceRange());
+ Entity.setDiag(diag::err_access) << ObjectExpr->getSourceRange() << Range;
return CheckAccess(*this, OpLoc, Entity);
}
+/// Checks access to an overloaded member operator, including
+/// conversion operators.
+Sema::AccessResult Sema::CheckMemberOperatorAccess(SourceLocation OpLoc,
+ Expr *ObjectExpr,
+ Expr *ArgExpr,
+ DeclAccessPair Found) {
+ return CheckMemberOperatorAccess(
+ OpLoc, ObjectExpr, ArgExpr ? ArgExpr->getSourceRange() : SourceRange(),
+ Found);
+}
+
+Sema::AccessResult Sema::CheckMemberOperatorAccess(SourceLocation OpLoc,
+ Expr *ObjectExpr,
+ ArrayRef<Expr *> ArgExprs,
+ DeclAccessPair FoundDecl) {
+ SourceRange R;
+ if (!ArgExprs.empty()) {
+ R = SourceRange(ArgExprs.front()->getBeginLoc(),
+ ArgExprs.back()->getEndLoc());
+ }
+
+ return CheckMemberOperatorAccess(OpLoc, ObjectExpr, R, FoundDecl);
+}
+
/// Checks access to the target of a friend declaration.
Sema::AccessResult Sema::CheckFriendAccess(NamedDecl *target) {
assert(isa<CXXMethodDecl>(target->getAsFunction()));
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp
index b69492768848..c997d018a406 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaAttr.cpp
@@ -384,6 +384,54 @@ void Sema::ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
AlignPackStack.Act(PragmaLoc, Action, SlotLabel, Info);
}
+bool Sema::ConstantFoldAttrArgs(const AttributeCommonInfo &CI,
+ MutableArrayRef<Expr *> Args) {
+ llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
+ for (unsigned Idx = 0; Idx < Args.size(); Idx++) {
+ Expr *&E = Args.begin()[Idx];
+ assert(E && "errors are handled before this point");
+ if (E->isValueDependent() || E->isTypeDependent())
+ continue;
+
+ // FIXME: Use DefaultFunctionArrayLValueConversion() in place of the logic
+ // that adds implicit casts here.
+ if (E->getType()->isArrayType())
+ E = ImpCastExprToType(E, Context.getPointerType(E->getType()),
+ clang::CK_ArrayToPointerDecay)
+ .get();
+ if (E->getType()->isFunctionType())
+ E = ImplicitCastExpr::Create(Context,
+ Context.getPointerType(E->getType()),
+ clang::CK_FunctionToPointerDecay, E, nullptr,
+ VK_PRValue, FPOptionsOverride());
+ if (E->isLValue())
+ E = ImplicitCastExpr::Create(Context, E->getType().getNonReferenceType(),
+ clang::CK_LValueToRValue, E, nullptr,
+ VK_PRValue, FPOptionsOverride());
+
+ Expr::EvalResult Eval;
+ Notes.clear();
+ Eval.Diag = &Notes;
+
+ bool Result = E->EvaluateAsConstantExpr(Eval, Context);
+
+ /// `Result` is true when the expression can be folded to a constant.
+ /// `Notes.empty()` means the expression is a valid constant expression in
+ /// the current language mode.
+ if (!Result || !Notes.empty()) {
+ Diag(E->getBeginLoc(), diag::err_attribute_argument_n_type)
+ << CI << (Idx + 1) << AANT_ArgumentConstantExpr;
+ for (auto &Note : Notes)
+ Diag(Note.first, Note.second);
+ return false;
+ }
+ assert(Eval.Val.hasValue());
+ E = ConstantExpr::Create(Context, E, Eval.Val);
+ }
+
+ return true;
+}
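As a sketch of what this routine folds, using `clang::annotate`, whose trailing arguments must be constant expressions:

    constexpr int tag() { return 40 + 2; }
    [[clang::annotate("checked", tag())]] void f();  // folds to 42
    extern int g;
    [[clang::annotate("bad", g)]] void h();  // diagnosed: not a constant
                                             // expression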
+
void Sema::DiagnoseNonDefaultPragmaAlignPack(PragmaAlignPackDiagnoseKind Kind,
SourceLocation IncludeLoc) {
if (Kind == PragmaAlignPackDiagnoseKind::NonDefaultStateAtInclude) {
@@ -470,6 +518,33 @@ void Sema::ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
Consumer.HandleTopLevelDecl(DeclGroupRef(PDMD));
}
+void Sema::ActOnPragmaFPEvalMethod(SourceLocation Loc,
+ LangOptions::FPEvalMethodKind Value) {
+ FPOptionsOverride NewFPFeatures = CurFPFeatureOverrides();
+ switch (Value) {
+ default:
+ llvm_unreachable("invalid pragma eval_method kind");
+ case LangOptions::FEM_Source:
+ NewFPFeatures.setFPEvalMethodOverride(LangOptions::FEM_Source);
+ break;
+ case LangOptions::FEM_Double:
+ NewFPFeatures.setFPEvalMethodOverride(LangOptions::FEM_Double);
+ break;
+ case LangOptions::FEM_Extended:
+ NewFPFeatures.setFPEvalMethodOverride(LangOptions::FEM_Extended);
+ break;
+ }
+ if (getLangOpts().ApproxFunc)
+ Diag(Loc, diag::err_setting_eval_method_used_in_unsafe_context) << 0 << 0;
+ if (getLangOpts().AllowFPReassoc)
+ Diag(Loc, diag::err_setting_eval_method_used_in_unsafe_context) << 0 << 1;
+ if (getLangOpts().AllowRecip)
+ Diag(Loc, diag::err_setting_eval_method_used_in_unsafe_context) << 0 << 2;
+ FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewFPFeatures);
+ CurFPFeatures = NewFPFeatures.applyOverrides(getLangOpts());
+ PP.setCurrentFPEvalMethod(Loc, Value);
+}
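The pragma this handles, in user code (a sketch):

    #pragma clang fp eval_method(double)
    float mul(float a, float b) {
      return a * b;   // the product is evaluated in double precision
    }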
+
void Sema::ActOnPragmaFloatControl(SourceLocation Loc,
PragmaMsStackAction Action,
PragmaFloatControlKind Value) {
@@ -487,25 +562,36 @@ void Sema::ActOnPragmaFloatControl(SourceLocation Loc,
case PFC_Precise:
NewFPFeatures.setFPPreciseEnabled(true);
FpPragmaStack.Act(Loc, Action, StringRef(), NewFPFeatures);
+ if (PP.getCurrentFPEvalMethod() ==
+ LangOptions::FPEvalMethodKind::FEM_Indeterminable &&
+ PP.getLastFPEvalPragmaLocation().isValid())
+ // A preceding `#pragma float_control(precise, off)` has changed
+ // the value of the evaluation method.
+ // Set it back to its old value.
+ PP.setCurrentFPEvalMethod(SourceLocation(), PP.getLastFPEvalMethod());
break;
case PFC_NoPrecise:
- if (CurFPFeatures.getFPExceptionMode() == LangOptions::FPE_Strict)
+ if (CurFPFeatures.getExceptionMode() == LangOptions::FPE_Strict)
Diag(Loc, diag::err_pragma_fc_noprecise_requires_noexcept);
else if (CurFPFeatures.getAllowFEnvAccess())
Diag(Loc, diag::err_pragma_fc_noprecise_requires_nofenv);
else
NewFPFeatures.setFPPreciseEnabled(false);
FpPragmaStack.Act(Loc, Action, StringRef(), NewFPFeatures);
+ PP.setLastFPEvalMethod(PP.getCurrentFPEvalMethod());
+ // With precision disabled, the `AllowFPReassoc` or `AllowReciprocal`
+ // option is enabled, so the evaluation method becomes indeterminable.
+ PP.setCurrentFPEvalMethod(
+ Loc, LangOptions::FPEvalMethodKind::FEM_Indeterminable);
break;
case PFC_Except:
if (!isPreciseFPEnabled())
Diag(Loc, diag::err_pragma_fc_except_requires_precise);
else
- NewFPFeatures.setFPExceptionModeOverride(LangOptions::FPE_Strict);
+ NewFPFeatures.setSpecifiedExceptionModeOverride(LangOptions::FPE_Strict);
FpPragmaStack.Act(Loc, Action, StringRef(), NewFPFeatures);
break;
case PFC_NoExcept:
- NewFPFeatures.setFPExceptionModeOverride(LangOptions::FPE_Ignore);
+ NewFPFeatures.setSpecifiedExceptionModeOverride(LangOptions::FPE_Ignore);
FpPragmaStack.Act(Loc, Action, StringRef(), NewFPFeatures);
break;
case PFC_Push:
@@ -519,6 +605,12 @@ void Sema::ActOnPragmaFloatControl(SourceLocation Loc,
}
FpPragmaStack.Act(Loc, Action, StringRef(), NewFPFeatures);
NewFPFeatures = FpPragmaStack.CurrentValue;
+ if (CurFPFeatures.getAllowFPReassociate() ||
+ CurFPFeatures.getAllowReciprocal())
+ // Since we are popping the pragma, we don't want to be passing
+ // a location here.
+ PP.setCurrentFPEvalMethod(SourceLocation(),
+ CurFPFeatures.getFPEvalMethod());
break;
}
CurFPFeatures = NewFPFeatures.applyOverrides(getLangOpts());
@@ -697,6 +789,42 @@ void Sema::ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
CurInitSegLoc = PragmaLocation;
}
+void Sema::ActOnPragmaMSAllocText(
+ SourceLocation PragmaLocation, StringRef Section,
+ const SmallVector<std::tuple<IdentifierInfo *, SourceLocation>>
+ &Functions) {
+ if (!CurContext->getRedeclContext()->isFileContext()) {
+ Diag(PragmaLocation, diag::err_pragma_expected_file_scope) << "alloc_text";
+ return;
+ }
+
+ for (auto &Function : Functions) {
+ IdentifierInfo *II;
+ SourceLocation Loc;
+ std::tie(II, Loc) = Function;
+
+ DeclarationName DN(II);
+ NamedDecl *ND = LookupSingleName(TUScope, DN, Loc, LookupOrdinaryName);
+ if (!ND) {
+ Diag(Loc, diag::err_undeclared_use) << II->getName();
+ return;
+ }
+
+ auto *FD = dyn_cast<FunctionDecl>(ND->getCanonicalDecl());
+ if (!FD) {
+ Diag(Loc, diag::err_pragma_alloc_text_not_function);
+ return;
+ }
+
+ if (getLangOpts().CPlusPlus && !FD->isInExternCContext()) {
+ Diag(Loc, diag::err_pragma_alloc_text_c_linkage);
+ return;
+ }
+
+ FunctionToSectionMap[II->getName()] = std::make_tuple(Section, Loc);
+ }
+}
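A minimal sketch of the MSVC-compatible pragma this implements (function names are illustrative):

    extern "C" void init_once(void);
    #pragma alloc_text("INIT", init_once)   // definition goes to "INIT"
    extern "C" void init_once(void) {}
    // A function without C linkage is rejected with
    // err_pragma_alloc_text_c_linkage.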
+
void Sema::ActOnPragmaUnused(const Token &IdTok, Scope *curScope,
SourceLocation PragmaLoc) {
@@ -1021,6 +1149,25 @@ void Sema::ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc) {
OptimizeOffPragmaLocation = PragmaLoc;
}
+void Sema::ActOnPragmaMSOptimize(SourceLocation Loc, bool IsOn) {
+ if (!CurContext->getRedeclContext()->isFileContext()) {
+ Diag(Loc, diag::err_pragma_expected_file_scope) << "optimize";
+ return;
+ }
+
+ MSPragmaOptimizeIsOn = IsOn;
+}
+
+void Sema::ActOnPragmaMSFunction(
+ SourceLocation Loc, const llvm::SmallVectorImpl<StringRef> &NoBuiltins) {
+ if (!CurContext->getRedeclContext()->isFileContext()) {
+ Diag(Loc, diag::err_pragma_expected_file_scope) << "function";
+ return;
+ }
+
+ MSFunctionNoBuiltins.insert(NoBuiltins.begin(), NoBuiltins.end());
+}
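Sketches of the two MS pragmas handled above (illustrative; only the empty optimization list is shown):

    #pragma optimize("", off)   // functions defined below get optnone
    void cold_path() {}
    #pragma optimize("", on)    // restore command-line optimizations

    #pragma function(memset)    // don't use the builtin form of memset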
+
void Sema::AddRangeBasedOptnone(FunctionDecl *FD) {
// In the future, check other pragmas if they're implemented (e.g. pragma
// optimize 0 will probably map to this functionality too).
@@ -1028,6 +1175,29 @@ void Sema::AddRangeBasedOptnone(FunctionDecl *FD) {
AddOptnoneAttributeIfNoConflicts(FD, OptimizeOffPragmaLocation);
}
+void Sema::AddSectionMSAllocText(FunctionDecl *FD) {
+ if (!FD->getIdentifier())
+ return;
+
+ StringRef Name = FD->getName();
+ auto It = FunctionToSectionMap.find(Name);
+ if (It != FunctionToSectionMap.end()) {
+ StringRef Section;
+ SourceLocation Loc;
+ std::tie(Section, Loc) = It->second;
+
+ if (!FD->hasAttr<SectionAttr>())
+ FD->addAttr(SectionAttr::CreateImplicit(Context, Section));
+ }
+}
+
+void Sema::ModifyFnAttributesMSPragmaOptimize(FunctionDecl *FD) {
+ // Don't modify the function attributes if the pragma is "on"; "on" resets
+ // the optimizations to the ones listed on the command line.
+ if (!MSPragmaOptimizeIsOn)
+ AddOptnoneAttributeIfNoConflicts(FD, FD->getBeginLoc());
+}
+
void Sema::AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD,
SourceLocation Loc) {
// Don't add a conflicting attribute. No diagnostic is needed.
@@ -1042,6 +1212,13 @@ void Sema::AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD,
FD->addAttr(NoInlineAttr::CreateImplicit(Context, Loc));
}
+void Sema::AddImplicitMSFunctionNoBuiltinAttr(FunctionDecl *FD) {
+ SmallVector<StringRef> V(MSFunctionNoBuiltins.begin(),
+ MSFunctionNoBuiltins.end());
+ if (!MSFunctionNoBuiltins.empty())
+ FD->addAttr(NoBuiltinAttr::CreateImplicit(Context, V.data(), V.size()));
+}
+
typedef std::vector<std::pair<unsigned, SourceLocation> > VisStack;
enum : unsigned { NoVisibility = ~0U };
@@ -1115,22 +1292,32 @@ void Sema::ActOnPragmaFPContract(SourceLocation Loc,
}
void Sema::ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled) {
+ if (IsEnabled) {
+ // In a value-unsafe context, combining this pragma with an eval-method
+ // setting is not recommended. See comment in function FixupInvocation#506.
+ int Reason = -1;
+ if (getLangOpts().getFPEvalMethod() != LangOptions::FEM_UnsetOnCommandLine)
+ // Eval method set using the option 'ffp-eval-method'.
+ Reason = 1;
+ if (PP.getLastFPEvalPragmaLocation().isValid())
+ // Eval method set using the '#pragma clang fp eval_method'.
+ // We could have both an option and a pragma used to the set the eval
+ // method. The pragma overrides the option in the command line. The Reason
+ // of the diagnostic is overriden too.
+ Reason = 0;
+ if (Reason != -1)
+ Diag(Loc, diag::err_setting_eval_method_used_in_unsafe_context)
+ << Reason << 4;
+ }
FPOptionsOverride NewFPFeatures = CurFPFeatureOverrides();
NewFPFeatures.setAllowFPReassociateOverride(IsEnabled);
FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewFPFeatures);
CurFPFeatures = NewFPFeatures.applyOverrides(getLangOpts());
}
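A sketch of the conflict being diagnosed, assuming the eval method was set on the command line (hypothetical invocation in the comment):

    // clang -ffp-eval-method=double reassoc.cpp
    float sum(float a, float b, float c) {
    #pragma clang fp reassociate(on)   // diagnosed: eval-method setting
                                       // used in a value-unsafe context
      return a + b + c;
    }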
-void Sema::setRoundingMode(SourceLocation Loc, llvm::RoundingMode FPR) {
- // C2x: 7.6.2p3 If the FE_DYNAMIC mode is specified and FENV_ACCESS is "off",
- // the translator may assume that the default rounding mode is in effect.
- if (FPR == llvm::RoundingMode::Dynamic &&
- !CurFPFeatures.getAllowFEnvAccess() &&
- CurFPFeatures.getFPExceptionMode() == LangOptions::FPE_Ignore)
- FPR = llvm::RoundingMode::NearestTiesToEven;
-
+void Sema::ActOnPragmaFEnvRound(SourceLocation Loc, llvm::RoundingMode FPR) {
FPOptionsOverride NewFPFeatures = CurFPFeatureOverrides();
- NewFPFeatures.setRoundingModeOverride(FPR);
+ NewFPFeatures.setConstRoundingModeOverride(FPR);
FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewFPFeatures);
CurFPFeatures = NewFPFeatures.applyOverrides(getLangOpts());
}
@@ -1138,14 +1325,13 @@ void Sema::setRoundingMode(SourceLocation Loc, llvm::RoundingMode FPR) {
void Sema::setExceptionMode(SourceLocation Loc,
LangOptions::FPExceptionModeKind FPE) {
FPOptionsOverride NewFPFeatures = CurFPFeatureOverrides();
- NewFPFeatures.setFPExceptionModeOverride(FPE);
+ NewFPFeatures.setSpecifiedExceptionModeOverride(FPE);
FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewFPFeatures);
CurFPFeatures = NewFPFeatures.applyOverrides(getLangOpts());
}
void Sema::ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled) {
FPOptionsOverride NewFPFeatures = CurFPFeatureOverrides();
- auto LO = getLangOpts();
if (IsEnabled) {
// Verify Microsoft restriction:
// You can't enable fenv_access unless precise semantics are enabled.
@@ -1153,16 +1339,10 @@ void Sema::ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled) {
// pragma, or by using the /fp:precise or /fp:strict compiler options
if (!isPreciseFPEnabled())
Diag(Loc, diag::err_pragma_fenv_requires_precise);
- NewFPFeatures.setAllowFEnvAccessOverride(true);
- // Enabling FENV access sets the RoundingMode to Dynamic.
- // and ExceptionBehavior to Strict
- NewFPFeatures.setRoundingModeOverride(llvm::RoundingMode::Dynamic);
- NewFPFeatures.setFPExceptionModeOverride(LangOptions::FPE_Strict);
- } else {
- NewFPFeatures.setAllowFEnvAccessOverride(false);
}
+ NewFPFeatures.setAllowFEnvAccessOverride(IsEnabled);
FpPragmaStack.Act(Loc, PSK_Set, StringRef(), NewFPFeatures);
- CurFPFeatures = NewFPFeatures.applyOverrides(LO);
+ CurFPFeatures = NewFPFeatures.applyOverrides(getLangOpts());
}
void Sema::ActOnPragmaFPExceptions(SourceLocation Loc,
@@ -1213,8 +1393,9 @@ void Sema::PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc) {
}
template <typename Ty>
-static bool checkCommonAttributeFeatures(Sema& S, const Ty *Node,
- const ParsedAttr& A) {
+static bool checkCommonAttributeFeatures(Sema &S, const Ty *Node,
+ const ParsedAttr &A,
+ bool SkipArgCountCheck) {
// Several attributes carry different semantics than the parsing requires, so
// those are opted out of the common argument checks.
//
@@ -1240,26 +1421,30 @@ static bool checkCommonAttributeFeatures(Sema& S, const Ty *Node,
if (A.hasCustomParsing())
return false;
- if (A.getMinArgs() == A.getMaxArgs()) {
- // If there are no optional arguments, then checking for the argument count
- // is trivial.
- if (!A.checkExactlyNumArgs(S, A.getMinArgs()))
- return true;
- } else {
- // There are optional arguments, so checking is slightly more involved.
- if (A.getMinArgs() && !A.checkAtLeastNumArgs(S, A.getMinArgs()))
- return true;
- else if (!A.hasVariadicArg() && A.getMaxArgs() &&
- !A.checkAtMostNumArgs(S, A.getMaxArgs()))
- return true;
+ if (!SkipArgCountCheck) {
+ if (A.getMinArgs() == A.getMaxArgs()) {
+ // If there are no optional arguments, then checking for the argument
+ // count is trivial.
+ if (!A.checkExactlyNumArgs(S, A.getMinArgs()))
+ return true;
+ } else {
+ // There are optional arguments, so checking is slightly more involved.
+ if (A.getMinArgs() && !A.checkAtLeastNumArgs(S, A.getMinArgs()))
+ return true;
+ else if (!A.hasVariadicArg() && A.getMaxArgs() &&
+ !A.checkAtMostNumArgs(S, A.getMaxArgs()))
+ return true;
+ }
}
return false;
}
-bool Sema::checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A) {
- return ::checkCommonAttributeFeatures(*this, D, A);
+bool Sema::checkCommonAttributeFeatures(const Decl *D, const ParsedAttr &A,
+ bool SkipArgCountCheck) {
+ return ::checkCommonAttributeFeatures(*this, D, A, SkipArgCountCheck);
}
-bool Sema::checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A) {
- return ::checkCommonAttributeFeatures(*this, S, A);
+bool Sema::checkCommonAttributeFeatures(const Stmt *S, const ParsedAttr &A,
+ bool SkipArgCountCheck) {
+ return ::checkCommonAttributeFeatures(*this, S, A, SkipArgCountCheck);
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaAvailability.cpp b/contrib/llvm-project/clang/lib/Sema/SemaAvailability.cpp
index dc15c87cb717..bf4a226668f7 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaAvailability.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaAvailability.cpp
@@ -501,7 +501,7 @@ static void DoEmitAvailabilityWarning(Sema &S, AvailabilityResult K,
SmallVector<StringRef, 12> SelectorSlotNames;
Optional<unsigned> NumParams = tryParseObjCMethodName(
Replacement, SelectorSlotNames, S.getLangOpts());
- if (NumParams && NumParams.getValue() == Sel.getNumArgs()) {
+ if (NumParams && *NumParams == Sel.getNumArgs()) {
assert(SelectorSlotNames.size() == Locs.size());
for (unsigned I = 0; I < Locs.size(); ++I) {
if (!Sel.getNameForSlot(I).empty()) {
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCUDA.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCUDA.cpp
index efa38554bc83..8f8144d658d8 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCUDA.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCUDA.cpp
@@ -145,9 +145,11 @@ Sema::CUDAFunctionTarget Sema::IdentifyCUDATarget(const FunctionDecl *D,
Sema::CUDAVariableTarget Sema::IdentifyCUDATarget(const VarDecl *Var) {
if (Var->hasAttr<HIPManagedAttr>())
return CVT_Unified;
- if (Var->isConstexpr() && !hasExplicitAttr<CUDAConstantAttr>(Var))
- return CVT_Both;
- if (Var->getType().isConstQualified() && Var->hasAttr<CUDAConstantAttr>() &&
+ // Only constexpr and const variables with the implicit constant attribute
+ // are emitted on both sides. Such variables are promoted to the device side
+ // only if they have static constant initializers on the device side.
+ if ((Var->isConstexpr() || Var->getType().isConstQualified()) &&
+ Var->hasAttr<CUDAConstantAttr>() &&
!hasExplicitAttr<CUDAConstantAttr>(Var))
return CVT_Both;
if (Var->hasAttr<CUDADeviceAttr>() || Var->hasAttr<CUDAConstantAttr>() ||
@@ -353,9 +355,7 @@ bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
}
if (!ClassDecl->isAbstract()) {
- for (const auto &VB : ClassDecl->vbases()) {
- Bases.push_back(&VB);
- }
+ llvm::append_range(Bases, llvm::make_pointer_range(ClassDecl->vbases()));
}
for (const auto *B : Bases) {
@@ -377,7 +377,7 @@ bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
continue;
CUDAFunctionTarget BaseMethodTarget = IdentifyCUDATarget(SMOR.getMethod());
- if (!InferredTarget.hasValue()) {
+ if (!InferredTarget) {
InferredTarget = BaseMethodTarget;
} else {
bool ResolutionError = resolveCalleeCUDATargetConflict(
@@ -421,7 +421,7 @@ bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CUDAFunctionTarget FieldMethodTarget =
IdentifyCUDATarget(SMOR.getMethod());
- if (!InferredTarget.hasValue()) {
+ if (!InferredTarget) {
InferredTarget = FieldMethodTarget;
} else {
bool ResolutionError = resolveCalleeCUDATargetConflict(
@@ -444,7 +444,7 @@ bool Sema::inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
// If no target was inferred, mark this member as __host__ __device__;
// it's the least restrictive option that can be invoked from any target.
bool NeedsH = true, NeedsD = true;
- if (InferredTarget.hasValue()) {
+ if (InferredTarget) {
if (InferredTarget.getValue() == CFT_Device)
NeedsH = false;
else if (InferredTarget.getValue() == CFT_Host)
@@ -718,9 +718,9 @@ void Sema::MaybeAddCUDAConstantAttr(VarDecl *VD) {
!VD->hasAttr<CUDAConstantAttr>() && !VD->hasAttr<CUDASharedAttr>() &&
(VD->isFileVarDecl() || VD->isStaticDataMember()) &&
!IsDependentVar(VD) &&
- (VD->isConstexpr() || (VD->getType().isConstQualified() &&
- HasAllowedCUDADeviceStaticInitializer(
- *this, VD, CICK_DeviceOrConstant)))) {
+ ((VD->isConstexpr() || VD->getType().isConstQualified()) &&
+ HasAllowedCUDADeviceStaticInitializer(*this, VD,
+ CICK_DeviceOrConstant))) {
VD->addAttr(CUDAConstantAttr::CreateImplicit(getASTContext()));
}
}
@@ -728,8 +728,9 @@ void Sema::MaybeAddCUDAConstantAttr(VarDecl *VD) {
Sema::SemaDiagnosticBuilder Sema::CUDADiagIfDeviceCode(SourceLocation Loc,
unsigned DiagID) {
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
+ FunctionDecl *CurFunContext = getCurFunctionDecl(/*AllowLambda=*/true);
SemaDiagnosticBuilder::Kind DiagKind = [&] {
- if (!isa<FunctionDecl>(CurContext))
+ if (!CurFunContext)
return SemaDiagnosticBuilder::K_Nop;
switch (CurrentCUDATarget()) {
case CFT_Global:
@@ -743,7 +744,7 @@ Sema::SemaDiagnosticBuilder Sema::CUDADiagIfDeviceCode(SourceLocation Loc,
return SemaDiagnosticBuilder::K_Nop;
if (IsLastErrorImmediate && Diags.getDiagnosticIDs()->isBuiltinNote(DiagID))
return SemaDiagnosticBuilder::K_Immediate;
- return (getEmissionStatus(cast<FunctionDecl>(CurContext)) ==
+ return (getEmissionStatus(CurFunContext) ==
FunctionEmissionStatus::Emitted)
? SemaDiagnosticBuilder::K_ImmediateWithCallStack
: SemaDiagnosticBuilder::K_Deferred;
@@ -751,15 +752,15 @@ Sema::SemaDiagnosticBuilder Sema::CUDADiagIfDeviceCode(SourceLocation Loc,
return SemaDiagnosticBuilder::K_Nop;
}
}();
- return SemaDiagnosticBuilder(DiagKind, Loc, DiagID,
- dyn_cast<FunctionDecl>(CurContext), *this);
+ return SemaDiagnosticBuilder(DiagKind, Loc, DiagID, CurFunContext, *this);
}
Sema::SemaDiagnosticBuilder Sema::CUDADiagIfHostCode(SourceLocation Loc,
unsigned DiagID) {
assert(getLangOpts().CUDA && "Should only be called during CUDA compilation");
+ FunctionDecl *CurFunContext = getCurFunctionDecl(/*AllowLambda=*/true);
SemaDiagnosticBuilder::Kind DiagKind = [&] {
- if (!isa<FunctionDecl>(CurContext))
+ if (!CurFunContext)
return SemaDiagnosticBuilder::K_Nop;
switch (CurrentCUDATarget()) {
case CFT_Host:
@@ -772,7 +773,7 @@ Sema::SemaDiagnosticBuilder Sema::CUDADiagIfHostCode(SourceLocation Loc,
return SemaDiagnosticBuilder::K_Nop;
if (IsLastErrorImmediate && Diags.getDiagnosticIDs()->isBuiltinNote(DiagID))
return SemaDiagnosticBuilder::K_Immediate;
- return (getEmissionStatus(cast<FunctionDecl>(CurContext)) ==
+ return (getEmissionStatus(CurFunContext) ==
FunctionEmissionStatus::Emitted)
? SemaDiagnosticBuilder::K_ImmediateWithCallStack
: SemaDiagnosticBuilder::K_Deferred;
@@ -780,8 +781,7 @@ Sema::SemaDiagnosticBuilder Sema::CUDADiagIfHostCode(SourceLocation Loc,
return SemaDiagnosticBuilder::K_Nop;
}
}();
- return SemaDiagnosticBuilder(DiagKind, Loc, DiagID,
- dyn_cast<FunctionDecl>(CurContext), *this);
+ return SemaDiagnosticBuilder(DiagKind, Loc, DiagID, CurFunContext, *this);
}
bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
@@ -794,7 +794,7 @@ bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
// FIXME: Is bailing out early correct here? Should we instead assume that
// the caller is a global initializer?
- FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext);
+ FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true);
if (!Caller)
return true;
@@ -819,8 +819,13 @@ bool Sema::CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee) {
}
}();
- if (DiagKind == SemaDiagnosticBuilder::K_Nop)
+ if (DiagKind == SemaDiagnosticBuilder::K_Nop) {
+ // For -fgpu-rdc, keep track of external kernels used by host functions.
+ if (LangOpts.CUDAIsDevice && LangOpts.GPURelocatableDeviceCode &&
+ Callee->hasAttr<CUDAGlobalAttr>() && !Callee->isDefined())
+ getASTContext().CUDAExternalDeviceDeclODRUsedByHost.insert(Callee);
return true;
+ }
// Avoid emitting this error twice for the same location. Using a hashtable
// like this is unfortunate, but because we must continue parsing as normal
@@ -860,7 +865,7 @@ void Sema::CUDACheckLambdaCapture(CXXMethodDecl *Callee,
// File-scope lambda can only do init captures for global variables, which
// results in passing by value for these global variables.
- FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext);
+ FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true);
if (!Caller)
return;
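The repeated change from dyn_cast<FunctionDecl>(CurContext) to getCurFunctionDecl(/*AllowLambda=*/true) in this file makes the CUDA caller lookup lambda-aware. An illustrative sketch, assuming clang's CUDA extended-lambda support:

__device__ int bar();

__global__ void kernel() {
  // The call to bar() inside the lambda body is now checked against the
  // lambda's call operator (found via getCurFunctionDecl with
  // AllowLambda=true) instead of bailing out when the current context is
  // not a plain FunctionDecl.
  auto f = [] __device__ () { return bar(); };
  f();
}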
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp
index 4781d71080c9..3f8fedda7174 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCXXScopeSpec.cpp
@@ -121,7 +121,7 @@ DeclContext *Sema::computeDeclContext(const CXXScopeSpec &SS,
// entering the context, and that can't happen in a SFINAE context.
assert(!isSFINAEContext() &&
"partial specialization scope specifier in SFINAE context?");
- if (!hasVisibleDeclaration(PartialSpec))
+ if (!hasReachableDefinition(PartialSpec))
diagnoseMissingImport(SS.getLastQualifierNameLoc(), PartialSpec,
MissingImportKind::PartialSpecialization,
/*Recover*/true);
@@ -243,8 +243,8 @@ bool Sema::RequireCompleteEnumDecl(EnumDecl *EnumD, SourceLocation L,
if (EnumD->isCompleteDefinition()) {
// If we know about the definition but it is not visible, complain.
NamedDecl *SuggestedDef = nullptr;
- if (!hasVisibleDefinition(EnumD, &SuggestedDef,
- /*OnlyNeedComplete*/false)) {
+ if (!hasReachableDefinition(EnumD, &SuggestedDef,
+ /*OnlyNeedComplete*/ false)) {
// If the user is going to see an error here, recover by making the
// definition visible.
bool TreatAsComplete = !isSFINAEContext();
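Both hunks above move from visibility to the C++20 modules notion of reachability: a definition can be reachable (usable to complete a type) even when its name is not visible to lookup. A hypothetical two-file sketch:

// m.cppm
//   export module m;
//   enum Season { Spring, Summer };   // attached to m, not exported
//   export Season first();            // makes Season reachable to importers
//
// use.cpp
//   import m;
//   auto s = first();                 // ok: Season's definition is reachable
//   int i = static_cast<int>(s);      // even though the name is not visible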
@@ -828,10 +828,14 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
}
if (!Found.empty()) {
- if (TypeDecl *TD = Found.getAsSingle<TypeDecl>())
+ if (TypeDecl *TD = Found.getAsSingle<TypeDecl>()) {
Diag(IdInfo.IdentifierLoc, diag::err_expected_class_or_namespace)
<< Context.getTypeDeclType(TD) << getLangOpts().CPlusPlus;
- else {
+ } else if (Found.getAsSingle<TemplateDecl>()) {
+ ParsedType SuggestedType;
+ DiagnoseUnknownTypeName(IdInfo.Identifier, IdInfo.IdentifierLoc, S, &SS,
+ SuggestedType);
+ } else {
Diag(IdInfo.IdentifierLoc, diag::err_expected_class_or_namespace)
<< IdInfo.Identifier << getLangOpts().CPlusPlus;
if (NamedDecl *ND = Found.getAsSingle<NamedDecl>())
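The new TemplateDecl branch routes a template name used without arguments into DiagnoseUnknownTypeName, which can produce a more targeted suggestion. For example (diagnostic wording approximate):

template <typename T> struct List { using iterator = T *; };

// Previously diagnosed only as "expected class or namespace"; with the
// branch above, the unknown-type-name path can instead suggest supplying
// template arguments, e.g. 'List<int>::iterator'.
List::iterator p;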
@@ -850,7 +854,6 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
bool Sema::ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
bool EnteringContext, CXXScopeSpec &SS,
- bool ErrorRecoveryLookup,
bool *IsCorrectedToColon,
bool OnlyNamespace) {
if (SS.isInvalid())
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCast.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCast.cpp
index 7ef1732496c2..7b5bc7ca80b1 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCast.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCast.cpp
@@ -1356,7 +1356,7 @@ static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
if (SrcType->isIntegralOrEnumerationType()) {
// [expr.static.cast]p10 If the enumeration type has a fixed underlying
// type, the value is first converted to that type by integral conversion
- const EnumType *Enum = DestType->getAs<EnumType>();
+ const EnumType *Enum = DestType->castAs<EnumType>();
Kind = Enum->getDecl()->isFixed() &&
Enum->getDecl()->getIntegerType()->isBooleanType()
? CK_IntegralToBoolean
@@ -2545,7 +2545,7 @@ static TryCastResult TryReinterpretCast(Sema &Self, ExprResult &SrcExpr,
static TryCastResult TryAddressSpaceCast(Sema &Self, ExprResult &SrcExpr,
QualType DestType, bool CStyle,
unsigned &msg, CastKind &Kind) {
- if (!Self.getLangOpts().OpenCL)
+ if (!Self.getLangOpts().OpenCL && !Self.getLangOpts().SYCLIsDevice)
// FIXME: As compiler doesn't have any information about overlapping addr
// spaces at the moment we have to be permissive here.
return TC_NotApplicable;
@@ -3129,6 +3129,23 @@ void CastOperation::CheckCStyleCast() {
Self.Diag(OpRange.getBegin(), diag::warn_cast_function_type)
<< SrcType << DestType << OpRange;
+ if (isa<PointerType>(SrcType) && isa<PointerType>(DestType)) {
+ QualType SrcTy = cast<PointerType>(SrcType)->getPointeeType();
+ QualType DestTy = cast<PointerType>(DestType)->getPointeeType();
+
+ const RecordDecl *SrcRD = SrcTy->getAsRecordDecl();
+ const RecordDecl *DestRD = DestTy->getAsRecordDecl();
+
+ if (SrcRD && DestRD && SrcRD->hasAttr<RandomizeLayoutAttr>() &&
+ SrcRD != DestRD) {
+ // The struct we are casting the pointer from was randomized.
+ Self.Diag(OpRange.getBegin(), diag::err_cast_from_randomized_struct)
+ << SrcType << DestType;
+ SrcExpr = ExprError();
+ return;
+ }
+ }
+
DiagnoseCastOfObjCSEL(Self, SrcExpr, DestType);
DiagnoseCallingConvCast(Self, SrcExpr, DestType, OpRange);
DiagnoseBadFunctionCast(Self, SrcExpr, DestType);
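The added check rejects C-style casts between pointers to distinct record types when the source record's layout was randomized, since its field offsets no longer match what the destination type expects. A sketch using the Linux-kernel-style attribute spelling:

struct __attribute__((randomize_layout)) Task {
  int pid;
  long state;
};
struct PlainTask { int pid; long state; };

void f(struct Task *t) {
  // error (err_cast_from_randomized_struct): 'struct Task' was randomized,
  // so reinterpreting it as 'struct PlainTask' would read scrambled fields.
  struct PlainTask *p = (struct PlainTask *)t;
  (void)p;
}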
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp b/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp
index 69dcc3aaaaf3..79420cc27699 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaChecking.cpp
@@ -109,24 +109,38 @@ SourceLocation Sema::getLocationOfStringLiteralByte(const StringLiteral *SL,
Context.getTargetInfo());
}
+/// Checks that a call expression's argument count is at least the desired
+/// number. This is useful when doing custom type-checking on a variadic
+/// function. Returns true on error.
+static bool checkArgCountAtLeast(Sema &S, CallExpr *Call,
+ unsigned MinArgCount) {
+ unsigned ArgCount = Call->getNumArgs();
+ if (ArgCount >= MinArgCount)
+ return false;
+
+ return S.Diag(Call->getEndLoc(), diag::err_typecheck_call_too_few_args)
+ << 0 /*function call*/ << MinArgCount << ArgCount
+ << Call->getSourceRange();
+}
+
/// Checks that a call expression's argument count is the desired number.
/// This is useful when doing custom type-checking. Returns true on error.
-static bool checkArgCount(Sema &S, CallExpr *call, unsigned desiredArgCount) {
- unsigned argCount = call->getNumArgs();
- if (argCount == desiredArgCount) return false;
+static bool checkArgCount(Sema &S, CallExpr *Call, unsigned DesiredArgCount) {
+ unsigned ArgCount = Call->getNumArgs();
+ if (ArgCount == DesiredArgCount)
+ return false;
- if (argCount < desiredArgCount)
- return S.Diag(call->getEndLoc(), diag::err_typecheck_call_too_few_args)
- << 0 /*function call*/ << desiredArgCount << argCount
- << call->getSourceRange();
+ if (checkArgCountAtLeast(S, Call, DesiredArgCount))
+ return true;
+ assert(ArgCount > DesiredArgCount && "should have diagnosed this");
// Highlight all the excess arguments.
- SourceRange range(call->getArg(desiredArgCount)->getBeginLoc(),
- call->getArg(argCount - 1)->getEndLoc());
+ SourceRange Range(Call->getArg(DesiredArgCount)->getBeginLoc(),
+ Call->getArg(ArgCount - 1)->getEndLoc());
- return S.Diag(range.getBegin(), diag::err_typecheck_call_too_many_args)
- << 0 /*function call*/ << desiredArgCount << argCount
- << call->getArg(1)->getSourceRange();
+ return S.Diag(Range.getBegin(), diag::err_typecheck_call_too_many_args)
+ << 0 /*function call*/ << DesiredArgCount << ArgCount
+ << Call->getArg(1)->getSourceRange();
}
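A hypothetical caller showing how the refactored helpers compose for a variadic builtin that needs at least two arguments before doing per-argument checking (checkMyVariadicBuiltin is illustrative, not part of the patch):

static bool checkMyVariadicBuiltin(Sema &S, CallExpr *Call) {
  // Emits err_typecheck_call_too_few_args and returns true on error; any
  // extra trailing arguments are the builtin's business and are checked
  // individually below.
  if (checkArgCountAtLeast(S, Call, 2))
    return true;
  for (unsigned I = 2, N = Call->getNumArgs(); I != N; ++I) {
    // ... custom type-checking of Call->getArg(I) ...
  }
  return false;
}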
/// Check that the first argument to __builtin_annotation is an integer
@@ -147,7 +161,7 @@ static bool SemaBuiltinAnnotation(Sema &S, CallExpr *TheCall) {
// Second argument should be a constant string.
Expr *StrArg = TheCall->getArg(1)->IgnoreParenCasts();
StringLiteral *Literal = dyn_cast<StringLiteral>(StrArg);
- if (!Literal || !Literal->isAscii()) {
+ if (!Literal || !Literal->isOrdinary()) {
S.Diag(StrArg->getBeginLoc(), diag::err_builtin_annotation_second_arg)
<< StrArg->getSourceRange();
return true;
@@ -366,6 +380,311 @@ static bool SemaBuiltinOverflow(Sema &S, CallExpr *TheCall,
return false;
}
+namespace {
+struct BuiltinDumpStructGenerator {
+ Sema &S;
+ CallExpr *TheCall;
+ SourceLocation Loc = TheCall->getBeginLoc();
+ SmallVector<Expr *, 32> Actions;
+ DiagnosticErrorTrap ErrorTracker;
+ PrintingPolicy Policy;
+
+ BuiltinDumpStructGenerator(Sema &S, CallExpr *TheCall)
+ : S(S), TheCall(TheCall), ErrorTracker(S.getDiagnostics()),
+ Policy(S.Context.getPrintingPolicy()) {
+ Policy.AnonymousTagLocations = false;
+ }
+
+ Expr *makeOpaqueValueExpr(Expr *Inner) {
+ auto *OVE = new (S.Context)
+ OpaqueValueExpr(Loc, Inner->getType(), Inner->getValueKind(),
+ Inner->getObjectKind(), Inner);
+ Actions.push_back(OVE);
+ return OVE;
+ }
+
+ Expr *getStringLiteral(llvm::StringRef Str) {
+ Expr *Lit = S.Context.getPredefinedStringLiteralFromCache(Str);
+ // Wrap the literal in parentheses to attach a source location.
+ return new (S.Context) ParenExpr(Loc, Loc, Lit);
+ }
+
+ bool callPrintFunction(llvm::StringRef Format,
+ llvm::ArrayRef<Expr *> Exprs = {}) {
+ SmallVector<Expr *, 8> Args;
+ assert(TheCall->getNumArgs() >= 2);
+ Args.reserve((TheCall->getNumArgs() - 2) + /*Format*/ 1 + Exprs.size());
+ Args.assign(TheCall->arg_begin() + 2, TheCall->arg_end());
+ Args.push_back(getStringLiteral(Format));
+ Args.insert(Args.end(), Exprs.begin(), Exprs.end());
+
+ // Register a note to explain why we're performing the call.
+ Sema::CodeSynthesisContext Ctx;
+ Ctx.Kind = Sema::CodeSynthesisContext::BuildingBuiltinDumpStructCall;
+ Ctx.PointOfInstantiation = Loc;
+ Ctx.CallArgs = Args.data();
+ Ctx.NumCallArgs = Args.size();
+ S.pushCodeSynthesisContext(Ctx);
+
+ ExprResult RealCall =
+ S.BuildCallExpr(/*Scope=*/nullptr, TheCall->getArg(1),
+ TheCall->getBeginLoc(), Args, TheCall->getRParenLoc());
+
+ S.popCodeSynthesisContext();
+ if (!RealCall.isInvalid())
+ Actions.push_back(RealCall.get());
+ // Bail out if we've hit any errors, even if we managed to build the
+ // call. We don't want to produce more than one error.
+ return RealCall.isInvalid() || ErrorTracker.hasErrorOccurred();
+ }
+
+ Expr *getIndentString(unsigned Depth) {
+ if (!Depth)
+ return nullptr;
+
+ llvm::SmallString<32> Indent;
+ Indent.resize(Depth * Policy.Indentation, ' ');
+ return getStringLiteral(Indent);
+ }
+
+ Expr *getTypeString(QualType T) {
+ return getStringLiteral(T.getAsString(Policy));
+ }
+
+ bool appendFormatSpecifier(QualType T, llvm::SmallVectorImpl<char> &Str) {
+ llvm::raw_svector_ostream OS(Str);
+
+ // Format 'bool', 'char', 'signed char', 'unsigned char' as numbers, rather
+ // than trying to print a single character.
+ if (auto *BT = T->getAs<BuiltinType>()) {
+ switch (BT->getKind()) {
+ case BuiltinType::Bool:
+ OS << "%d";
+ return true;
+ case BuiltinType::Char_U:
+ case BuiltinType::UChar:
+ OS << "%hhu";
+ return true;
+ case BuiltinType::Char_S:
+ case BuiltinType::SChar:
+ OS << "%hhd";
+ return true;
+ default:
+ break;
+ }
+ }
+
+ analyze_printf::PrintfSpecifier Specifier;
+ if (Specifier.fixType(T, S.getLangOpts(), S.Context, /*IsObjCLiteral=*/false)) {
+ // We were able to guess how to format this.
+ if (Specifier.getConversionSpecifier().getKind() ==
+ analyze_printf::PrintfConversionSpecifier::sArg) {
+ // Wrap double-quotes around a '%s' specifier and limit its maximum
+ // length. Ideally we'd also somehow escape special characters in the
+ // contents but printf doesn't support that.
+ // FIXME: '%s' formatting is not safe in general.
+ OS << '"';
+ Specifier.setPrecision(analyze_printf::OptionalAmount(32u));
+ Specifier.toString(OS);
+ OS << '"';
+ // FIXME: It would be nice to include a '...' if the string doesn't fit
+ // in the length limit.
+ } else {
+ Specifier.toString(OS);
+ }
+ return true;
+ }
+
+ if (T->isPointerType()) {
+ // Format all pointers with '%p'.
+ OS << "%p";
+ return true;
+ }
+
+ return false;
+ }
+
+ bool dumpUnnamedRecord(const RecordDecl *RD, Expr *E, unsigned Depth) {
+ Expr *IndentLit = getIndentString(Depth);
+ Expr *TypeLit = getTypeString(S.Context.getRecordType(RD));
+ if (IndentLit ? callPrintFunction("%s%s", {IndentLit, TypeLit})
+ : callPrintFunction("%s", {TypeLit}))
+ return true;
+
+ return dumpRecordValue(RD, E, IndentLit, Depth);
+ }
+
+ // Dump a record value. E should be a pointer or lvalue referring to an RD.
+ bool dumpRecordValue(const RecordDecl *RD, Expr *E, Expr *RecordIndent,
+ unsigned Depth) {
+ // FIXME: Decide what to do if RD is a union. At least we should probably
+ // turn off printing `const char*` members with `%s`, because that is very
+ // likely to crash if that's not the active member. Whatever we decide, we
+ // should document it.
+
+ // Build an OpaqueValueExpr so we can refer to E more than once without
+ // triggering re-evaluation.
+ Expr *RecordArg = makeOpaqueValueExpr(E);
+ bool RecordArgIsPtr = RecordArg->getType()->isPointerType();
+
+ if (callPrintFunction(" {\n"))
+ return true;
+
+ // Dump each base class, regardless of whether they're aggregates.
+ if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
+ for (const auto &Base : CXXRD->bases()) {
+ QualType BaseType =
+ RecordArgIsPtr ? S.Context.getPointerType(Base.getType())
+ : S.Context.getLValueReferenceType(Base.getType());
+ ExprResult BasePtr = S.BuildCStyleCastExpr(
+ Loc, S.Context.getTrivialTypeSourceInfo(BaseType, Loc), Loc,
+ RecordArg);
+ if (BasePtr.isInvalid() ||
+ dumpUnnamedRecord(Base.getType()->getAsRecordDecl(), BasePtr.get(),
+ Depth + 1))
+ return true;
+ }
+ }
+
+ Expr *FieldIndentArg = getIndentString(Depth + 1);
+
+ // Dump each field.
+ for (auto *D : RD->decls()) {
+ auto *IFD = dyn_cast<IndirectFieldDecl>(D);
+ auto *FD = IFD ? IFD->getAnonField() : dyn_cast<FieldDecl>(D);
+ if (!FD || FD->isUnnamedBitfield() || FD->isAnonymousStructOrUnion())
+ continue;
+
+ llvm::SmallString<20> Format = llvm::StringRef("%s%s %s ");
+ llvm::SmallVector<Expr *, 5> Args = {FieldIndentArg,
+ getTypeString(FD->getType()),
+ getStringLiteral(FD->getName())};
+
+ if (FD->isBitField()) {
+ Format += ": %zu ";
+ QualType SizeT = S.Context.getSizeType();
+ llvm::APInt BitWidth(S.Context.getIntWidth(SizeT),
+ FD->getBitWidthValue(S.Context));
+ Args.push_back(IntegerLiteral::Create(S.Context, BitWidth, SizeT, Loc));
+ }
+
+ Format += "=";
+
+ ExprResult Field =
+ IFD ? S.BuildAnonymousStructUnionMemberReference(
+ CXXScopeSpec(), Loc, IFD,
+ DeclAccessPair::make(IFD, AS_public), RecordArg, Loc)
+ : S.BuildFieldReferenceExpr(
+ RecordArg, RecordArgIsPtr, Loc, CXXScopeSpec(), FD,
+ DeclAccessPair::make(FD, AS_public),
+ DeclarationNameInfo(FD->getDeclName(), Loc));
+ if (Field.isInvalid())
+ return true;
+
+ auto *InnerRD = FD->getType()->getAsRecordDecl();
+ auto *InnerCXXRD = dyn_cast_or_null<CXXRecordDecl>(InnerRD);
+ if (InnerRD && (!InnerCXXRD || InnerCXXRD->isAggregate())) {
+ // Recursively print the values of members of aggregate record type.
+ if (callPrintFunction(Format, Args) ||
+ dumpRecordValue(InnerRD, Field.get(), FieldIndentArg, Depth + 1))
+ return true;
+ } else {
+ Format += " ";
+ if (appendFormatSpecifier(FD->getType(), Format)) {
+ // We know how to print this field.
+ Args.push_back(Field.get());
+ } else {
+ // We don't know how to print this field. Print out its address
+ // with a format specifier that a smart tool will be able to
+ // recognize and treat specially.
+ Format += "*%p";
+ ExprResult FieldAddr =
+ S.BuildUnaryOp(nullptr, Loc, UO_AddrOf, Field.get());
+ if (FieldAddr.isInvalid())
+ return true;
+ Args.push_back(FieldAddr.get());
+ }
+ Format += "\n";
+ if (callPrintFunction(Format, Args))
+ return true;
+ }
+ }
+
+ return RecordIndent ? callPrintFunction("%s}\n", RecordIndent)
+ : callPrintFunction("}\n");
+ }
+
+ Expr *buildWrapper() {
+ auto *Wrapper = PseudoObjectExpr::Create(S.Context, TheCall, Actions,
+ PseudoObjectExpr::NoResult);
+ TheCall->setType(Wrapper->getType());
+ TheCall->setValueKind(Wrapper->getValueKind());
+ return Wrapper;
+ }
+};
+} // namespace
+
+static ExprResult SemaBuiltinDumpStruct(Sema &S, CallExpr *TheCall) {
+ if (checkArgCountAtLeast(S, TheCall, 2))
+ return ExprError();
+
+ ExprResult PtrArgResult = S.DefaultLvalueConversion(TheCall->getArg(0));
+ if (PtrArgResult.isInvalid())
+ return ExprError();
+ TheCall->setArg(0, PtrArgResult.get());
+
+ // First argument should be a pointer to a struct.
+ QualType PtrArgType = PtrArgResult.get()->getType();
+ if (!PtrArgType->isPointerType() ||
+ !PtrArgType->getPointeeType()->isRecordType()) {
+ S.Diag(PtrArgResult.get()->getBeginLoc(),
+ diag::err_expected_struct_pointer_argument)
+ << 1 << TheCall->getDirectCallee() << PtrArgType;
+ return ExprError();
+ }
+ const RecordDecl *RD = PtrArgType->getPointeeType()->getAsRecordDecl();
+
+ // Second argument is a callable, but we can't fully validate it until we try
+ // calling it.
+ QualType FnArgType = TheCall->getArg(1)->getType();
+ if (!FnArgType->isFunctionType() && !FnArgType->isFunctionPointerType() &&
+ !FnArgType->isBlockPointerType() &&
+ !(S.getLangOpts().CPlusPlus && FnArgType->isRecordType())) {
+ auto *BT = FnArgType->getAs<BuiltinType>();
+ switch (BT ? BT->getKind() : BuiltinType::Void) {
+ case BuiltinType::Dependent:
+ case BuiltinType::Overload:
+ case BuiltinType::BoundMember:
+ case BuiltinType::PseudoObject:
+ case BuiltinType::UnknownAny:
+ case BuiltinType::BuiltinFn:
+ // This might be a callable.
+ break;
+
+ default:
+ S.Diag(TheCall->getArg(1)->getBeginLoc(),
+ diag::err_expected_callable_argument)
+ << 2 << TheCall->getDirectCallee() << FnArgType;
+ return ExprError();
+ }
+ }
+
+ BuiltinDumpStructGenerator Generator(S, TheCall);
+
+ // Wrap parentheses around the given pointer. This is not necessary for
+ // correct code generation, but it means that when we pretty-print the call
+ // arguments in our diagnostics we will produce '(&s)->n' instead of the
+ // incorrect '&s->n'.
+ Expr *PtrArg = PtrArgResult.get();
+ PtrArg = new (S.Context)
+ ParenExpr(PtrArg->getBeginLoc(),
+ S.getLocForEndOfToken(PtrArg->getEndLoc()), PtrArg);
+ if (Generator.dumpUnnamedRecord(RD, PtrArg, 0))
+ return ExprError();
+
+ return Generator.buildWrapper();
+}
+
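For reference, the rewritten builtin is used like this; the generator above lowers the single call into a sequence of formatter calls wrapped in a PseudoObjectExpr, and any arguments after the formatter are forwarded ahead of the format string (so a leading FILE * for fprintf works too):

#include <cstdio>

struct Point {
  int x, y;
  const char *label;
};

void dump(const Point &p) {
  __builtin_dump_struct(&p, printf);           // prints type, fields, values
  __builtin_dump_struct(&p, fprintf, stderr);  // extra args forwarded first
}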
static bool SemaBuiltinCallWithStaticChain(Sema &S, CallExpr *BuiltinCall) {
if (checkArgCount(S, BuiltinCall, 2))
return true;
@@ -715,7 +1034,7 @@ void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
Optional<unsigned> IndexOptional = TranslateIndex(Index);
if (!IndexOptional)
return llvm::None;
- unsigned NewIndex = IndexOptional.getValue();
+ unsigned NewIndex = *IndexOptional;
Expr::EvalResult Result;
Expr *SizeArg = TheCall->getArg(NewIndex);
if (!SizeArg->EvaluateAsInt(Result, getASTContext()))
@@ -740,7 +1059,7 @@ void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
Optional<unsigned> IndexOptional = TranslateIndex(Index);
if (!IndexOptional)
return llvm::None;
- unsigned NewIndex = IndexOptional.getValue();
+ unsigned NewIndex = *IndexOptional;
const Expr *ObjArg = TheCall->getArg(NewIndex);
uint64_t Result;
@@ -755,7 +1074,7 @@ void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
Optional<unsigned> IndexOptional = TranslateIndex(Index);
if (!IndexOptional)
return llvm::None;
- unsigned NewIndex = IndexOptional.getValue();
+ unsigned NewIndex = *IndexOptional;
const Expr *ObjArg = TheCall->getArg(NewIndex);
uint64_t Result;
@@ -820,7 +1139,7 @@ void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
if (!Format)
return;
- if (!Format->isAscii() && !Format->isUTF8())
+ if (!Format->isOrdinary() && !Format->isUTF8())
return;
auto Diagnose = [&](unsigned ArgIndex, unsigned DestSize,
@@ -865,7 +1184,7 @@ void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
if (auto *Format = dyn_cast<StringLiteral>(FormatExpr)) {
- if (!Format->isAscii() && !Format->isUTF8())
+ if (!Format->isOrdinary() && !Format->isUTF8())
return;
StringRef FormatStrRef = Format->getString();
@@ -965,8 +1284,7 @@ void Sema::checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD,
}
if (!SourceSize || !DestinationSize ||
- llvm::APSInt::compareValues(SourceSize.getValue(),
- DestinationSize.getValue()) <= 0)
+ llvm::APSInt::compareValues(*SourceSize, *DestinationSize) <= 0)
return;
StringRef FunctionName = GetFunctionName();
@@ -1555,7 +1873,7 @@ static ExprResult SemaBuiltinLaunder(Sema &S, CallExpr *TheCall) {
return 2;
return llvm::Optional<unsigned>{};
}();
- if (DiagSelect.hasValue()) {
+ if (DiagSelect) {
S.Diag(TheCall->getBeginLoc(), diag::err_builtin_launder_invalid_arg)
<< DiagSelect.getValue() << TheCall->getSourceRange();
return ExprError();
@@ -1679,7 +1997,10 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
if ((ICEArguments & (1 << ArgNo)) == 0) continue;
llvm::APSInt Result;
- if (SemaBuiltinConstantArg(TheCall, ArgNo, Result))
+ // If we don't have enough arguments, continue so we can issue a better
+ // diagnostic in checkArgCount(...).
+ if (ArgNo < TheCall->getNumArgs() &&
+ SemaBuiltinConstantArg(TheCall, ArgNo, Result))
return true;
ICEArguments &= ~(1 << ArgNo);
}
@@ -1955,6 +2276,17 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
}
break;
}
+ case Builtin::BI__builtin_memset_inline: {
+ clang::Expr *SizeOp = TheCall->getArg(2);
+ // We warn about writing through `nullptr` when `size` is greater than 0.
+ // When `size` is value-dependent we cannot evaluate its value, so we bail
+ // out.
+ if (SizeOp->isValueDependent())
+ break;
+ if (!SizeOp->EvaluateKnownConstInt(Context).isZero())
+ CheckNonNullArgument(*this, TheCall->getArg(0), TheCall->getExprLoc());
+ break;
+ }
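Usage sketch for the new case; __builtin_memset_inline requires a constant size, and the null-destination check is skipped only when that size is zero:

void f(char *dst) {
  __builtin_memset_inline(dst, 0, 16);         // fine: dst is not a null constant
  // __builtin_memset_inline(nullptr, 0, 16);  // warns: null written with size > 0
  __builtin_memset_inline(nullptr, 0, 0);      // size 0: the non-null check is skipped
}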
#define BUILTIN(ID, TYPE, ATTRS)
#define ATOMIC_BUILTIN(ID, TYPE, ATTRS) \
case Builtin::BI##ID: \
@@ -1997,62 +2329,8 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
CorrectDelayedTyposInExpr(TheCallResult.get());
return Res;
}
- case Builtin::BI__builtin_dump_struct: {
- // We first want to ensure we are called with 2 arguments
- if (checkArgCount(*this, TheCall, 2))
- return ExprError();
- // Ensure that the first argument is of type 'struct XX *'
- const Expr *PtrArg = TheCall->getArg(0)->IgnoreParenImpCasts();
- const QualType PtrArgType = PtrArg->getType();
- if (!PtrArgType->isPointerType() ||
- !PtrArgType->getPointeeType()->isRecordType()) {
- Diag(PtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
- << PtrArgType << "structure pointer" << 1 << 0 << 3 << 1 << PtrArgType
- << "structure pointer";
- return ExprError();
- }
-
- // Ensure that the second argument is of type 'FunctionType'
- const Expr *FnPtrArg = TheCall->getArg(1)->IgnoreImpCasts();
- const QualType FnPtrArgType = FnPtrArg->getType();
- if (!FnPtrArgType->isPointerType()) {
- Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
- << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
- << FnPtrArgType << "'int (*)(const char *, ...)'";
- return ExprError();
- }
-
- const auto *FuncType =
- FnPtrArgType->getPointeeType()->getAs<FunctionType>();
-
- if (!FuncType) {
- Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
- << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3 << 2
- << FnPtrArgType << "'int (*)(const char *, ...)'";
- return ExprError();
- }
-
- if (const auto *FT = dyn_cast<FunctionProtoType>(FuncType)) {
- if (!FT->getNumParams()) {
- Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
- << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
- << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
- return ExprError();
- }
- QualType PT = FT->getParamType(0);
- if (!FT->isVariadic() || FT->getReturnType() != Context.IntTy ||
- !PT->isPointerType() || !PT->getPointeeType()->isCharType() ||
- !PT->getPointeeType().isConstQualified()) {
- Diag(FnPtrArg->getBeginLoc(), diag::err_typecheck_convert_incompatible)
- << FnPtrArgType << "'int (*)(const char *, ...)'" << 1 << 0 << 3
- << 2 << FnPtrArgType << "'int (*)(const char *, ...)'";
- return ExprError();
- }
- }
-
- TheCall->setType(Context.IntTy);
- break;
- }
+ case Builtin::BI__builtin_dump_struct:
+ return SemaBuiltinDumpStruct(*this, TheCall);
case Builtin::BI__builtin_expect_with_probability: {
// We first want to ensure we are called with 3 arguments
if (checkArgCount(*this, TheCall, 3))
@@ -2114,6 +2392,32 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
TheCall->setType(Context.VoidPtrTy);
break;
+ case Builtin::BIaddressof:
+ case Builtin::BI__addressof:
+ case Builtin::BIforward:
+ case Builtin::BImove:
+ case Builtin::BImove_if_noexcept:
+ case Builtin::BIas_const: {
+ // These are all expected to be of the form
+ // T &/&&/* f(U &/&&)
+ // where T and U only differ in qualification.
+ if (checkArgCount(*this, TheCall, 1))
+ return ExprError();
+ QualType Param = FDecl->getParamDecl(0)->getType();
+ QualType Result = FDecl->getReturnType();
+ bool ReturnsPointer = BuiltinID == Builtin::BIaddressof ||
+ BuiltinID == Builtin::BI__addressof;
+ if (!(Param->isReferenceType() &&
+ (ReturnsPointer ? Result->isPointerType()
+ : Result->isReferenceType()) &&
+ Context.hasSameUnqualifiedType(Param->getPointeeType(),
+ Result->getPointeeType()))) {
+ Diag(TheCall->getBeginLoc(), diag::err_builtin_move_forward_unsupported)
+ << FDecl;
+ return ExprError();
+ }
+ break;
+ }
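The accepted shape for these library builtins is a unary function whose parameter and return are both references (or a pointer return for addressof) to the same unqualified type. A stand-in sketch of the usual std::move declaration shape (local names, not the real std):

template <class T> struct remove_ref { using type = T; };
template <class T> struct remove_ref<T &> { using type = T; };
template <class T> struct remove_ref<T &&> { using type = T; };

// Reference parameter, reference return, same unqualified pointee type:
// this is the "T &/&&/* f(U &/&&)" pattern the check above validates.
template <class T>
constexpr typename remove_ref<T>::type &&move_sketch(T &&t) noexcept {
  return static_cast<typename remove_ref<T>::type &&>(t);
}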
// OpenCL v2.0, s6.13.16 - Pipe functions
case Builtin::BIread_pipe:
case Builtin::BIwrite_pipe:
@@ -2244,6 +2548,28 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
break;
}
+ // These builtins restrict the element type to integer
+ // types only.
+ case Builtin::BI__builtin_elementwise_add_sat:
+ case Builtin::BI__builtin_elementwise_sub_sat: {
+ if (SemaBuiltinElementwiseMath(TheCall))
+ return ExprError();
+
+ const Expr *Arg = TheCall->getArg(0);
+ QualType ArgTy = Arg->getType();
+ QualType EltTy = ArgTy;
+
+ if (auto *VecTy = EltTy->getAs<VectorType>())
+ EltTy = VecTy->getElementType();
+
+ if (!EltTy->isIntegerType()) {
+ Diag(Arg->getBeginLoc(), diag::err_builtin_invalid_arg_type)
+ << 1 << /* integer ty */ 6 << ArgTy;
+ return ExprError();
+ }
+ break;
+ }
+
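Usage sketch: the saturating elementwise builtins take two integer or integer-vector operands; floating-point element types now hit the new diagnostic:

typedef int v4si __attribute__((ext_vector_type(4)));

int sadd(int a, int b) {
  return __builtin_elementwise_add_sat(a, b);   // scalars are allowed
}
v4si ssub(v4si a, v4si b) {
  return __builtin_elementwise_sub_sat(a, b);   // per-element, saturating
}
// __builtin_elementwise_add_sat(1.0f, 2.0f) -> err_builtin_invalid_arg_type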
case Builtin::BI__builtin_elementwise_min:
case Builtin::BI__builtin_elementwise_max:
if (SemaBuiltinElementwiseMath(TheCall))
@@ -2267,6 +2593,9 @@ Sema::CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID,
}
// These builtins support vectors of integers only.
+ // TODO: ADD/MUL should support floating-point types.
+ case Builtin::BI__builtin_reduce_add:
+ case Builtin::BI__builtin_reduce_mul:
case Builtin::BI__builtin_reduce_xor:
case Builtin::BI__builtin_reduce_or:
case Builtin::BI__builtin_reduce_and: {
@@ -2884,6 +3213,9 @@ bool Sema::CheckAArch64BuiltinFunctionCall(const TargetInfo &TI,
if (BuiltinID == AArch64::BI__getReg)
return SemaBuiltinConstantArgRange(TheCall, 0, 0, 31);
+ if (BuiltinID == AArch64::BI__break)
+ return SemaBuiltinConstantArgRange(TheCall, 0, 0, 0xffff);
+
if (CheckNeonBuiltinFunctionCall(TI, BuiltinID, TheCall))
return true;
@@ -2917,23 +3249,12 @@ static bool isValidBPFPreserveFieldInfoArg(Expr *Arg) {
isa<ArraySubscriptExpr>(Arg->IgnoreParens()));
}
-static bool isEltOfVectorTy(ASTContext &Context, CallExpr *Call, Sema &S,
- QualType VectorTy, QualType EltTy) {
- QualType VectorEltTy = VectorTy->castAs<VectorType>()->getElementType();
- if (!Context.hasSameType(VectorEltTy, EltTy)) {
- S.Diag(Call->getBeginLoc(), diag::err_typecheck_call_different_arg_types)
- << Call->getSourceRange() << VectorEltTy << EltTy;
- return false;
- }
- return true;
-}
-
static bool isValidBPFPreserveTypeInfoArg(Expr *Arg) {
QualType ArgType = Arg->getType();
if (ArgType->getAsPlaceholderType())
return false;
- // for TYPE_EXISTENCE/TYPE_SIZEOF reloc type
+ // for TYPE_EXISTENCE/TYPE_MATCH/TYPE_SIZEOF reloc type
// format:
// 1. __builtin_preserve_type_info(*(<type> *)0, flag);
// 2. <type> var;
@@ -3582,6 +3903,8 @@ static bool isPPC_64Builtin(unsigned BuiltinID) {
case PPC::BI__builtin_divde:
case PPC::BI__builtin_divdeu:
case PPC::BI__builtin_bpermd:
+ case PPC::BI__builtin_pdepd:
+ case PPC::BI__builtin_pextd:
case PPC::BI__builtin_ppc_ldarx:
case PPC::BI__builtin_ppc_stdcx:
case PPC::BI__builtin_ppc_tdw:
@@ -3741,16 +4064,12 @@ bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
case PPC::BI__builtin_pack_vector_int128:
return SemaFeatureCheck(*this, TheCall, "vsx",
diag::err_ppc_builtin_only_on_arch, "7");
+ case PPC::BI__builtin_pdepd:
+ case PPC::BI__builtin_pextd:
+ return SemaFeatureCheck(*this, TheCall, "isa-v31-instructions",
+ diag::err_ppc_builtin_only_on_arch, "10");
case PPC::BI__builtin_altivec_vgnb:
return SemaBuiltinConstantArgRange(TheCall, 1, 2, 7);
- case PPC::BI__builtin_altivec_vec_replace_elt:
- case PPC::BI__builtin_altivec_vec_replace_unaligned: {
- QualType VecTy = TheCall->getArg(0)->getType();
- QualType EltTy = TheCall->getArg(1)->getType();
- unsigned Width = Context.getIntWidth(EltTy);
- return SemaBuiltinConstantArgRange(TheCall, 2, 0, Width == 32 ? 12 : 8) ||
- !isEltOfVectorTy(Context, TheCall, *this, VecTy, EltTy);
- }
case PPC::BI__builtin_vsx_xxeval:
return SemaBuiltinConstantArgRange(TheCall, 3, 0, 255);
case PPC::BI__builtin_altivec_vsldbi:
@@ -3860,6 +4179,33 @@ bool Sema::CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID,
diag::err_ppc_builtin_requires_vsx) ||
SemaBuiltinConstantArgRange(TheCall, 1, 0, 127);
}
+ case PPC::BI__builtin_ppc_maxfe:
+ case PPC::BI__builtin_ppc_minfe:
+ case PPC::BI__builtin_ppc_maxfl:
+ case PPC::BI__builtin_ppc_minfl:
+ case PPC::BI__builtin_ppc_maxfs:
+ case PPC::BI__builtin_ppc_minfs: {
+ if (Context.getTargetInfo().getTriple().isOSAIX() &&
+ (BuiltinID == PPC::BI__builtin_ppc_maxfe ||
+ BuiltinID == PPC::BI__builtin_ppc_minfe))
+ return Diag(TheCall->getBeginLoc(), diag::err_target_unsupported_type)
+ << "builtin" << true << 128 << QualType(Context.LongDoubleTy)
+ << false << Context.getTargetInfo().getTriple().str();
+ // Argument type should be exact.
+ QualType ArgType = QualType(Context.LongDoubleTy);
+ if (BuiltinID == PPC::BI__builtin_ppc_maxfl ||
+ BuiltinID == PPC::BI__builtin_ppc_minfl)
+ ArgType = QualType(Context.DoubleTy);
+ else if (BuiltinID == PPC::BI__builtin_ppc_maxfs ||
+ BuiltinID == PPC::BI__builtin_ppc_minfs)
+ ArgType = QualType(Context.FloatTy);
+ for (unsigned I = 0, E = TheCall->getNumArgs(); I < E; ++I)
+ if (TheCall->getArg(I)->getType() != ArgType)
+ return Diag(TheCall->getBeginLoc(),
+ diag::err_typecheck_convert_incompatible)
+ << TheCall->getArg(I)->getType() << ArgType << 1 << 0 << 0;
+ return false;
+ }
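These PPC min/max builtins require every argument to have exactly the expected floating type (long double for *fe, double for *fl, float for *fs); no conversions are applied. Sketch:

double pick(double a, double b, double c) {
  return __builtin_ppc_maxfl(a, b, c);   // ok: all arguments are double
}
// __builtin_ppc_maxfl(1, b, c) is rejected with
// err_typecheck_convert_incompatible: 'int' is not 'double'.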
case PPC::BI__builtin_ppc_load8r:
case PPC::BI__builtin_ppc_store8r:
return SemaFeatureCheck(*this, TheCall, "isa-v206-instructions",
@@ -3970,6 +4316,27 @@ bool Sema::CheckRISCVLMUL(CallExpr *TheCall, unsigned ArgNum) {
<< Arg->getSourceRange();
}
+static bool isRISCV32Builtin(unsigned BuiltinID) {
+ // These builtins only work on riscv32 targets.
+ switch (BuiltinID) {
+ case RISCV::BI__builtin_riscv_zip_32:
+ case RISCV::BI__builtin_riscv_unzip_32:
+ case RISCV::BI__builtin_riscv_aes32dsi_32:
+ case RISCV::BI__builtin_riscv_aes32dsmi_32:
+ case RISCV::BI__builtin_riscv_aes32esi_32:
+ case RISCV::BI__builtin_riscv_aes32esmi_32:
+ case RISCV::BI__builtin_riscv_sha512sig0h_32:
+ case RISCV::BI__builtin_riscv_sha512sig0l_32:
+ case RISCV::BI__builtin_riscv_sha512sig1h_32:
+ case RISCV::BI__builtin_riscv_sha512sig1l_32:
+ case RISCV::BI__builtin_riscv_sha512sum0r_32:
+ case RISCV::BI__builtin_riscv_sha512sum1r_32:
+ return true;
+ }
+
+ return false;
+}
+
bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
unsigned BuiltinID,
CallExpr *TheCall) {
@@ -3980,6 +4347,12 @@ bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
StringRef Features = Context.BuiltinInfo.getRequiredFeatures(BuiltinID);
Features.split(ReqFeatures, ',');
+ // Check for 32-bit only builtins on a 64-bit target.
+ const llvm::Triple &TT = TI.getTriple();
+ if (TT.getArch() != llvm::Triple::riscv32 && isRISCV32Builtin(BuiltinID))
+ return Diag(TheCall->getCallee()->getBeginLoc(),
+ diag::err_32_bit_builtin_64_bit_tgt);
+
// Check if each required feature is included
for (StringRef F : ReqFeatures) {
SmallVector<StringRef> ReqOpFeatures;
@@ -4027,6 +4400,41 @@ bool Sema::CheckRISCVBuiltinFunctionCall(const TargetInfo &TI,
case RISCVVector::BI__builtin_rvv_vsetvlimax:
return SemaBuiltinConstantArgRange(TheCall, 0, 0, 3) ||
CheckRISCVLMUL(TheCall, 1);
+ case RISCVVector::BI__builtin_rvv_vget_v: {
+ ASTContext::BuiltinVectorTypeInfo ResVecInfo =
+ Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
+ TheCall->getType().getCanonicalType().getTypePtr()));
+ ASTContext::BuiltinVectorTypeInfo VecInfo =
+ Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
+ TheCall->getArg(0)->getType().getCanonicalType().getTypePtr()));
+ unsigned MaxIndex =
+ (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors) /
+ (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors);
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
+ }
+ case RISCVVector::BI__builtin_rvv_vset_v: {
+ ASTContext::BuiltinVectorTypeInfo ResVecInfo =
+ Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
+ TheCall->getType().getCanonicalType().getTypePtr()));
+ ASTContext::BuiltinVectorTypeInfo VecInfo =
+ Context.getBuiltinVectorTypeInfo(cast<BuiltinType>(
+ TheCall->getArg(2)->getType().getCanonicalType().getTypePtr()));
+ unsigned MaxIndex =
+ (ResVecInfo.EC.getKnownMinValue() * ResVecInfo.NumVectors) /
+ (VecInfo.EC.getKnownMinValue() * VecInfo.NumVectors);
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, MaxIndex - 1);
+ }
+ // Check if byteselect is in [0, 3]
+ case RISCV::BI__builtin_riscv_aes32dsi_32:
+ case RISCV::BI__builtin_riscv_aes32dsmi_32:
+ case RISCV::BI__builtin_riscv_aes32esi_32:
+ case RISCV::BI__builtin_riscv_aes32esmi_32:
+ case RISCV::BI__builtin_riscv_sm4ks:
+ case RISCV::BI__builtin_riscv_sm4ed:
+ return SemaBuiltinConstantArgRange(TheCall, 2, 0, 3);
+ // Check if rnum is in [0, 10]
+ case RISCV::BI__builtin_riscv_aes64ks1i_64:
+ return SemaBuiltinConstantArgRange(TheCall, 1, 0, 10);
}
return false;
@@ -5415,7 +5823,9 @@ bool Sema::CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
if (!FnInfo)
return false;
- CheckTCBEnforcement(TheCall, FDecl);
+ // Enforce TCB except for builtin calls, which are always allowed.
+ if (FDecl->getBuiltinID() == 0)
+ CheckTCBEnforcement(TheCall->getExprLoc(), FDecl);
CheckAbsoluteValueFunction(TheCall, FDecl);
CheckMaxUnsignedZero(TheCall, FDecl);
@@ -5455,6 +5865,8 @@ bool Sema::CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation lbrac,
/*IsMemberFunction=*/false, lbrac, Method->getSourceRange(),
CallType);
+ CheckTCBEnforcement(lbrac, Method);
+
return false;
}
@@ -6059,7 +6471,7 @@ static bool checkBuiltinArgument(Sema &S, CallExpr *E, unsigned ArgIndex) {
InitializedEntity Entity =
InitializedEntity::InitializeParameter(S.Context, Param);
- ExprResult Arg = E->getArg(0);
+ ExprResult Arg = E->getArg(ArgIndex);
Arg = S.PerformCopyInitialization(Entity, SourceLocation(), Arg);
if (Arg.isInvalid())
return true;
@@ -6526,7 +6938,7 @@ bool Sema::CheckObjCString(Expr *Arg) {
Arg = Arg->IgnoreParenCasts();
StringLiteral *Literal = dyn_cast<StringLiteral>(Arg);
- if (!Literal || !Literal->isAscii()) {
+ if (!Literal || !Literal->isOrdinary()) {
Diag(Arg->getBeginLoc(), diag::err_cfstring_literal_not_string_constant)
<< Arg->getSourceRange();
return true;
@@ -6561,7 +6973,7 @@ ExprResult Sema::CheckOSLogFormatStringArg(Expr *Arg) {
}
}
- if (!Literal || (!Literal->isAscii() && !Literal->isUTF8())) {
+ if (!Literal || (!Literal->isOrdinary() && !Literal->isUTF8())) {
return ExprError(
Diag(Arg->getBeginLoc(), diag::err_os_log_format_not_string_constant)
<< Arg->getSourceRange());
@@ -7978,7 +8390,7 @@ class FormatStringLiteral {
QualType getType() const { return FExpr->getType(); }
- bool isAscii() const { return FExpr->isAscii(); }
+ bool isAscii() const { return FExpr->isOrdinary(); }
bool isWide() const { return FExpr->isWide(); }
bool isUTF8() const { return FExpr->isUTF8(); }
bool isUTF16() const { return FExpr->isUTF16(); }
@@ -10766,7 +11178,10 @@ static void CheckMemaccessSize(Sema &S, unsigned BId, const CallExpr *Call) {
Call->getArg(BId == Builtin::BImemset ? 2 : 1)->IgnoreImpCasts();
auto isLiteralZero = [](const Expr *E) {
- return isa<IntegerLiteral>(E) && cast<IntegerLiteral>(E)->getValue() == 0;
+ return (isa<IntegerLiteral>(E) &&
+ cast<IntegerLiteral>(E)->getValue() == 0) ||
+ (isa<CharacterLiteral>(E) &&
+ cast<CharacterLiteral>(E)->getValue() == 0);
};
// If we're memsetting or bzeroing 0 bytes, then this is likely an error.
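The widened isLiteralZero also catches a character-literal zero, which in this position usually means the size and value arguments were swapped. Sketch:

#include <cstring>

void clear(char (&buf)[64]) {
  std::memset(buf, 0, sizeof buf);   // intended
  // Swapped arguments: fills zero bytes; '\0' as the size argument now
  // triggers the same zero-size warning as a literal 0.
  std::memset(buf, sizeof buf, '\0');
}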
@@ -11288,7 +11703,7 @@ void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName,
/// Alerts the user that they are attempting to free a non-malloc'd object.
void Sema::CheckFreeArguments(const CallExpr *E) {
const std::string CalleeName =
- dyn_cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString();
+ cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString();
{ // Prefer something that doesn't involve a cast to make things simpler.
const Expr *Arg = E->getArg(0)->IgnoreParenCasts();
@@ -11359,12 +11774,40 @@ Sema::CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
CheckPPCMMAType(RetValExp->getType(), ReturnLoc);
}
-//===--- CHECK: Floating-Point comparisons (-Wfloat-equal) ---------------===//
+/// Check for comparisons of floating-point values using == and !=. Issue a
+/// warning if the comparison is not likely to do what the programmer intended.
+void Sema::CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS,
+ BinaryOperatorKind Opcode) {
+ // Match and capture subexpressions such as "(float) X == 0.1".
+ FloatingLiteral *FPLiteral;
+ CastExpr *FPCast;
+ auto getCastAndLiteral = [&FPLiteral, &FPCast](Expr *L, Expr *R) {
+ FPLiteral = dyn_cast<FloatingLiteral>(L->IgnoreParens());
+ FPCast = dyn_cast<CastExpr>(R->IgnoreParens());
+ return FPLiteral && FPCast;
+ };
+
+ if (getCastAndLiteral(LHS, RHS) || getCastAndLiteral(RHS, LHS)) {
+ auto *SourceTy = FPCast->getSubExpr()->getType()->getAs<BuiltinType>();
+ auto *TargetTy = FPLiteral->getType()->getAs<BuiltinType>();
+ if (SourceTy && TargetTy && SourceTy->isFloatingPoint() &&
+ TargetTy->isFloatingPoint()) {
+ bool Lossy;
+ llvm::APFloat TargetC = FPLiteral->getValue();
+ TargetC.convert(Context.getFloatTypeSemantics(QualType(SourceTy, 0)),
+ llvm::APFloat::rmNearestTiesToEven, &Lossy);
+ if (Lossy) {
+ // If the literal cannot be represented in the source type, then a
+ // check for == is always false and a check for != is always true.
+ Diag(Loc, diag::warn_float_compare_literal)
+ << (Opcode == BO_EQ) << QualType(SourceTy, 0)
+ << LHS->getSourceRange() << RHS->getSourceRange();
+ return;
+ }
+ }
+ }
-/// Check for comparisons of floating point operands using != and ==.
-/// Issue a warning if these are no self-comparisons, as they are not likely
-/// to do what the programmer intended.
-void Sema::CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr *RHS) {
+ // Match a more general floating-point equality comparison (-Wfloat-equal).
Expr* LeftExprSansParen = LHS->IgnoreParenImpCasts();
Expr* RightExprSansParen = RHS->IgnoreParenImpCasts();
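An example of what the new matcher catches: 0.1 has no exact float representation, so after the implicit conversion the equality can never hold:

bool never(float f) {
  // f is promoted to double (the CastExpr); converting the literal 0.1 back
  // to float's semantics is lossy, so warn_float_compare_literal fires:
  // == is always false here.
  return f == 0.1;
}
bool always(float f) { return f != 0.1; }   // != is always true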
@@ -13111,6 +13554,29 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
const BuiltinType *SourceBT = dyn_cast<BuiltinType>(Source);
const BuiltinType *TargetBT = dyn_cast<BuiltinType>(Target);
+ // Strip SVE vector types
+ if (SourceBT && SourceBT->isVLSTBuiltinType()) {
+ // Need the original target type for vector type checks
+ const Type *OriginalTarget = S.Context.getCanonicalType(T).getTypePtr();
+ // Handle conversion from scalable to fixed when msve-vector-bits is
+ // specified
+ if (S.Context.areCompatibleSveTypes(QualType(OriginalTarget, 0),
+ QualType(Source, 0)) ||
+ S.Context.areLaxCompatibleSveTypes(QualType(OriginalTarget, 0),
+ QualType(Source, 0)))
+ return;
+
+ // If the vector cast is cast between two vectors of the same size, it is
+ // a bitcast, not a conversion.
+ if (S.Context.getTypeSize(Source) == S.Context.getTypeSize(Target))
+ return;
+
+ Source = SourceBT->getSveEltType(S.Context).getTypePtr();
+ }
+
+ if (TargetBT && TargetBT->isVLSTBuiltinType())
+ Target = TargetBT->getSveEltType(S.Context).getTypePtr();
+
// If the source is floating point...
if (SourceBT && SourceBT->isFloatingPoint()) {
// ...and the target is floating point...
@@ -13396,9 +13862,10 @@ static void CheckImplicitConversion(Sema &S, Expr *E, QualType T,
// Fall through for non-constants to give a sign conversion warning.
}
- if ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) ||
- (!TargetRange.NonNegative && LikelySourceRange.NonNegative &&
- LikelySourceRange.Width == TargetRange.Width)) {
+ if ((!isa<EnumType>(Target) || !isa<EnumType>(Source)) &&
+ ((TargetRange.NonNegative && !LikelySourceRange.NonNegative) ||
+ (!TargetRange.NonNegative && LikelySourceRange.NonNegative &&
+ LikelySourceRange.Width == TargetRange.Width))) {
if (S.SourceMgr.isInSystemMacro(CC))
return;
@@ -13629,6 +14096,13 @@ static void AnalyzeImplicitConversions(
if (!ChildExpr)
continue;
+ if (auto *CSE = dyn_cast<CoroutineSuspendExpr>(E))
+ if (ChildExpr == CSE->getOperand())
+ // Do not recurse over a CoroutineSuspendExpr's operand.
+ // The operand is also a subexpression of getCommonExpr(), and
+ // recursing into it directly would produce duplicate diagnostics.
+ continue;
+
if (IsLogicalAndOperator &&
isa<StringLiteral>(ChildExpr->IgnoreParenImpCasts()))
// Ignore checking string literals that are in logical and operators.
@@ -15330,6 +15804,8 @@ void Sema::CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
ND = ME->getMemberDecl();
if (IsUnboundedArray) {
+ if (EffectiveType->isFunctionType())
+ return;
if (index.isUnsigned() || !index.isNegative()) {
const auto &ASTC = getASTContext();
unsigned AddrBits =
@@ -16197,7 +16673,7 @@ void Sema::DiagnoseEmptyLoopBody(const Stmt *S,
Body = FS->getBody();
DiagID = diag::warn_empty_for_body;
} else if (const WhileStmt *WS = dyn_cast<WhileStmt>(S)) {
- StmtLoc = WS->getCond()->getSourceRange().getEnd();
+ StmtLoc = WS->getRParenLoc();
Body = WS->getBody();
DiagID = diag::warn_empty_while_body;
} else
@@ -17249,33 +17725,28 @@ ExprResult Sema::SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall,
/// CheckTCBEnforcement - Enforces that every function in a named TCB only
/// directly calls other functions in the same TCB as marked by the enforce_tcb
/// and enforce_tcb_leaf attributes.
-void Sema::CheckTCBEnforcement(const CallExpr *TheCall,
- const FunctionDecl *Callee) {
- const FunctionDecl *Caller = getCurFunctionDecl();
+void Sema::CheckTCBEnforcement(const SourceLocation CallExprLoc,
+ const NamedDecl *Callee) {
+ const NamedDecl *Caller = getCurFunctionOrMethodDecl();
- // Calls to builtins are not enforced.
- if (!Caller || !Caller->hasAttr<EnforceTCBAttr>() ||
- Callee->getBuiltinID() != 0)
+ if (!Caller || !Caller->hasAttr<EnforceTCBAttr>())
return;
// Search through the enforce_tcb and enforce_tcb_leaf attributes to find
// all TCBs the callee is a part of.
llvm::StringSet<> CalleeTCBs;
- for_each(Callee->specific_attrs<EnforceTCBAttr>(),
- [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); });
- for_each(Callee->specific_attrs<EnforceTCBLeafAttr>(),
- [&](const auto *A) { CalleeTCBs.insert(A->getTCBName()); });
+ for (const auto *A : Callee->specific_attrs<EnforceTCBAttr>())
+ CalleeTCBs.insert(A->getTCBName());
+ for (const auto *A : Callee->specific_attrs<EnforceTCBLeafAttr>())
+ CalleeTCBs.insert(A->getTCBName());
// Go through the TCBs the caller is a part of and emit warnings if Caller
// is in a TCB that the Callee is not.
- for_each(
- Caller->specific_attrs<EnforceTCBAttr>(),
- [&](const auto *A) {
- StringRef CallerTCB = A->getTCBName();
- if (CalleeTCBs.count(CallerTCB) == 0) {
- this->Diag(TheCall->getExprLoc(),
- diag::warn_tcb_enforcement_violation) << Callee
- << CallerTCB;
- }
- });
+ for (const auto *A : Caller->specific_attrs<EnforceTCBAttr>()) {
+ StringRef CallerTCB = A->getTCBName();
+ if (CalleeTCBs.count(CallerTCB) == 0) {
+ this->Diag(CallExprLoc, diag::warn_tcb_enforcement_violation)
+ << Callee << CallerTCB;
+ }
+ }
}
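For context, the attribute this enforces, now also applied to Objective-C message sends via the CheckObjCMethodCall hook above (diagnostic wording approximate):

void untrusted();
__attribute__((enforce_tcb("crypto"))) void helper();

__attribute__((enforce_tcb("crypto")))
void sign() {
  helper();     // ok: callee is in the caller's 'crypto' TCB
  untrusted();  // warn_tcb_enforcement_violation: 'untrusted' is not in
                // the 'crypto' TCB
}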
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp
index 01fdf51c60c3..8c9ed5389488 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCodeComplete.cpp
@@ -1095,7 +1095,9 @@ void ResultBuilder::MaybeAddResult(Result R, DeclContext *CurContext) {
if (const UsingShadowDecl *Using = dyn_cast<UsingShadowDecl>(R.Declaration)) {
CodeCompletionResult Result(Using->getTargetDecl(),
getBasePriority(Using->getTargetDecl()),
- R.Qualifier);
+ R.Qualifier, false,
+ (R.Availability == CXAvailability_Available ||
+ R.Availability == CXAvailability_Deprecated));
Result.ShadowDecl = Using;
MaybeAddResult(Result, CurContext);
return;
@@ -1268,7 +1270,9 @@ void ResultBuilder::AddResult(Result R, DeclContext *CurContext,
if (const auto *Using = dyn_cast<UsingShadowDecl>(R.Declaration)) {
CodeCompletionResult Result(Using->getTargetDecl(),
getBasePriority(Using->getTargetDecl()),
- R.Qualifier);
+ R.Qualifier, false,
+ (R.Availability == CXAvailability_Available ||
+ R.Availability == CXAvailability_Deprecated));
Result.ShadowDecl = Using;
AddResult(Result, CurContext, Hiding);
return;
@@ -2122,8 +2126,7 @@ static void AddOrdinaryNameResults(Sema::ParserCompletionContext CCC, Scope *S,
if (CCC == Sema::PCC_Class) {
AddTypedefResult(Results);
- bool IsNotInheritanceScope =
- !(S->getFlags() & Scope::ClassInheritanceScope);
+ bool IsNotInheritanceScope = !S->isClassInheritanceScope();
// public:
Builder.AddTypedTextChunk("public");
if (IsNotInheritanceScope && Results.includeCodePatterns())
@@ -4045,6 +4048,9 @@ CXCursorKind clang::getCursorKindForDecl(const Decl *D) {
case Decl::ObjCTypeParam:
return CXCursor_TemplateTypeParameter;
+ case Decl::Concept:
+ return CXCursor_ConceptDecl;
+
default:
if (const auto *TD = dyn_cast<TagDecl>(D)) {
switch (TD->getTagKind()) {
@@ -5048,7 +5054,7 @@ static void AddRecordMembersCompletionResults(
Results.allowNestedNameSpecifiers();
std::vector<FixItHint> FixIts;
if (AccessOpFixIt)
- FixIts.emplace_back(AccessOpFixIt.getValue());
+ FixIts.emplace_back(*AccessOpFixIt);
CodeCompletionDeclConsumer Consumer(Results, RD, BaseType, std::move(FixIts));
SemaRef.LookupVisibleDecls(RD, Sema::LookupMemberName, Consumer,
SemaRef.CodeCompleter->includeGlobals(),
@@ -5639,7 +5645,7 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
// Objective-C property reference. Bail if we're performing fix-it code
// completion since Objective-C properties are normally backed by ivars,
// most Objective-C fix-its here would have little value.
- if (AccessOpFixIt.hasValue()) {
+ if (AccessOpFixIt) {
return false;
}
AddedPropertiesSet AddedProperties;
@@ -5664,7 +5670,7 @@ void Sema::CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base,
// Objective-C instance variable access. Bail if we're performing fix-it
// code completion since Objective-C properties are normally backed by
// ivars, most Objective-C fix-its here would have little value.
- if (AccessOpFixIt.hasValue()) {
+ if (AccessOpFixIt) {
return false;
}
ObjCInterfaceDecl *Class = nullptr;
@@ -9138,8 +9144,8 @@ static void AddObjCKeyValueCompletions(ObjCPropertyDecl *Property,
if (IsInstanceMethod &&
(ReturnType.isNull() ||
(ReturnType->isObjCObjectPointerType() &&
- ReturnType->getAs<ObjCObjectPointerType>()->getInterfaceDecl() &&
- ReturnType->getAs<ObjCObjectPointerType>()
+ ReturnType->castAs<ObjCObjectPointerType>()->getInterfaceDecl() &&
+ ReturnType->castAs<ObjCObjectPointerType>()
->getInterfaceDecl()
->getName() == "NSEnumerator"))) {
std::string SelectorName = (Twine("enumeratorOf") + UpperKey).str();
@@ -9505,8 +9511,7 @@ void Sema::CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
IFace = Category->getClassInterface();
if (IFace)
- for (auto *Cat : IFace->visible_categories())
- Containers.push_back(Cat);
+ llvm::append_range(Containers, IFace->visible_categories());
if (IsInstanceMethod) {
for (unsigned I = 0, N = Containers.size(); I != N; ++I)
@@ -9787,7 +9792,7 @@ void Sema::CodeCompletePreprocessorMacroName(bool IsDefinition) {
CodeCompleter->getCodeCompletionTUInfo(),
IsDefinition ? CodeCompletionContext::CCC_MacroName
: CodeCompletionContext::CCC_MacroNameUse);
- if (!IsDefinition && (!CodeCompleter || CodeCompleter->includeMacros())) {
+ if (!IsDefinition && CodeCompleter->includeMacros()) {
// Add just the names of macros, not their arguments.
CodeCompletionBuilder Builder(Results.getAllocator(),
Results.getCodeCompletionTUInfo());
@@ -9814,9 +9819,8 @@ void Sema::CodeCompletePreprocessorExpression() {
CodeCompleter->getCodeCompletionTUInfo(),
CodeCompletionContext::CCC_PreprocessorExpression);
- if (!CodeCompleter || CodeCompleter->includeMacros())
- AddMacroResults(PP, Results,
- !CodeCompleter || CodeCompleter->loadExternal(), true);
+ if (CodeCompleter->includeMacros())
+ AddMacroResults(PP, Results, CodeCompleter->loadExternal(), true);
// defined (<macro>)
Results.EnterNewScope();
@@ -9974,7 +9978,7 @@ void Sema::CodeCompleteIncludedFile(llvm::StringRef Dir, bool Angled) {
using llvm::make_range;
if (!Angled) {
// The current directory is on the include path for "quoted" includes.
- auto *CurFile = PP.getCurrentFileLexer()->getFileEntry();
+ const FileEntry *CurFile = PP.getCurrentFileLexer()->getFileEntry();
if (CurFile && CurFile->getDir())
AddFilesFromIncludeDir(CurFile->getDir()->getName(), false,
DirectoryLookup::LT_NormalDir);
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp b/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp
index ce99d4848cca..239e5dc4394c 100755
--- a/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaConcept.cpp
@@ -317,36 +317,30 @@ bool Sema::CheckConstraintSatisfaction(
OutSatisfaction.IsSatisfied = true;
return false;
}
-
+ if (!Template) {
+ return ::CheckConstraintSatisfaction(*this, nullptr, ConstraintExprs,
+ TemplateArgs, TemplateIDRange,
+ OutSatisfaction);
+ }
llvm::FoldingSetNodeID ID;
+ ConstraintSatisfaction::Profile(ID, Context, Template, TemplateArgs);
void *InsertPos;
- ConstraintSatisfaction *Satisfaction = nullptr;
- bool ShouldCache = LangOpts.ConceptSatisfactionCaching && Template;
- if (ShouldCache) {
- ConstraintSatisfaction::Profile(ID, Context, Template, TemplateArgs);
- Satisfaction = SatisfactionCache.FindNodeOrInsertPos(ID, InsertPos);
- if (Satisfaction) {
- OutSatisfaction = *Satisfaction;
- return false;
- }
- Satisfaction = new ConstraintSatisfaction(Template, TemplateArgs);
- } else {
- Satisfaction = &OutSatisfaction;
+ if (auto *Cached = SatisfactionCache.FindNodeOrInsertPos(ID, InsertPos)) {
+ OutSatisfaction = *Cached;
+ return false;
}
+ auto Satisfaction =
+ std::make_unique<ConstraintSatisfaction>(Template, TemplateArgs);
if (::CheckConstraintSatisfaction(*this, Template, ConstraintExprs,
TemplateArgs, TemplateIDRange,
*Satisfaction)) {
- if (ShouldCache)
- delete Satisfaction;
return true;
}
-
- if (ShouldCache) {
- // We cannot use InsertNode here because CheckConstraintSatisfaction might
- // have invalidated it.
- SatisfactionCache.InsertNode(Satisfaction);
- OutSatisfaction = *Satisfaction;
- }
+ OutSatisfaction = *Satisfaction;
+ // We cannot use InsertPos here because CheckConstraintSatisfaction might have
+ // invalidated it.
+ // Note that entries of SatisfactionCache are deleted in Sema's destructor.
+ SatisfactionCache.InsertNode(Satisfaction.release());
return false;
}
@@ -354,8 +348,9 @@ bool Sema::CheckConstraintSatisfaction(const Expr *ConstraintExpr,
ConstraintSatisfaction &Satisfaction) {
return calculateConstraintSatisfaction(
*this, ConstraintExpr, Satisfaction,
- [](const Expr *AtomicExpr) -> ExprResult {
- return ExprResult(const_cast<Expr *>(AtomicExpr));
+ [this](const Expr *AtomicExpr) -> ExprResult {
+ // We only do this to imitate lvalue-to-rvalue conversion.
+ return PerformContextuallyConvertToBool(const_cast<Expr *>(AtomicExpr));
});
}
@@ -409,6 +404,52 @@ bool Sema::EnsureTemplateArgumentListConstraints(
return false;
}
+bool Sema::CheckInstantiatedFunctionTemplateConstraints(
+ SourceLocation PointOfInstantiation, FunctionDecl *Decl,
+ ArrayRef<TemplateArgument> TemplateArgs,
+ ConstraintSatisfaction &Satisfaction) {
+ // In most cases we're not going to have constraints, so check for that first.
+ FunctionTemplateDecl *Template = Decl->getPrimaryTemplate();
+ // Note: the code synthesis context for the constraints check is created
+ // inside CheckConstraintSatisfaction.
+ SmallVector<const Expr *, 3> TemplateAC;
+ Template->getAssociatedConstraints(TemplateAC);
+ if (TemplateAC.empty()) {
+ Satisfaction.IsSatisfied = true;
+ return false;
+ }
+
+ // Enter the scope of this instantiation. We don't use
+ // PushDeclContext because we don't have a scope.
+ Sema::ContextRAII savedContext(*this, Decl);
+ LocalInstantiationScope Scope(*this);
+
+ // If this is not an explicit specialization, we need to get the instantiated
+ // version of the template arguments and add them to scope for the
+ // substitution.
+ if (Decl->isTemplateInstantiation()) {
+ InstantiatingTemplate Inst(*this, Decl->getPointOfInstantiation(),
+ InstantiatingTemplate::ConstraintsCheck{}, Decl->getPrimaryTemplate(),
+ TemplateArgs, SourceRange());
+ if (Inst.isInvalid())
+ return true;
+ MultiLevelTemplateArgumentList MLTAL(
+ *Decl->getTemplateSpecializationArgs());
+ if (addInstantiatedParametersToScope(
+ Decl, Decl->getPrimaryTemplate()->getTemplatedDecl(), Scope, MLTAL))
+ return true;
+ }
+ Qualifiers ThisQuals;
+ CXXRecordDecl *Record = nullptr;
+ if (auto *Method = dyn_cast<CXXMethodDecl>(Decl)) {
+ ThisQuals = Method->getMethodQualifiers();
+ Record = Method->getParent();
+ }
+ CXXThisScopeRAII ThisScope(*this, Record, ThisQuals, Record != nullptr);
+ return CheckConstraintSatisfaction(Template, TemplateAC, TemplateArgs,
+ PointOfInstantiation, Satisfaction);
+}
+
static void diagnoseUnsatisfiedRequirement(Sema &S,
concepts::ExprRequirement *Req,
bool First) {
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp b/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp
index cd3ae62ebbe2..a738befdd6ce 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaCoroutine.cpp
@@ -246,44 +246,22 @@ static bool isValidCoroutineContext(Sema &S, SourceLocation Loc,
return !Diagnosed;
}
-static ExprResult buildOperatorCoawaitLookupExpr(Sema &SemaRef, Scope *S,
- SourceLocation Loc) {
- DeclarationName OpName =
- SemaRef.Context.DeclarationNames.getCXXOperatorName(OO_Coawait);
- LookupResult Operators(SemaRef, OpName, SourceLocation(),
- Sema::LookupOperatorName);
- SemaRef.LookupName(Operators, S);
-
- assert(!Operators.isAmbiguous() && "Operator lookup cannot be ambiguous");
- const auto &Functions = Operators.asUnresolvedSet();
- bool IsOverloaded =
- Functions.size() > 1 ||
- (Functions.size() == 1 && isa<FunctionTemplateDecl>(*Functions.begin()));
- Expr *CoawaitOp = UnresolvedLookupExpr::Create(
- SemaRef.Context, /*NamingClass*/ nullptr, NestedNameSpecifierLoc(),
- DeclarationNameInfo(OpName, Loc), /*RequiresADL*/ true, IsOverloaded,
- Functions.begin(), Functions.end());
- assert(CoawaitOp);
- return CoawaitOp;
-}
-
/// Build a call to 'operator co_await' if there is a suitable operator for
/// the given expression.
-static ExprResult buildOperatorCoawaitCall(Sema &SemaRef, SourceLocation Loc,
- Expr *E,
- UnresolvedLookupExpr *Lookup) {
+ExprResult Sema::BuildOperatorCoawaitCall(SourceLocation Loc, Expr *E,
+ UnresolvedLookupExpr *Lookup) {
UnresolvedSet<16> Functions;
Functions.append(Lookup->decls_begin(), Lookup->decls_end());
- return SemaRef.CreateOverloadedUnaryOp(Loc, UO_Coawait, Functions, E);
+ return CreateOverloadedUnaryOp(Loc, UO_Coawait, Functions, E);
}
static ExprResult buildOperatorCoawaitCall(Sema &SemaRef, Scope *S,
SourceLocation Loc, Expr *E) {
- ExprResult R = buildOperatorCoawaitLookupExpr(SemaRef, S, Loc);
+ ExprResult R = SemaRef.BuildOperatorCoawaitLookupExpr(S, Loc);
if (R.isInvalid())
return ExprError();
- return buildOperatorCoawaitCall(SemaRef, Loc, E,
- cast<UnresolvedLookupExpr>(R.get()));
+ return SemaRef.BuildOperatorCoawaitCall(Loc, E,
+ cast<UnresolvedLookupExpr>(R.get()));
}
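
A minimal sketch (assumed example) of the operator this lookup resolves; the
unary `co_await Sleeper{}` is then built through CreateOverloadedUnaryOp.

    #include <coroutine>

    struct Awaiter {
      bool await_ready() const noexcept { return true; }
      void await_suspend(std::coroutine_handle<>) const noexcept {}
      void await_resume() const noexcept {}
    };

    struct Sleeper {
      Awaiter operator co_await() const noexcept { return {}; }
    };
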
static ExprResult buildCoroutineHandle(Sema &S, QualType PromiseType,
@@ -680,7 +658,7 @@ static void checkNoThrow(Sema &S, const Stmt *E,
QualType::DestructionKind::DK_cxx_destructor) {
const auto *T =
cast<RecordType>(ReturnType.getCanonicalType().getTypePtr());
- checkDeclNoexcept(dyn_cast<CXXRecordDecl>(T->getDecl())->getDestructor(),
+ checkDeclNoexcept(cast<CXXRecordDecl>(T->getDecl())->getDestructor(),
/*IsDtor=*/true);
}
} else
@@ -727,14 +705,15 @@ bool Sema::ActOnCoroutineBodyStart(Scope *SC, SourceLocation KWLoc,
SourceLocation Loc = Fn->getLocation();
// Build the initial suspend point
auto buildSuspends = [&](StringRef Name) mutable -> StmtResult {
- ExprResult Suspend =
+ ExprResult Operand =
buildPromiseCall(*this, ScopeInfo->CoroutinePromise, Loc, Name, None);
- if (Suspend.isInvalid())
+ if (Operand.isInvalid())
return StmtError();
- Suspend = buildOperatorCoawaitCall(*this, SC, Loc, Suspend.get());
+ ExprResult Suspend =
+ buildOperatorCoawaitCall(*this, SC, Loc, Operand.get());
if (Suspend.isInvalid())
return StmtError();
- Suspend = BuildResolvedCoawaitExpr(Loc, Suspend.get(),
+ Suspend = BuildResolvedCoawaitExpr(Loc, Operand.get(), Suspend.get(),
/*IsImplicit*/ true);
Suspend = ActOnFinishFullExpr(Suspend.get(), /*DiscardedValue*/ false);
if (Suspend.isInvalid()) {
@@ -776,8 +755,8 @@ static bool isWithinCatchScope(Scope *S) {
// }();
// }
// }
- while (S && !(S->getFlags() & Scope::FnScope)) {
- if (S->getFlags() & Scope::CatchScope)
+ while (S && !S->isFunctionScope()) {
+ if (S->isCatchScope())
return true;
S = S->getParent();
}
@@ -815,88 +794,112 @@ ExprResult Sema::ActOnCoawaitExpr(Scope *S, SourceLocation Loc, Expr *E) {
if (R.isInvalid()) return ExprError();
E = R.get();
}
- ExprResult Lookup = buildOperatorCoawaitLookupExpr(*this, S, Loc);
+ ExprResult Lookup = BuildOperatorCoawaitLookupExpr(S, Loc);
if (Lookup.isInvalid())
return ExprError();
return BuildUnresolvedCoawaitExpr(Loc, E,
cast<UnresolvedLookupExpr>(Lookup.get()));
}
-ExprResult Sema::BuildUnresolvedCoawaitExpr(SourceLocation Loc, Expr *E,
+ExprResult Sema::BuildOperatorCoawaitLookupExpr(Scope *S, SourceLocation Loc) {
+ DeclarationName OpName =
+ Context.DeclarationNames.getCXXOperatorName(OO_Coawait);
+ LookupResult Operators(*this, OpName, SourceLocation(),
+ Sema::LookupOperatorName);
+ LookupName(Operators, S);
+
+ assert(!Operators.isAmbiguous() && "Operator lookup cannot be ambiguous");
+ const auto &Functions = Operators.asUnresolvedSet();
+ bool IsOverloaded =
+ Functions.size() > 1 ||
+ (Functions.size() == 1 && isa<FunctionTemplateDecl>(*Functions.begin()));
+ Expr *CoawaitOp = UnresolvedLookupExpr::Create(
+ Context, /*NamingClass*/ nullptr, NestedNameSpecifierLoc(),
+ DeclarationNameInfo(OpName, Loc), /*RequiresADL*/ true, IsOverloaded,
+ Functions.begin(), Functions.end());
+ assert(CoawaitOp);
+ return CoawaitOp;
+}
+
+// Attempts to resolve and build a CoawaitExpr from "raw" inputs, bailing out to
+// DependentCoawaitExpr if needed.
+ExprResult Sema::BuildUnresolvedCoawaitExpr(SourceLocation Loc, Expr *Operand,
UnresolvedLookupExpr *Lookup) {
auto *FSI = checkCoroutineContext(*this, Loc, "co_await");
if (!FSI)
return ExprError();
- if (E->hasPlaceholderType()) {
- ExprResult R = CheckPlaceholderExpr(E);
+ if (Operand->hasPlaceholderType()) {
+ ExprResult R = CheckPlaceholderExpr(Operand);
if (R.isInvalid())
return ExprError();
- E = R.get();
+ Operand = R.get();
}
auto *Promise = FSI->CoroutinePromise;
if (Promise->getType()->isDependentType()) {
- Expr *Res =
- new (Context) DependentCoawaitExpr(Loc, Context.DependentTy, E, Lookup);
+ Expr *Res = new (Context)
+ DependentCoawaitExpr(Loc, Context.DependentTy, Operand, Lookup);
return Res;
}
auto *RD = Promise->getType()->getAsCXXRecordDecl();
+ auto *Transformed = Operand;
if (lookupMember(*this, "await_transform", RD, Loc)) {
- ExprResult R = buildPromiseCall(*this, Promise, Loc, "await_transform", E);
+ ExprResult R =
+ buildPromiseCall(*this, Promise, Loc, "await_transform", Operand);
if (R.isInvalid()) {
Diag(Loc,
diag::note_coroutine_promise_implicit_await_transform_required_here)
- << E->getSourceRange();
+ << Operand->getSourceRange();
return ExprError();
}
- E = R.get();
+ Transformed = R.get();
}
- ExprResult Awaitable = buildOperatorCoawaitCall(*this, Loc, E, Lookup);
- if (Awaitable.isInvalid())
+ ExprResult Awaiter = BuildOperatorCoawaitCall(Loc, Transformed, Lookup);
+ if (Awaiter.isInvalid())
return ExprError();
- return BuildResolvedCoawaitExpr(Loc, Awaitable.get());
+ return BuildResolvedCoawaitExpr(Loc, Operand, Awaiter.get());
}
-ExprResult Sema::BuildResolvedCoawaitExpr(SourceLocation Loc, Expr *E,
- bool IsImplicit) {
+ExprResult Sema::BuildResolvedCoawaitExpr(SourceLocation Loc, Expr *Operand,
+ Expr *Awaiter, bool IsImplicit) {
auto *Coroutine = checkCoroutineContext(*this, Loc, "co_await", IsImplicit);
if (!Coroutine)
return ExprError();
- if (E->hasPlaceholderType()) {
- ExprResult R = CheckPlaceholderExpr(E);
+ if (Awaiter->hasPlaceholderType()) {
+ ExprResult R = CheckPlaceholderExpr(Awaiter);
if (R.isInvalid()) return ExprError();
- E = R.get();
+ Awaiter = R.get();
}
- if (E->getType()->isDependentType()) {
+ if (Awaiter->getType()->isDependentType()) {
Expr *Res = new (Context)
- CoawaitExpr(Loc, Context.DependentTy, E, IsImplicit);
+ CoawaitExpr(Loc, Context.DependentTy, Operand, Awaiter, IsImplicit);
return Res;
}
// If the expression is a temporary, materialize it as an lvalue so that we
// can use it multiple times.
- if (E->isPRValue())
- E = CreateMaterializeTemporaryExpr(E->getType(), E, true);
+ if (Awaiter->isPRValue())
+ Awaiter = CreateMaterializeTemporaryExpr(Awaiter->getType(), Awaiter, true);
// The location of the `co_await` token cannot be used when constructing
// the member call expressions since it's before the location of `Expr`, which
// is used as the start of the member call expression.
- SourceLocation CallLoc = E->getExprLoc();
+ SourceLocation CallLoc = Awaiter->getExprLoc();
// Build the await_ready, await_suspend, await_resume calls.
- ReadySuspendResumeResult RSS = buildCoawaitCalls(
- *this, Coroutine->CoroutinePromise, CallLoc, E);
+ ReadySuspendResumeResult RSS =
+ buildCoawaitCalls(*this, Coroutine->CoroutinePromise, CallLoc, Awaiter);
if (RSS.IsInvalid)
return ExprError();
- Expr *Res =
- new (Context) CoawaitExpr(Loc, E, RSS.Results[0], RSS.Results[1],
- RSS.Results[2], RSS.OpaqueValue, IsImplicit);
+ Expr *Res = new (Context)
+ CoawaitExpr(Loc, Operand, Awaiter, RSS.Results[0], RSS.Results[1],
+ RSS.Results[2], RSS.OpaqueValue, IsImplicit);
return Res;
}
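
To illustrate the new operand/awaiter split, a minimal sketch (assumed
example): after await_transform runs, the CoawaitExpr keeps both the operand
as written and the transformed awaiter.

    #include <coroutine>

    struct Task {
      struct promise_type {
        Task get_return_object() { return {}; }
        std::suspend_never initial_suspend() noexcept { return {}; }
        std::suspend_never final_suspend() noexcept { return {}; }
        void return_void() {}
        void unhandled_exception() {}
        // `co_await e` is routed through await_transform.
        std::suspend_never await_transform(int) noexcept { return {}; }
      };
    };

    Task demo() {
      co_await 42; // operand: 42; awaiter: promise.await_transform(42)
    }
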
@@ -933,8 +936,10 @@ ExprResult Sema::BuildCoyieldExpr(SourceLocation Loc, Expr *E) {
E = R.get();
}
+ Expr *Operand = E;
+
if (E->getType()->isDependentType()) {
- Expr *Res = new (Context) CoyieldExpr(Loc, Context.DependentTy, E);
+ Expr *Res = new (Context) CoyieldExpr(Loc, Context.DependentTy, Operand, E);
return Res;
}
@@ -950,7 +955,7 @@ ExprResult Sema::BuildCoyieldExpr(SourceLocation Loc, Expr *E) {
return ExprError();
Expr *Res =
- new (Context) CoyieldExpr(Loc, E, RSS.Results[0], RSS.Results[1],
+ new (Context) CoyieldExpr(Loc, Operand, E, RSS.Results[0], RSS.Results[1],
RSS.Results[2], RSS.OpaqueValue);
return Res;
@@ -1081,6 +1086,14 @@ void Sema::CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body) {
return;
}
+ // The always_inline attribute doesn't reliably apply to a coroutine,
+ // because the coroutine will be split into pieces and some pieces
+ // might be called indirectly, as in a virtual call. Even the ramp
+ // function cannot be inlined at -O0, due to pipeline ordering
+ // problems (see https://llvm.org/PR53413). Tell the user about it.
+ if (FD->hasAttr<AlwaysInlineAttr>())
+ Diag(FD->getLocation(), diag::warn_always_inline_coroutine);
+
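
An assumed example that would now trigger the warning (reusing the
hypothetical Task type sketched earlier in this diff):

    __attribute__((always_inline)) inline Task eager() {
      co_return; // warned: the ramp/resume/destroy pieces of a coroutine
                 // cannot all be reliably inlined
    }
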
// [stmt.return.coroutine]p1:
// A coroutine shall not enclose a return statement ([stmt.return]).
if (Fn->FirstReturnLoc.isValid()) {
@@ -1226,6 +1239,41 @@ bool CoroutineStmtBuilder::makeReturnOnAllocFailure() {
return true;
}
+// Collect the placement arguments for the allocation function of coroutine
+// FD. Returns true if the placement arguments were collected successfully,
+// false otherwise.
+static bool collectPlacementArgs(Sema &S, FunctionDecl &FD, SourceLocation Loc,
+ SmallVectorImpl<Expr *> &PlacementArgs) {
+ if (auto *MD = dyn_cast<CXXMethodDecl>(&FD)) {
+ if (MD->isInstance() && !isLambdaCallOperator(MD)) {
+ ExprResult ThisExpr = S.ActOnCXXThis(Loc);
+ if (ThisExpr.isInvalid())
+ return false;
+ ThisExpr = S.CreateBuiltinUnaryOp(Loc, UO_Deref, ThisExpr.get());
+ if (ThisExpr.isInvalid())
+ return false;
+ PlacementArgs.push_back(ThisExpr.get());
+ }
+ }
+
+ for (auto *PD : FD.parameters()) {
+ if (PD->getType()->isDependentType())
+ continue;
+
+ // Build a reference to the parameter.
+ auto PDLoc = PD->getLocation();
+ ExprResult PDRefExpr =
+ S.BuildDeclRefExpr(PD, PD->getOriginalType().getNonReferenceType(),
+ ExprValueKind::VK_LValue, PDLoc);
+ if (PDRefExpr.isInvalid())
+ return false;
+
+ PlacementArgs.push_back(PDRefExpr.get());
+ }
+
+ return true;
+}
+
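
An assumed illustration of what gets collected: for a non-static member
function coroutine the placement arguments are (*this, parameters...),
matching q_1 ... q_n in [dcl.fct.def.coroutine]p9 (Task is again the
hypothetical type sketched earlier).

    struct Widget {
      // Allocation is attempted as: operator new(size, *this, len)
      Task run(int len);
    };
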
bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
// Form and check allocation and deallocation calls.
assert(!IsPromiseDependentType &&
@@ -1242,13 +1290,7 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
// allocated, followed by the coroutine function's arguments. If a matching
// allocation function exists, use it. Otherwise, use an allocation function
// that just takes the requested size.
-
- FunctionDecl *OperatorNew = nullptr;
- FunctionDecl *OperatorDelete = nullptr;
- FunctionDecl *UnusedResult = nullptr;
- bool PassAlignment = false;
- SmallVector<Expr *, 1> PlacementArgs;
-
+ //
// [dcl.fct.def.coroutine]p9
// An implementation may need to allocate additional storage for a
// coroutine.
@@ -1275,58 +1317,55 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
// and p_i denotes the i-th function parameter otherwise. For a non-static
// member function, q_1 is an lvalue that denotes *this; any other q_i is an
// lvalue that denotes the parameter copy corresponding to p_i.
- if (auto *MD = dyn_cast<CXXMethodDecl>(&FD)) {
- if (MD->isInstance() && !isLambdaCallOperator(MD)) {
- ExprResult ThisExpr = S.ActOnCXXThis(Loc);
- if (ThisExpr.isInvalid())
- return false;
- ThisExpr = S.CreateBuiltinUnaryOp(Loc, UO_Deref, ThisExpr.get());
- if (ThisExpr.isInvalid())
- return false;
- PlacementArgs.push_back(ThisExpr.get());
- }
- }
- for (auto *PD : FD.parameters()) {
- if (PD->getType()->isDependentType())
- continue;
- // Build a reference to the parameter.
- auto PDLoc = PD->getLocation();
- ExprResult PDRefExpr =
- S.BuildDeclRefExpr(PD, PD->getOriginalType().getNonReferenceType(),
- ExprValueKind::VK_LValue, PDLoc);
- if (PDRefExpr.isInvalid())
- return false;
+ FunctionDecl *OperatorNew = nullptr;
+ FunctionDecl *OperatorDelete = nullptr;
+ FunctionDecl *UnusedResult = nullptr;
+ bool PassAlignment = false;
+ SmallVector<Expr *, 1> PlacementArgs;
- PlacementArgs.push_back(PDRefExpr.get());
- }
- S.FindAllocationFunctions(Loc, SourceRange(), /*NewScope*/ Sema::AFS_Class,
- /*DeleteScope*/ Sema::AFS_Both, PromiseType,
- /*isArray*/ false, PassAlignment, PlacementArgs,
- OperatorNew, UnusedResult, /*Diagnose*/ false);
+ bool PromiseContainsNew = [this, &PromiseType]() -> bool {
+ DeclarationName NewName =
+ S.getASTContext().DeclarationNames.getCXXOperatorName(OO_New);
+ LookupResult R(S, NewName, Loc, Sema::LookupOrdinaryName);
- // [dcl.fct.def.coroutine]p9
- // If no viable function is found ([over.match.viable]), overload resolution
- // is performed again on a function call created by passing just the amount of
- // space required as an argument of type std::size_t.
- if (!OperatorNew && !PlacementArgs.empty()) {
- PlacementArgs.clear();
- S.FindAllocationFunctions(Loc, SourceRange(), /*NewScope*/ Sema::AFS_Class,
+ if (PromiseType->isRecordType())
+ S.LookupQualifiedName(R, PromiseType->getAsCXXRecordDecl());
+
+ return !R.empty() && !R.isAmbiguous();
+ }();
+
+ auto LookupAllocationFunction = [&]() {
+ // [dcl.fct.def.coroutine]p9
+ // The allocation function's name is looked up by searching for it in the
+ // scope of the promise type.
+ // - If any declarations are found, ...
+ // - If no declarations are found in the scope of the promise type, a search
+ // is performed in the global scope.
+ Sema::AllocationFunctionScope NewScope =
+ PromiseContainsNew ? Sema::AFS_Class : Sema::AFS_Global;
+ S.FindAllocationFunctions(Loc, SourceRange(),
+ NewScope,
/*DeleteScope*/ Sema::AFS_Both, PromiseType,
/*isArray*/ false, PassAlignment, PlacementArgs,
OperatorNew, UnusedResult, /*Diagnose*/ false);
- }
+ };
+
+  // We don't expect to call the global operator new with (size, p0, …, pn).
+  // So if we choose to look up the allocation function in the global scope,
+  // we shouldn't collect placement arguments.
+ if (PromiseContainsNew && !collectPlacementArgs(S, FD, Loc, PlacementArgs))
+ return false;
+
+ LookupAllocationFunction();
// [dcl.fct.def.coroutine]p9
- // The allocation function's name is looked up by searching for it in the
- // scope of the promise type.
- // - If any declarations are found, ...
- // - Otherwise, a search is performed in the global scope.
- if (!OperatorNew) {
- S.FindAllocationFunctions(Loc, SourceRange(), /*NewScope*/ Sema::AFS_Global,
- /*DeleteScope*/ Sema::AFS_Both, PromiseType,
- /*isArray*/ false, PassAlignment, PlacementArgs,
- OperatorNew, UnusedResult);
+ // If no viable function is found ([over.match.viable]), overload resolution
+ // is performed again on a function call created by passing just the amount of
+ // space required as an argument of type std::size_t.
+ if (!OperatorNew && !PlacementArgs.empty() && PromiseContainsNew) {
+ PlacementArgs.clear();
+ LookupAllocationFunction();
}
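
A sketch of the resulting lookup order, under assumed names: the promise
scope is consulted first (with placement arguments, then with just the size);
the global scope is used only when the promise declares no operator new.

    #include <cstddef>

    struct Generator {
      struct promise_type {
        // PromiseContainsNew is true, so this form is tried first, with
        // the placement argument (seed):
        void *operator new(std::size_t sz, int seed);
        // Retried with just the size if the placement form is not viable:
        void *operator new(std::size_t sz);
        void operator delete(void *ptr);
        // ... the usual promise members ...
      };
    };

    Generator counter(int seed);
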
bool IsGlobalOverload =
@@ -1346,8 +1385,12 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
OperatorNew, UnusedResult);
}
- if (!OperatorNew)
+ if (!OperatorNew) {
+ if (PromiseContainsNew)
+ S.Diag(Loc, diag::err_coroutine_unusable_new) << PromiseType << &FD;
+
return false;
+ }
if (RequiresNoThrowAlloc) {
const auto *FT = OperatorNew->getType()->castAs<FunctionProtoType>();
@@ -1382,8 +1425,7 @@ bool CoroutineStmtBuilder::makeNewAndDeleteExpr() {
return false;
SmallVector<Expr *, 2> NewArgs(1, FrameSize);
- for (auto Arg : PlacementArgs)
- NewArgs.push_back(Arg);
+ llvm::append_range(NewArgs, PlacementArgs);
ExprResult NewExpr =
S.BuildCallExpr(S.getCurScope(), NewRef.get(), Loc, NewArgs, Loc);
@@ -1569,7 +1611,6 @@ bool CoroutineStmtBuilder::makeGroDeclAndReturnStmt() {
if (Res.isInvalid())
return false;
- this->ResultDecl = Res.get();
return true;
}
@@ -1582,51 +1623,11 @@ bool CoroutineStmtBuilder::makeGroDeclAndReturnStmt() {
return false;
}
- auto *GroDecl = VarDecl::Create(
- S.Context, &FD, FD.getLocation(), FD.getLocation(),
- &S.PP.getIdentifierTable().get("__coro_gro"), GroType,
- S.Context.getTrivialTypeSourceInfo(GroType, Loc), SC_None);
- GroDecl->setImplicit();
-
- S.CheckVariableDeclarationType(GroDecl);
- if (GroDecl->isInvalidDecl())
- return false;
-
- InitializedEntity Entity = InitializedEntity::InitializeVariable(GroDecl);
- ExprResult Res =
- S.PerformCopyInitialization(Entity, SourceLocation(), ReturnValue);
- if (Res.isInvalid())
- return false;
-
- Res = S.ActOnFinishFullExpr(Res.get(), /*DiscardedValue*/ false);
- if (Res.isInvalid())
- return false;
-
- S.AddInitializerToDecl(GroDecl, Res.get(),
- /*DirectInit=*/false);
-
- S.FinalizeDeclaration(GroDecl);
-
- // Form a declaration statement for the return declaration, so that AST
- // visitors can more easily find it.
- StmtResult GroDeclStmt =
- S.ActOnDeclStmt(S.ConvertDeclToDeclGroup(GroDecl), Loc, Loc);
- if (GroDeclStmt.isInvalid())
- return false;
-
- this->ResultDecl = GroDeclStmt.get();
-
- ExprResult declRef = S.BuildDeclRefExpr(GroDecl, GroType, VK_LValue, Loc);
- if (declRef.isInvalid())
- return false;
-
- StmtResult ReturnStmt = S.BuildReturnStmt(Loc, declRef.get());
+ StmtResult ReturnStmt = S.BuildReturnStmt(Loc, ReturnValue);
if (ReturnStmt.isInvalid()) {
noteMemberDeclaredHere(S, ReturnValue, Fn);
return false;
}
- if (cast<clang::ReturnStmt>(ReturnStmt.get())->getNRVOCandidate() == GroDecl)
- GroDecl->setNRVOVariable(true);
this->ReturnStmt = ReturnStmt.get();
return true;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
index bcadf4139046..1139088ecde2 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDecl.cpp
@@ -24,6 +24,7 @@
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/NonTrivialTypeVisitor.h"
+#include "clang/AST/Randstruct.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/PartialDiagnostic.h"
@@ -503,9 +504,11 @@ ParsedType Sema::getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
FoundUsingShadow = nullptr;
} else if (AllowDeducedTemplate) {
if (auto *TD = getAsTypeTemplateDecl(IIDecl)) {
- // FIXME: TemplateName should include FoundUsingShadow sugar.
- T = Context.getDeducedTemplateSpecializationType(TemplateName(TD),
- QualType(), false);
+ assert(!FoundUsingShadow || FoundUsingShadow->getTargetDecl() == TD);
+ TemplateName Template =
+ FoundUsingShadow ? TemplateName(FoundUsingShadow) : TemplateName(TD);
+ T = Context.getDeducedTemplateSpecializationType(Template, QualType(),
+ false);
// Don't wrap in a further UsingType.
FoundUsingShadow = nullptr;
}
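
An assumed example of the sugar this preserves: class template argument
deduction through a using declaration now records the using-shadow in the
deduced TemplateName.

    namespace lib {
    template <typename T> struct Box {
      Box(T v) : value(v) {}
      T value;
    };
    } // namespace lib

    using lib::Box;
    Box b(42); // deduced as lib::Box<int>, found via the using declaration
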
@@ -930,9 +933,13 @@ Corrected:
//
// appeared.
//
- // We also allow this in C99 as an extension.
- if (NamedDecl *D = ImplicitlyDefineFunction(NameLoc, *Name, S))
- return NameClassification::NonType(D);
+ // We also allow this in C99 as an extension. However, this is not
+ // allowed in all language modes as functions without prototypes may not
+ // be supported.
+ if (getLangOpts().implicitFunctionsAllowed()) {
+ if (NamedDecl *D = ImplicitlyDefineFunction(NameLoc, *Name, S))
+ return NameClassification::NonType(D);
+ }
}
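
An assumed C example (not valid C++) of the construct being gated: the call
implicitly declares `int twice();`, which C89 permits, C99 accepts as an
extension, and stricter modes reject.

    int main(void) {
      return twice(21); /* implicit declaration of function 'twice' */
    }
    int twice(int n) { return 2 * n; }
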
if (getLangOpts().CPlusPlus20 && SS.isEmpty() && NextToken.is(tok::less)) {
@@ -1106,12 +1113,16 @@ Corrected:
IsFunctionTemplate = isa<FunctionTemplateDecl>(TD);
IsVarTemplate = isa<VarTemplateDecl>(TD);
+ UsingShadowDecl *FoundUsingShadow =
+ dyn_cast<UsingShadowDecl>(*Result.begin());
+ assert(!FoundUsingShadow ||
+ TD == cast<TemplateDecl>(FoundUsingShadow->getTargetDecl()));
+ Template =
+ FoundUsingShadow ? TemplateName(FoundUsingShadow) : TemplateName(TD);
if (SS.isNotEmpty())
- Template =
- Context.getQualifiedTemplateName(SS.getScopeRep(),
- /*TemplateKeyword=*/false, TD);
- else
- Template = TemplateName(TD);
+ Template = Context.getQualifiedTemplateName(SS.getScopeRep(),
+ /*TemplateKeyword=*/false,
+ Template);
} else {
// All results were non-template functions. This is a function template
// name.
@@ -1460,27 +1471,38 @@ void Sema::ActOnExitFunctionContext() {
assert(CurContext && "Popped translation unit!");
}
-/// Determine whether we allow overloading of the function
-/// PrevDecl with another declaration.
+/// Determine whether overloading is allowed for a new function
+/// declaration considering prior declarations of the same name.
///
/// This routine determines whether overloading is possible, not
-/// whether some new function is actually an overload. It will return
-/// true in C++ (where we can always provide overloads) or, as an
-/// extension, in C when the previous function is already an
-/// overloaded function declaration or has the "overloadable"
-/// attribute.
-static bool AllowOverloadingOfFunction(LookupResult &Previous,
+/// whether a new declaration actually overloads a previous one.
+/// It will return true in C++ (where overloads are always permitted)
+/// or, as a C extension, when either the new declaration or a
+/// previous one is declared with the 'overloadable' attribute.
+static bool AllowOverloadingOfFunction(const LookupResult &Previous,
ASTContext &Context,
const FunctionDecl *New) {
- if (Context.getLangOpts().CPlusPlus)
+ if (Context.getLangOpts().CPlusPlus || New->hasAttr<OverloadableAttr>())
return true;
- if (Previous.getResultKind() == LookupResult::FoundOverloaded)
- return true;
+ // Multiversion function declarations are not overloads in the
+ // usual sense of that term, but lookup will report that an
+ // overload set was found if more than one multiversion function
+ // declaration is present for the same name. It is therefore
+ // inadequate to assume that some prior declaration(s) had
+ // the overloadable attribute; checking is required. Since one
+ // declaration is permitted to omit the attribute, it is necessary
+ // to check at least two; hence the 'any_of' check below. Note that
+ // the overloadable attribute is implicitly added to declarations
+ // that were required to have it but did not.
+ if (Previous.getResultKind() == LookupResult::FoundOverloaded) {
+ return llvm::any_of(Previous, [](const NamedDecl *ND) {
+ return ND->hasAttr<OverloadableAttr>();
+ });
+ } else if (Previous.getResultKind() == LookupResult::Found)
+ return Previous.getFoundDecl()->hasAttr<OverloadableAttr>();
- return Previous.getResultKind() == LookupResult::Found &&
- (Previous.getFoundDecl()->hasAttr<OverloadableAttr>() ||
- New->hasAttr<OverloadableAttr>());
+ return false;
}
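
An assumed C example of the extension described above: the overloadable
attribute enables overloading in C, and at most one overload of a given name
may omit the attribute.

    __attribute__((overloadable)) float absval(float x);
    int absval(int x); /* permitted: the one declaration without the attr */
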
/// Add this decl to the scope shadowed decl chains.
@@ -1608,6 +1630,14 @@ bool Sema::CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old) {
if (OldM && OldM->Kind == Module::PrivateModuleFragment)
OldM = OldM->Parent;
+ // If we have a decl in a module partition, it is part of the containing
+ // module (which is the only thing that can be importing it).
+ if (NewM && OldM &&
+ (OldM->Kind == Module::ModulePartitionInterface ||
+ OldM->Kind == Module::ModulePartitionImplementation)) {
+ return false;
+ }
+
if (NewM == OldM)
return false;
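
A comment-only sketch (assumed module names) of the case this admits: a
declaration redeclared between a partition and its containing module is owned
by that same module, so it is not a cross-module redeclaration.

    // m_part.cppm:  export module m:part;
    //               void helper();
    // m.cppm:       export module m;
    //               import :part;
    //               void helper(); // accepted after this change
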
@@ -1660,7 +1690,13 @@ bool Sema::CheckRedeclarationExported(NamedDecl *New, NamedDecl *Old) {
assert(IsNewExported);
- Diag(New->getLocation(), diag::err_redeclaration_non_exported) << New;
+ auto Lk = Old->getFormalLinkage();
+ int S = 0;
+ if (Lk == Linkage::InternalLinkage)
+ S = 1;
+ else if (Lk == Linkage::ModuleLinkage)
+ S = 2;
+ Diag(New->getLocation(), diag::err_redeclaration_non_exported) << New << S;
Diag(Old->getLocation(), diag::note_previous_declaration);
return true;
}
@@ -1870,15 +1906,28 @@ static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) {
// Types of valid local variables should be complete, so this should succeed.
if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
- // White-list anything with an __attribute__((unused)) type.
+ const Expr *Init = VD->getInit();
+ if (const auto *Cleanups = dyn_cast_or_null<ExprWithCleanups>(Init))
+ Init = Cleanups->getSubExpr();
+
const auto *Ty = VD->getType().getTypePtr();
// Only look at the outermost level of typedef.
if (const TypedefType *TT = Ty->getAs<TypedefType>()) {
+ // Allow anything marked with __attribute__((unused)).
if (TT->getDecl()->hasAttr<UnusedAttr>())
return false;
}
+  // Warn for reference variables whose initialization performs lifetime
+ // extension.
+ if (const auto *MTE = dyn_cast_or_null<MaterializeTemporaryExpr>(Init)) {
+ if (MTE->getExtendingDecl()) {
+ Ty = VD->getType().getNonReferenceType().getTypePtr();
+ Init = MTE->getSubExpr()->IgnoreImplicitAsWritten();
+ }
+ }
+
// If we failed to complete the type for some reason, or if the type is
// dependent, don't diagnose the variable.
if (Ty->isIncompleteType() || Ty->isDependentType())
@@ -1897,10 +1946,7 @@ static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) {
if (!RD->hasTrivialDestructor() && !RD->hasAttr<WarnUnusedAttr>())
return false;
- if (const Expr *Init = VD->getInit()) {
- if (const ExprWithCleanups *Cleanups =
- dyn_cast<ExprWithCleanups>(Init))
- Init = Cleanups->getSubExpr();
+ if (Init) {
const CXXConstructExpr *Construct =
dyn_cast<CXXConstructExpr>(Init);
if (Construct && !Construct->isElidable()) {
@@ -1912,10 +1958,16 @@ static bool ShouldDiagnoseUnusedDecl(const NamedDecl *D) {
// Suppress the warning if we don't know how this is constructed, and
// it could possibly be non-trivial constructor.
- if (Init->isTypeDependent())
+ if (Init->isTypeDependent()) {
for (const CXXConstructorDecl *Ctor : RD->ctors())
if (!Ctor->isTrivial())
return false;
+ }
+
+ // Suppress the warning if the constructor is unresolved because
+ // its arguments are dependent.
+ if (isa<CXXUnresolvedConstructExpr>(Init))
+ return false;
}
}
}
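
An assumed example now caught: the reference merely lifetime-extends a
temporary, so the variable is effectively unused despite being a binding.

    #include <string>

    void greet() {
      const std::string &msg = std::string("hello"); // -Wunused-variable
    }
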
@@ -2008,6 +2060,12 @@ void Sema::DiagnoseUnusedButSetDecl(const VarDecl *VD) {
if (VD->hasAttr<BlocksAttr>() && Ty->isObjCObjectPointerType())
return;
+ // Don't warn about Objective-C pointer variables with precise lifetime
+ // semantics; they can be used to ensure ARC releases the object at a known
+  // time, which may mean the variable is assigned but never otherwise read.
+ if (VD->hasAttr<ObjCPreciseLifetimeAttr>() && Ty->isObjCObjectPointerType())
+ return;
+
auto iter = RefsMinusAssignments.find(VD);
if (iter == RefsMinusAssignments.end())
return;
@@ -2244,7 +2302,8 @@ NamedDecl *Sema::LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
if (!ForRedeclaration &&
(Context.BuiltinInfo.isPredefinedLibFunction(ID) ||
Context.BuiltinInfo.isHeaderDependentFunction(ID))) {
- Diag(Loc, diag::ext_implicit_lib_function_decl)
+ Diag(Loc, LangOpts.C99 ? diag::ext_implicit_lib_function_decl_c99
+ : diag::ext_implicit_lib_function_decl)
<< Context.BuiltinInfo.getName(ID) << R;
if (const char *Header = Context.BuiltinInfo.getHeaderName(ID))
Diag(Loc, diag::note_include_header_or_declare)
@@ -2745,6 +2804,11 @@ static bool mergeDeclAttribute(Sema &S, NamedDecl *D,
NewAttr = S.mergeEnforceTCBLeafAttr(D, *TCBLA);
else if (const auto *BTFA = dyn_cast<BTFDeclTagAttr>(Attr))
NewAttr = S.mergeBTFDeclTagAttr(D, *BTFA);
+ else if (const auto *NT = dyn_cast<HLSLNumThreadsAttr>(Attr))
+ NewAttr =
+ S.mergeHLSLNumThreadsAttr(D, *NT, NT->getX(), NT->getY(), NT->getZ());
+ else if (const auto *SA = dyn_cast<HLSLShaderAttr>(Attr))
+ NewAttr = S.mergeHLSLShaderAttr(D, *SA, SA->getType());
else if (Attr->shouldInheritEvenIfAlreadyPresent() || !DeclHasAttr(D, Attr))
NewAttr = cast<InheritableAttr>(Attr->clone(S.Context));
@@ -3195,6 +3259,10 @@ getNoteDiagForInvalidRedeclaration(const T *Old, const T *New) {
PrevDiag = diag::note_previous_definition;
else if (Old->isImplicit()) {
PrevDiag = diag::note_previous_implicit_declaration;
+ if (const auto *FD = dyn_cast<FunctionDecl>(Old)) {
+ if (FD->getBuiltinID())
+ PrevDiag = diag::note_previous_builtin_declaration;
+ }
if (OldLocation.isInvalid())
OldLocation = New->getLocation();
} else
@@ -3346,8 +3414,8 @@ static void adjustDeclContextForDeclaratorDecl(DeclaratorDecl *NewD,
/// merged with.
///
/// Returns true if there was an error, false otherwise.
-bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
- Scope *S, bool MergeTypeWithOld) {
+bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD, Scope *S,
+ bool MergeTypeWithOld, bool NewDeclIsDefn) {
// Verify the old decl was also a function.
FunctionDecl *Old = OldD->getAsFunction();
if (!Old) {
@@ -3828,39 +3896,109 @@ bool Sema::MergeFunctionDecl(FunctionDecl *New, NamedDecl *&OldD,
// C: Function types need to be compatible, not identical. This handles
// duplicate function decls like "void f(int); void f(enum X);" properly.
- if (!getLangOpts().CPlusPlus &&
- Context.typesAreCompatible(OldQType, NewQType)) {
- const FunctionType *OldFuncType = OldQType->getAs<FunctionType>();
- const FunctionType *NewFuncType = NewQType->getAs<FunctionType>();
- const FunctionProtoType *OldProto = nullptr;
- if (MergeTypeWithOld && isa<FunctionNoProtoType>(NewFuncType) &&
- (OldProto = dyn_cast<FunctionProtoType>(OldFuncType))) {
- // The old declaration provided a function prototype, but the
- // new declaration does not. Merge in the prototype.
- assert(!OldProto->hasExceptionSpec() && "Exception spec in C");
- SmallVector<QualType, 16> ParamTypes(OldProto->param_types());
- NewQType =
- Context.getFunctionType(NewFuncType->getReturnType(), ParamTypes,
- OldProto->getExtProtoInfo());
- New->setType(NewQType);
- New->setHasInheritedPrototype();
-
- // Synthesize parameters with the same types.
- SmallVector<ParmVarDecl*, 16> Params;
- for (const auto &ParamType : OldProto->param_types()) {
- ParmVarDecl *Param = ParmVarDecl::Create(Context, New, SourceLocation(),
- SourceLocation(), nullptr,
- ParamType, /*TInfo=*/nullptr,
- SC_None, nullptr);
- Param->setScopeInfo(0, Params.size());
- Param->setImplicit();
- Params.push_back(Param);
+ if (!getLangOpts().CPlusPlus) {
+ // C99 6.7.5.3p15: ...If one type has a parameter type list and the other
+ // type is specified by a function definition that contains a (possibly
+ // empty) identifier list, both shall agree in the number of parameters
+ // and the type of each parameter shall be compatible with the type that
+ // results from the application of default argument promotions to the
+ // type of the corresponding identifier. ...
+ // This cannot be handled by ASTContext::typesAreCompatible() because that
+ // doesn't know whether the function type is for a definition or not when
+ // eventually calling ASTContext::mergeFunctionTypes(). The only situation
+  // we need to cover here is that the number of arguments agrees, as the
+ // default argument promotion rules were already checked by
+ // ASTContext::typesAreCompatible().
+ if (Old->hasPrototype() && !New->hasWrittenPrototype() && NewDeclIsDefn &&
+ Old->getNumParams() != New->getNumParams()) {
+ if (Old->hasInheritedPrototype())
+ Old = Old->getCanonicalDecl();
+ Diag(New->getLocation(), diag::err_conflicting_types) << New;
+ Diag(Old->getLocation(), PrevDiag) << Old << Old->getType();
+ return true;
+ }
+
+ // If we are merging two functions where only one of them has a prototype,
+ // we may have enough information to decide to issue a diagnostic that the
+  // function without a prototype will change behavior in C2x. This handles
+ // cases like:
+ // void i(); void i(int j);
+ // void i(int j); void i();
+ // void i(); void i(int j) {}
+ // See ActOnFinishFunctionBody() for other cases of the behavior change
+ // diagnostic. See GetFullTypeForDeclarator() for handling of a function
+ // type without a prototype.
+ if (New->hasWrittenPrototype() != Old->hasWrittenPrototype() &&
+ !New->isImplicit() && !Old->isImplicit()) {
+ const FunctionDecl *WithProto, *WithoutProto;
+ if (New->hasWrittenPrototype()) {
+ WithProto = New;
+ WithoutProto = Old;
+ } else {
+ WithProto = Old;
+ WithoutProto = New;
}
- New->setParams(Params);
+ if (WithProto->getNumParams() != 0) {
+ if (WithoutProto->getBuiltinID() == 0 && !WithoutProto->isImplicit()) {
+ // The one without the prototype will be changing behavior in C2x, so
+ // warn about that one so long as it's a user-visible declaration.
+ bool IsWithoutProtoADef = false, IsWithProtoADef = false;
+ if (WithoutProto == New)
+ IsWithoutProtoADef = NewDeclIsDefn;
+ else
+ IsWithProtoADef = NewDeclIsDefn;
+ Diag(WithoutProto->getLocation(),
+ diag::warn_non_prototype_changes_behavior)
+ << IsWithoutProtoADef << (WithoutProto->getNumParams() ? 0 : 1)
+ << (WithoutProto == Old) << IsWithProtoADef;
+
+ // The reason the one without the prototype will be changing behavior
+ // is because of the one with the prototype, so note that so long as
+ // it's a user-visible declaration. There is one exception to this:
+ // when the new declaration is a definition without a prototype, the
+ // old declaration with a prototype is not the cause of the issue,
+ // and that does not need to be noted because the one with a
+ // prototype will not change behavior in C2x.
+ if (WithProto->getBuiltinID() == 0 && !WithProto->isImplicit() &&
+ !IsWithoutProtoADef)
+ Diag(WithProto->getLocation(), diag::note_conflicting_prototype);
+ }
+ }
}
- return MergeCompatibleFunctionDecls(New, Old, S, MergeTypeWithOld);
+ if (Context.typesAreCompatible(OldQType, NewQType)) {
+ const FunctionType *OldFuncType = OldQType->getAs<FunctionType>();
+ const FunctionType *NewFuncType = NewQType->getAs<FunctionType>();
+ const FunctionProtoType *OldProto = nullptr;
+ if (MergeTypeWithOld && isa<FunctionNoProtoType>(NewFuncType) &&
+ (OldProto = dyn_cast<FunctionProtoType>(OldFuncType))) {
+ // The old declaration provided a function prototype, but the
+ // new declaration does not. Merge in the prototype.
+ assert(!OldProto->hasExceptionSpec() && "Exception spec in C");
+ SmallVector<QualType, 16> ParamTypes(OldProto->param_types());
+ NewQType =
+ Context.getFunctionType(NewFuncType->getReturnType(), ParamTypes,
+ OldProto->getExtProtoInfo());
+ New->setType(NewQType);
+ New->setHasInheritedPrototype();
+
+ // Synthesize parameters with the same types.
+ SmallVector<ParmVarDecl *, 16> Params;
+ for (const auto &ParamType : OldProto->param_types()) {
+ ParmVarDecl *Param = ParmVarDecl::Create(
+ Context, New, SourceLocation(), SourceLocation(), nullptr,
+ ParamType, /*TInfo=*/nullptr, SC_None, nullptr);
+ Param->setScopeInfo(0, Params.size());
+ Param->setImplicit();
+ Params.push_back(Param);
+ }
+
+ New->setParams(Params);
+ }
+
+ return MergeCompatibleFunctionDecls(New, Old, S, MergeTypeWithOld);
+ }
}
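
An assumed C example of the new behavior-change warning: in C2x the
declaration without a prototype will start to mean (void), at which point
this pair becomes a conflict.

    void log_event();         /* warned: changes meaning in C2x */
    void log_event(int code); /* noted: the prototype causing the change */
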
// Check if the function types are compatible when pointer size address
@@ -4370,15 +4508,15 @@ void Sema::MergeVarDecl(VarDecl *New, LookupResult &Previous) {
}
// C++ doesn't have tentative definitions, so go right ahead and check here.
- if (getLangOpts().CPlusPlus &&
- New->isThisDeclarationADefinition() == VarDecl::Definition) {
+ if (getLangOpts().CPlusPlus) {
if (Old->isStaticDataMember() && Old->getCanonicalDecl()->isInline() &&
Old->getCanonicalDecl()->isConstexpr()) {
// This definition won't be a definition any more once it's been merged.
Diag(New->getLocation(),
diag::warn_deprecated_redundant_constexpr_static_def);
- } else if (VarDecl *Def = Old->getDefinition()) {
- if (checkVarDeclRedefinition(Def, New))
+ } else if (New->isThisDeclarationADefinition() == VarDecl::Definition) {
+ VarDecl *Def = Old->getDefinition();
+ if (Def && checkVarDeclRedefinition(Def, New))
return;
}
}
@@ -4491,11 +4629,12 @@ bool Sema::checkVarDeclRedefinition(VarDecl *Old, VarDecl *New) {
/// ParsedFreeStandingDeclSpec - This method is invoked when a declspec with
/// no declarator (e.g. "struct foo;") is parsed.
-Decl *
-Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
- RecordDecl *&AnonRecord) {
- return ParsedFreeStandingDeclSpec(S, AS, DS, MultiTemplateParamsArg(), false,
- AnonRecord);
+Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
+ DeclSpec &DS,
+ const ParsedAttributesView &DeclAttrs,
+ RecordDecl *&AnonRecord) {
+ return ParsedFreeStandingDeclSpec(
+ S, AS, DS, DeclAttrs, MultiTemplateParamsArg(), false, AnonRecord);
}
// The MS ABI changed between VS2013 and VS2015 with regard to numbers used to
@@ -4708,11 +4847,12 @@ static unsigned GetDiagnosticTypeSpecifierID(DeclSpec::TST T) {
/// ParsedFreeStandingDeclSpec - This method is invoked when a declspec with
/// no declarator (e.g. "struct foo;") is parsed. It also accepts template
/// parameters to cope with template friend declarations.
-Decl *
-Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
- MultiTemplateParamsArg TemplateParams,
- bool IsExplicitInstantiation,
- RecordDecl *&AnonRecord) {
+Decl *Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS,
+ DeclSpec &DS,
+ const ParsedAttributesView &DeclAttrs,
+ MultiTemplateParamsArg TemplateParams,
+ bool IsExplicitInstantiation,
+ RecordDecl *&AnonRecord) {
Decl *TagD = nullptr;
TagDecl *Tag = nullptr;
if (DS.getTypeSpecType() == DeclSpec::TST_class ||
@@ -4951,7 +5091,7 @@ Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
// Warn about ignored type attributes, for example:
// __attribute__((aligned)) struct A;
// Attributes should be placed after tag to apply to type declaration.
- if (!DS.getAttributes().empty()) {
+ if (!DS.getAttributes().empty() || !DeclAttrs.empty()) {
DeclSpec::TST TypeSpecType = DS.getTypeSpecType();
if (TypeSpecType == DeclSpec::TST_class ||
TypeSpecType == DeclSpec::TST_struct ||
@@ -4961,6 +5101,9 @@ Sema::ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
for (const ParsedAttr &AL : DS.getAttributes())
Diag(AL.getLoc(), diag::warn_declspec_attribute_ignored)
<< AL << GetDiagnosticTypeSpecifierID(TypeSpecType);
+ for (const ParsedAttr &AL : DeclAttrs)
+ Diag(AL.getLoc(), diag::warn_declspec_attribute_ignored)
+ << AL << GetDiagnosticTypeSpecifierID(TypeSpecType);
}
}
@@ -5323,7 +5466,7 @@ Decl *Sema::BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
Diag(DS.getBeginLoc(), diag::ext_no_declarators) << DS.getSourceRange();
// Mock up a declarator.
- Declarator Dc(DS, DeclaratorContext::Member);
+ Declarator Dc(DS, ParsedAttributesView::none(), DeclaratorContext::Member);
TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc, S);
assert(TInfo && "couldn't build declarator info for anonymous struct/union");
@@ -5420,7 +5563,7 @@ Decl *Sema::BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
assert(Record && "expected a record!");
// Mock up a declarator.
- Declarator Dc(DS, DeclaratorContext::TypeName);
+ Declarator Dc(DS, ParsedAttributesView::none(), DeclaratorContext::TypeName);
TypeSourceInfo *TInfo = GetTypeForDeclarator(Dc, S);
assert(TInfo && "couldn't build declarator info for anonymous struct");
@@ -5629,7 +5772,7 @@ static bool hasSimilarParameters(ASTContext &Context,
return true;
}
-/// NeedsRebuildingInCurrentInstantiation - Checks whether the given
+/// RebuildDeclaratorInCurrentInstantiation - Checks whether the given
/// declarator needs to be rebuilt in the current instantiation.
/// Any bits of declarator which appear before the name are valid for
/// consideration here. That's specifically the type in the decl spec
@@ -5725,12 +5868,25 @@ void Sema::warnOnReservedIdentifier(const NamedDecl *D) {
Decl *Sema::ActOnDeclarator(Scope *S, Declarator &D) {
D.setFunctionDefinitionKind(FunctionDefinitionKind::Declaration);
+
+ // Check if we are in an `omp begin/end declare variant` scope. Handle this
+ // declaration only if the `bind_to_declaration` extension is set.
+ SmallVector<FunctionDecl *, 4> Bases;
+ if (LangOpts.OpenMP && isInOpenMPDeclareVariantScope())
+    if (getOMPTraitInfoForSurroundingScope()->isExtensionActive(
+            llvm::omp::TraitProperty::
+                implementation_extension_bind_to_declaration))
+ ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(
+ S, D, MultiTemplateParamsArg(), Bases);
+
Decl *Dcl = HandleDeclarator(S, D, MultiTemplateParamsArg());
if (OriginalLexicalContext && OriginalLexicalContext->isObjCContainer() &&
Dcl && Dcl->getDeclContext()->isFileContext())
Dcl->setTopLevelDeclInObjCContainer();
+ if (!Bases.empty())
+ ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(Dcl, Bases);
+
return Dcl;
}
@@ -6862,7 +7018,8 @@ static bool hasParsedAttr(Scope *S, const Declarator &PD,
}
// Finally, check attributes on the decl itself.
- return PD.getAttributes().hasAttribute(Kind);
+ return PD.getAttributes().hasAttribute(Kind) ||
+ PD.getDeclarationAttributes().hasAttribute(Kind);
}
/// Adjust the \c DeclContext for a function or variable that might be a
@@ -8640,15 +8797,24 @@ static FunctionDecl *CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
bool isInline = D.getDeclSpec().isInlineSpecified();
if (!SemaRef.getLangOpts().CPlusPlus) {
- // Determine whether the function was written with a
- // prototype. This true when:
+ // Determine whether the function was written with a prototype. This is
+ // true when:
// - there is a prototype in the declarator, or
// - the type R of the function is some kind of typedef or other non-
// attributed reference to a type name (which eventually refers to a
- // function type).
+ // function type). Note, we can't always look at the adjusted type to
+ // check this case because attributes may cause a non-function
+ // declarator to still have a function type. e.g.,
+ // typedef void func(int a);
+ // __attribute__((noreturn)) func other_func; // This has a prototype
bool HasPrototype =
- (D.isFunctionDeclarator() && D.getFunctionTypeInfo().hasPrototype) ||
- (!R->getAsAdjusted<FunctionType>() && R->isFunctionProtoType());
+ (D.isFunctionDeclarator() && D.getFunctionTypeInfo().hasPrototype) ||
+ (D.getDeclSpec().isTypeRep() &&
+ D.getDeclSpec().getRepAsType().get()->isFunctionProtoType()) ||
+ (!R->getAsAdjusted<FunctionType>() && R->isFunctionProtoType());
+ assert(
+ (HasPrototype || !SemaRef.getLangOpts().requiresStrictPrototypes()) &&
+ "Strict prototypes are required");
NewFD = FunctionDecl::Create(
SemaRef.Context, DC, D.getBeginLoc(), NameInfo, R, TInfo, SC,
@@ -8704,6 +8870,10 @@ static FunctionDecl *CreateNewFunctionDecl(Sema &SemaRef, Declarator &D,
SemaRef.getCurFPFeatures().isFPConstrained(), isInline,
/*isImplicitlyDeclared=*/false, ConstexprKind,
TrailingRequiresClause);
+  // User-defined destructors start as not selected while the class
+  // definition is still incomplete.
+ if (Record->isBeingDefined())
+ NewDD->setIneligibleOrNotSelected(true);
// If the destructor needs an implicit exception specification, set it
// now. FIXME: It'd be nice to be able to create the right type to start
@@ -9100,6 +9270,32 @@ static Scope *getTagInjectionScope(Scope *S, const LangOptions &LangOpts) {
return S;
}
+/// Determine whether a declaration matches a known function in namespace std.
+static bool isStdBuiltin(ASTContext &Ctx, FunctionDecl *FD,
+ unsigned BuiltinID) {
+ switch (BuiltinID) {
+ case Builtin::BI__GetExceptionInfo:
+ // No type checking whatsoever.
+ return Ctx.getTargetInfo().getCXXABI().isMicrosoft();
+
+ case Builtin::BIaddressof:
+ case Builtin::BI__addressof:
+ case Builtin::BIforward:
+ case Builtin::BImove:
+ case Builtin::BImove_if_noexcept:
+ case Builtin::BIas_const: {
+ // Ensure that we don't treat the algorithm
+ // OutputIt std::move(InputIt, InputIt, OutputIt)
+ // as the builtin std::move.
+ const auto *FPT = FD->getType()->castAs<FunctionProtoType>();
+ return FPT->getNumParams() == 1 && !FPT->isVariadic();
+ }
+
+ default:
+ return false;
+ }
+}
+
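
An assumed illustration of the distinction drawn above: only the
single-argument, non-variadic std::move is treated as the builtin cast; the
three-argument algorithm of the same name is an ordinary function.

    #include <algorithm>
    #include <string>
    #include <utility>
    #include <vector>

    void demo() {
      std::string a = "x", b;
      b = std::move(a); // one-argument form: the builtin cast
      std::vector<std::string> src(3), dst(3);
      std::move(src.begin(), src.end(), dst.begin()); // plain algorithm
    }
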
NamedDecl*
Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo, LookupResult &Previous,
@@ -9112,8 +9308,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
Diag(D.getIdentifierLoc(), diag::err_function_decl_cmse_ns_call);
SmallVector<TemplateParameterList *, 4> TemplateParamLists;
- for (TemplateParameterList *TPL : TemplateParamListsRef)
- TemplateParamLists.push_back(TPL);
+ llvm::append_range(TemplateParamLists, TemplateParamListsRef);
if (TemplateParameterList *Invented = D.getInventedTemplateParameterList()) {
if (!TemplateParamLists.empty() &&
Invented->getDepth() == TemplateParamLists.back()->getDepth())
@@ -9193,7 +9388,7 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
}
if ((Parent->isClass() || Parent->isStruct()) &&
Parent->hasAttr<SYCLSpecialClassAttr>() &&
- NewFD->getKind() == Decl::Kind::CXXMethod &&
+ NewFD->getKind() == Decl::Kind::CXXMethod && NewFD->getIdentifier() &&
NewFD->getName() == "__init" && D.isFunctionDefinition()) {
if (auto *Def = Parent->getDefinition())
Def->setInitMethod(true);
@@ -9673,7 +9868,8 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
if (!NewFD->isInvalidDecl())
D.setRedeclaration(CheckFunctionDeclaration(S, NewFD, Previous,
- isMemberSpecialization));
+ isMemberSpecialization,
+ D.isFunctionDefinition()));
else if (!Previous.empty())
// Recover gracefully from an invalid redeclaration.
D.setRedeclaration(true);
@@ -9823,7 +10019,8 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
if (!NewFD->isInvalidDecl())
D.setRedeclaration(CheckFunctionDeclaration(S, NewFD, Previous,
- isMemberSpecialization));
+ isMemberSpecialization,
+ D.isFunctionDefinition()));
else if (!Previous.empty())
// Recover gracefully from an invalid redeclaration.
D.setRedeclaration(true);
@@ -9951,28 +10148,30 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// If this is the first declaration of a library builtin function, add
// attributes as appropriate.
- if (!D.isRedeclaration() &&
- NewFD->getDeclContext()->getRedeclContext()->isFileContext()) {
+ if (!D.isRedeclaration()) {
if (IdentifierInfo *II = Previous.getLookupName().getAsIdentifierInfo()) {
if (unsigned BuiltinID = II->getBuiltinID()) {
- if (NewFD->getLanguageLinkage() == CLanguageLinkage) {
- // Validate the type matches unless this builtin is specified as
- // matching regardless of its declared type.
- if (Context.BuiltinInfo.allowTypeMismatch(BuiltinID)) {
- NewFD->addAttr(BuiltinAttr::CreateImplicit(Context, BuiltinID));
- } else {
- ASTContext::GetBuiltinTypeError Error;
- LookupNecessaryTypesForBuiltin(S, BuiltinID);
- QualType BuiltinType = Context.GetBuiltinType(BuiltinID, Error);
-
- if (!Error && !BuiltinType.isNull() &&
- Context.hasSameFunctionTypeIgnoringExceptionSpec(
- NewFD->getType(), BuiltinType))
+ bool InStdNamespace = Context.BuiltinInfo.isInStdNamespace(BuiltinID);
+ if (!InStdNamespace &&
+ NewFD->getDeclContext()->getRedeclContext()->isFileContext()) {
+ if (NewFD->getLanguageLinkage() == CLanguageLinkage) {
+ // Validate the type matches unless this builtin is specified as
+ // matching regardless of its declared type.
+ if (Context.BuiltinInfo.allowTypeMismatch(BuiltinID)) {
NewFD->addAttr(BuiltinAttr::CreateImplicit(Context, BuiltinID));
+ } else {
+ ASTContext::GetBuiltinTypeError Error;
+ LookupNecessaryTypesForBuiltin(S, BuiltinID);
+ QualType BuiltinType = Context.GetBuiltinType(BuiltinID, Error);
+
+ if (!Error && !BuiltinType.isNull() &&
+ Context.hasSameFunctionTypeIgnoringExceptionSpec(
+ NewFD->getType(), BuiltinType))
+ NewFD->addAttr(BuiltinAttr::CreateImplicit(Context, BuiltinID));
+ }
}
- } else if (BuiltinID == Builtin::BI__GetExceptionInfo &&
- Context.getTargetInfo().getCXXABI().isMicrosoft()) {
- // FIXME: We should consider this a builtin only in the std namespace.
+ } else if (InStdNamespace && NewFD->isInStdNamespace() &&
+ isStdBuiltin(Context, NewFD, BuiltinID)) {
NewFD->addAttr(BuiltinAttr::CreateImplicit(Context, BuiltinID));
}
}
@@ -10010,10 +10209,14 @@ Sema::ActOnFunctionDeclarator(Scope *S, Declarator &D, DeclContext *DC,
// marking the function.
AddCFAuditedAttribute(NewFD);
- // If this is a function definition, check if we have to apply optnone due to
- // a pragma.
- if(D.isFunctionDefinition())
+ // If this is a function definition, check if we have to apply any
+ // attributes (i.e. optnone and no_builtin) due to a pragma.
+ if (D.isFunctionDefinition()) {
AddRangeBasedOptnone(NewFD);
+ AddImplicitMSFunctionNoBuiltinAttr(NewFD);
+ AddSectionMSAllocText(NewFD);
+ ModifyFnAttributesMSPragmaOptimize(NewFD);
+ }
// If this is the first declaration of an extern C variable, update
// the map of such variables.
@@ -10337,14 +10540,14 @@ static bool CheckMultiVersionValue(Sema &S, const FunctionDecl *FD) {
// Provide a white-list of attributes that are allowed to be combined with
// multiversion functions.
static bool AttrCompatibleWithMultiVersion(attr::Kind Kind,
- MultiVersionKind MVType) {
+ MultiVersionKind MVKind) {
// Note: this list/diagnosis must match the list in
// checkMultiversionAttributesAllSame.
switch (Kind) {
default:
return false;
case attr::Used:
- return MVType == MultiVersionKind::Target;
+ return MVKind == MultiVersionKind::Target;
case attr::NonNull:
case attr::NoThrow:
return true;
@@ -10354,10 +10557,10 @@ static bool AttrCompatibleWithMultiVersion(attr::Kind Kind,
static bool checkNonMultiVersionCompatAttributes(Sema &S,
const FunctionDecl *FD,
const FunctionDecl *CausedFD,
- MultiVersionKind MVType) {
- const auto Diagnose = [FD, CausedFD, MVType](Sema &S, const Attr *A) {
+ MultiVersionKind MVKind) {
+ const auto Diagnose = [FD, CausedFD, MVKind](Sema &S, const Attr *A) {
S.Diag(FD->getLocation(), diag::err_multiversion_disallowed_other_attr)
- << static_cast<unsigned>(MVType) << A;
+ << static_cast<unsigned>(MVKind) << A;
if (CausedFD)
S.Diag(CausedFD->getLocation(), diag::note_multiversioning_caused_here);
return true;
@@ -10367,20 +10570,20 @@ static bool checkNonMultiVersionCompatAttributes(Sema &S,
switch (A->getKind()) {
case attr::CPUDispatch:
case attr::CPUSpecific:
- if (MVType != MultiVersionKind::CPUDispatch &&
- MVType != MultiVersionKind::CPUSpecific)
+ if (MVKind != MultiVersionKind::CPUDispatch &&
+ MVKind != MultiVersionKind::CPUSpecific)
return Diagnose(S, A);
break;
case attr::Target:
- if (MVType != MultiVersionKind::Target)
+ if (MVKind != MultiVersionKind::Target)
return Diagnose(S, A);
break;
case attr::TargetClones:
- if (MVType != MultiVersionKind::TargetClones)
+ if (MVKind != MultiVersionKind::TargetClones)
return Diagnose(S, A);
break;
default:
- if (!AttrCompatibleWithMultiVersion(A->getKind(), MVType))
+ if (!AttrCompatibleWithMultiVersion(A->getKind(), MVKind))
return Diagnose(S, A);
break;
}
@@ -10504,7 +10707,7 @@ bool Sema::areMultiversionVariantFunctionsCompatible(
static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
const FunctionDecl *NewFD,
bool CausesMV,
- MultiVersionKind MVType) {
+ MultiVersionKind MVKind) {
if (!S.getASTContext().getTargetInfo().supportsMultiVersioning()) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_not_supported);
if (OldFD)
@@ -10512,15 +10715,15 @@ static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
return true;
}
- bool IsCPUSpecificCPUDispatchMVType =
- MVType == MultiVersionKind::CPUDispatch ||
- MVType == MultiVersionKind::CPUSpecific;
+ bool IsCPUSpecificCPUDispatchMVKind =
+ MVKind == MultiVersionKind::CPUDispatch ||
+ MVKind == MultiVersionKind::CPUSpecific;
if (CausesMV && OldFD &&
- checkNonMultiVersionCompatAttributes(S, OldFD, NewFD, MVType))
+ checkNonMultiVersionCompatAttributes(S, OldFD, NewFD, MVKind))
return true;
- if (checkNonMultiVersionCompatAttributes(S, NewFD, nullptr, MVType))
+ if (checkNonMultiVersionCompatAttributes(S, NewFD, nullptr, MVKind))
return true;
// Only allow transition to MultiVersion if it hasn't been used.
@@ -10533,11 +10736,11 @@ static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
S.PDiag(diag::note_multiversioning_caused_here)),
PartialDiagnosticAt(NewFD->getLocation(),
S.PDiag(diag::err_multiversion_doesnt_support)
- << static_cast<unsigned>(MVType)),
+ << static_cast<unsigned>(MVKind)),
PartialDiagnosticAt(NewFD->getLocation(),
S.PDiag(diag::err_multiversion_diff)),
/*TemplatesSupported=*/false,
- /*ConstexprSupported=*/!IsCPUSpecificCPUDispatchMVType,
+ /*ConstexprSupported=*/!IsCPUSpecificCPUDispatchMVKind,
/*CLinkageMayDiffer=*/false);
}
@@ -10548,22 +10751,22 @@ static bool CheckMultiVersionAdditionalRules(Sema &S, const FunctionDecl *OldFD,
///
/// Returns true if there was an error, false otherwise.
static bool CheckMultiVersionFirstFunction(Sema &S, FunctionDecl *FD,
- MultiVersionKind MVType,
+ MultiVersionKind MVKind,
const TargetAttr *TA) {
- assert(MVType != MultiVersionKind::None &&
+ assert(MVKind != MultiVersionKind::None &&
"Function lacks multiversion attribute");
// Target only causes MV if it is default, otherwise this is a normal
// function.
- if (MVType == MultiVersionKind::Target && !TA->isDefaultVersion())
+ if (MVKind == MultiVersionKind::Target && !TA->isDefaultVersion())
return false;
- if (MVType == MultiVersionKind::Target && CheckMultiVersionValue(S, FD)) {
+ if (MVKind == MultiVersionKind::Target && CheckMultiVersionValue(S, FD)) {
FD->setInvalidDecl();
return true;
}
- if (CheckMultiVersionAdditionalRules(S, nullptr, FD, true, MVType)) {
+ if (CheckMultiVersionAdditionalRules(S, nullptr, FD, true, MVKind)) {
FD->setInvalidDecl();
return true;
}
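
An assumed example of the rule stated above: only a "default" target
attribute turns a declaration into (the root of) a multiversioned function;
a lone non-default target attribute leaves it an ordinary function.

    __attribute__((target("default"))) int fast_sum(int x); // causes MV
    __attribute__((target("avx2")))    int fast_sum(int x); // adds a version
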
@@ -10583,8 +10786,7 @@ static bool PreviousDeclsHaveMultiVersionAttribute(const FunctionDecl *FD) {
static bool CheckTargetCausesMultiVersioning(
Sema &S, FunctionDecl *OldFD, FunctionDecl *NewFD, const TargetAttr *NewTA,
- bool &Redeclaration, NamedDecl *&OldDecl, bool &MergeTypeWithPrevious,
- LookupResult &Previous) {
+ bool &Redeclaration, NamedDecl *&OldDecl, LookupResult &Previous) {
const auto *OldTA = OldFD->getAttr<TargetAttr>();
ParsedTargetAttr NewParsed = NewTA->parse();
// Sort order doesn't matter, it just needs to be consistent.
@@ -10597,13 +10799,6 @@ static bool CheckTargetCausesMultiVersioning(
return false;
// Otherwise, this decl causes MultiVersioning.
- if (!S.getASTContext().getTargetInfo().supportsMultiVersioning()) {
- S.Diag(NewFD->getLocation(), diag::err_multiversion_not_supported);
- S.Diag(OldFD->getLocation(), diag::note_previous_declaration);
- NewFD->setInvalidDecl();
- return true;
- }
-
if (CheckMultiVersionAdditionalRules(S, OldFD, NewFD, true,
MultiVersionKind::Target)) {
NewFD->setInvalidDecl();
@@ -10656,7 +10851,6 @@ static bool CheckTargetCausesMultiVersioning(
OldFD->setIsMultiVersion();
NewFD->setIsMultiVersion();
Redeclaration = false;
- MergeTypeWithPrevious = false;
OldDecl = nullptr;
Previous.clear();
return false;
@@ -10678,14 +10872,14 @@ static bool MultiVersionTypesCompatible(MultiVersionKind Old,
/// multiversioned declaration collection.
static bool CheckMultiVersionAdditionalDecl(
Sema &S, FunctionDecl *OldFD, FunctionDecl *NewFD,
- MultiVersionKind NewMVType, const TargetAttr *NewTA,
+ MultiVersionKind NewMVKind, const TargetAttr *NewTA,
const CPUDispatchAttr *NewCPUDisp, const CPUSpecificAttr *NewCPUSpec,
const TargetClonesAttr *NewClones, bool &Redeclaration, NamedDecl *&OldDecl,
- bool &MergeTypeWithPrevious, LookupResult &Previous) {
+ LookupResult &Previous) {
- MultiVersionKind OldMVType = OldFD->getMultiVersionKind();
+ MultiVersionKind OldMVKind = OldFD->getMultiVersionKind();
// Disallow mixing of multiversioning types.
- if (!MultiVersionTypesCompatible(OldMVType, NewMVType)) {
+ if (!MultiVersionTypesCompatible(OldMVKind, NewMVKind)) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_types_mixed);
S.Diag(OldFD->getLocation(), diag::note_previous_declaration);
NewFD->setInvalidDecl();
@@ -10701,18 +10895,22 @@ static bool CheckMultiVersionAdditionalDecl(
bool UseMemberUsingDeclRules =
S.CurContext->isRecord() && !NewFD->getFriendObjectKind();
+ bool MayNeedOverloadableChecks =
+ AllowOverloadingOfFunction(Previous, S.Context, NewFD);
+
// Next, check ALL non-overloads to see if this is a redeclaration of a
// previous member of the MultiVersion set.
for (NamedDecl *ND : Previous) {
FunctionDecl *CurFD = ND->getAsFunction();
if (!CurFD)
continue;
- if (S.IsOverload(NewFD, CurFD, UseMemberUsingDeclRules))
+ if (MayNeedOverloadableChecks &&
+ S.IsOverload(NewFD, CurFD, UseMemberUsingDeclRules))
continue;
- switch (NewMVType) {
+ switch (NewMVKind) {
case MultiVersionKind::None:
- assert(OldMVType == MultiVersionKind::TargetClones &&
+ assert(OldMVKind == MultiVersionKind::TargetClones &&
"Only target_clones can be omitted in subsequent declarations");
break;
case MultiVersionKind::Target: {
@@ -10737,7 +10935,6 @@ static bool CheckMultiVersionAdditionalDecl(
const auto *CurClones = CurFD->getAttr<TargetClonesAttr>();
Redeclaration = true;
OldDecl = CurFD;
- MergeTypeWithPrevious = true;
NewFD->setIsMultiVersion();
if (CurClones && NewClones &&
@@ -10760,7 +10957,7 @@ static bool CheckMultiVersionAdditionalDecl(
// Handle CPUDispatch/CPUSpecific versions.
// Only 1 CPUDispatch function is allowed, this will make it go through
// the redeclaration errors.
- if (NewMVType == MultiVersionKind::CPUDispatch &&
+ if (NewMVKind == MultiVersionKind::CPUDispatch &&
CurFD->hasAttr<CPUDispatchAttr>()) {
if (CurCPUDisp->cpus_size() == NewCPUDisp->cpus_size() &&
std::equal(
@@ -10781,8 +10978,7 @@ static bool CheckMultiVersionAdditionalDecl(
NewFD->setInvalidDecl();
return true;
}
- if (NewMVType == MultiVersionKind::CPUSpecific && CurCPUSpec) {
-
+ if (NewMVKind == MultiVersionKind::CPUSpecific && CurCPUSpec) {
if (CurCPUSpec->cpus_size() == NewCPUSpec->cpus_size() &&
std::equal(
CurCPUSpec->cpus_begin(), CurCPUSpec->cpus_end(),
@@ -10817,14 +11013,14 @@ static bool CheckMultiVersionAdditionalDecl(
// Else, this is simply a non-redecl case. Checking the 'value' is only
  // necessary in the Target case, since the CPUSpecific/Dispatch cases are
// handled in the attribute adding step.
- if (NewMVType == MultiVersionKind::Target &&
+ if (NewMVKind == MultiVersionKind::Target &&
CheckMultiVersionValue(S, NewFD)) {
NewFD->setInvalidDecl();
return true;
}
if (CheckMultiVersionAdditionalRules(S, OldFD, NewFD,
- !OldFD->isMultiVersion(), NewMVType)) {
+ !OldFD->isMultiVersion(), NewMVKind)) {
NewFD->setInvalidDecl();
return true;
}
@@ -10840,7 +11036,6 @@ static bool CheckMultiVersionAdditionalDecl(
NewFD->setIsMultiVersion();
Redeclaration = false;
- MergeTypeWithPrevious = false;
OldDecl = nullptr;
Previous.clear();
return false;
@@ -10854,19 +11049,18 @@ static bool CheckMultiVersionAdditionalDecl(
/// Returns true if there was an error, false otherwise.
static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
bool &Redeclaration, NamedDecl *&OldDecl,
- bool &MergeTypeWithPrevious,
LookupResult &Previous) {
const auto *NewTA = NewFD->getAttr<TargetAttr>();
const auto *NewCPUDisp = NewFD->getAttr<CPUDispatchAttr>();
const auto *NewCPUSpec = NewFD->getAttr<CPUSpecificAttr>();
const auto *NewClones = NewFD->getAttr<TargetClonesAttr>();
- MultiVersionKind MVType = NewFD->getMultiVersionKind();
+ MultiVersionKind MVKind = NewFD->getMultiVersionKind();
  // Main isn't allowed to become a multiversion function; however, it IS
  // permitted for 'main' to be marked with the 'target' optimization hint.
if (NewFD->isMain()) {
- if (MVType != MultiVersionKind::None &&
- !(MVType == MultiVersionKind::Target && !NewTA->isDefaultVersion())) {
+ if (MVKind != MultiVersionKind::None &&
+ !(MVKind == MultiVersionKind::Target && !NewTA->isDefaultVersion())) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_not_allowed_on_main);
NewFD->setInvalidDecl();
return true;
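A hedged sketch of the rule above: 'main' may carry 'target' as a plain optimization hint, but any spelling that would actually multiversion it is rejected.

    __attribute__((target("avx2"))) int main() { return 0; }   // OK: optimization hint only
    // __attribute__((target("default"))) int main() {}        // error: would multiversion main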
@@ -10879,19 +11073,19 @@ static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
NewFD->getDeclContext()->getRedeclContext()) {
// If there's no previous declaration, AND this isn't attempting to cause
// multiversioning, this isn't an error condition.
- if (MVType == MultiVersionKind::None)
+ if (MVKind == MultiVersionKind::None)
return false;
- return CheckMultiVersionFirstFunction(S, NewFD, MVType, NewTA);
+ return CheckMultiVersionFirstFunction(S, NewFD, MVKind, NewTA);
}
FunctionDecl *OldFD = OldDecl->getAsFunction();
- if (!OldFD->isMultiVersion() && MVType == MultiVersionKind::None)
+ if (!OldFD->isMultiVersion() && MVKind == MultiVersionKind::None)
return false;
// Multiversioned redeclarations aren't allowed to omit the attribute, except
// for target_clones.
- if (OldFD->isMultiVersion() && MVType == MultiVersionKind::None &&
+ if (OldFD->isMultiVersion() && MVKind == MultiVersionKind::None &&
OldFD->getMultiVersionKind() != MultiVersionKind::TargetClones) {
S.Diag(NewFD->getLocation(), diag::err_multiversion_required_in_redecl)
<< (OldFD->getMultiVersionKind() != MultiVersionKind::Target);
@@ -10900,11 +11094,10 @@ static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
}
if (!OldFD->isMultiVersion()) {
- switch (MVType) {
+ switch (MVKind) {
case MultiVersionKind::Target:
return CheckTargetCausesMultiVersioning(S, OldFD, NewFD, NewTA,
- Redeclaration, OldDecl,
- MergeTypeWithPrevious, Previous);
+ Redeclaration, OldDecl, Previous);
case MultiVersionKind::TargetClones:
if (OldFD->isUsed(false)) {
NewFD->setInvalidDecl();
@@ -10918,18 +11111,13 @@ static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
break;
}
}
- // Handle the target potentially causes multiversioning case.
- if (!OldFD->isMultiVersion() && MVType == MultiVersionKind::Target)
- return CheckTargetCausesMultiVersioning(S, OldFD, NewFD, NewTA,
- Redeclaration, OldDecl,
- MergeTypeWithPrevious, Previous);
// At this point, we have a multiversion function decl (in OldFD) AND an
// appropriate attribute in the current function decl. Resolve that these are
// still compatible with previous declarations.
- return CheckMultiVersionAdditionalDecl(
- S, OldFD, NewFD, MVType, NewTA, NewCPUDisp, NewCPUSpec, NewClones,
- Redeclaration, OldDecl, MergeTypeWithPrevious, Previous);
+ return CheckMultiVersionAdditionalDecl(S, OldFD, NewFD, MVKind, NewTA,
+ NewCPUDisp, NewCPUSpec, NewClones,
+ Redeclaration, OldDecl, Previous);
}
/// Perform semantic checking of a new function declaration.
@@ -10951,7 +11139,8 @@ static bool CheckMultiVersionFunction(Sema &S, FunctionDecl *NewFD,
/// \returns true if the function declaration is a redeclaration.
bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
LookupResult &Previous,
- bool IsMemberSpecialization) {
+ bool IsMemberSpecialization,
+ bool DeclIsDefn) {
assert(!NewFD->getReturnType()->isVariablyModifiedType() &&
"Variably modified return types are not handled here");
@@ -11019,8 +11208,7 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
}
}
- if (CheckMultiVersionFunction(*this, NewFD, Redeclaration, OldDecl,
- MergeTypeWithPrevious, Previous))
+ if (CheckMultiVersionFunction(*this, NewFD, Redeclaration, OldDecl, Previous))
return Redeclaration;
// PPC MMA non-pointer types are not allowed as function return types.
@@ -11070,7 +11258,8 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
if (Redeclaration) {
// NewFD and OldDecl represent declarations that need to be
// merged.
- if (MergeFunctionDecl(NewFD, OldDecl, S, MergeTypeWithPrevious)) {
+ if (MergeFunctionDecl(NewFD, OldDecl, S, MergeTypeWithPrevious,
+ DeclIsDefn)) {
NewFD->setInvalidDecl();
return Redeclaration;
}
@@ -11200,6 +11389,15 @@ bool Sema::CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD,
checkThisInStaticMemberFunctionType(Method);
}
+ // C++20: dcl.decl.general p4:
+ // The optional requires-clause ([temp.pre]) in an init-declarator or
+ // member-declarator shall be present only if the declarator declares a
+ // templated function ([dcl.fct]).
+ if (Expr *TRC = NewFD->getTrailingRequiresClause()) {
+ if (!NewFD->isTemplated() && !NewFD->isTemplateInstantiation())
+ Diag(TRC->getBeginLoc(), diag::err_constrained_non_templated_function);
+ }
+
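A minimal sketch (not from the patch) of the C++20 rule enforced here; the diagnostic fires only when the declarator is neither templated nor a template instantiation.

    template <typename T>
    void f(T) requires (sizeof(T) > 1);  // OK: templated function
    void g() requires true;              // error: constrained non-templated function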
if (CXXConversionDecl *Conversion = dyn_cast<CXXConversionDecl>(NewFD))
ActOnConversionDeclarator(Conversion);
@@ -11315,6 +11513,11 @@ void Sema::CheckMain(FunctionDecl* FD, const DeclSpec& DS) {
return;
}
+  // Functions named main in HLSL are default entry points, but they don't
+  // have specific signatures they are required to conform to.
+ if (getLangOpts().HLSL)
+ return;
+
QualType T = FD->getType();
assert(T->isFunctionType() && "function decl is not of function type");
const FunctionType* FT = T->castAs<FunctionType>();
@@ -12990,7 +13193,6 @@ void Sema::ActOnUninitializedDecl(Decl *RealDecl) {
if (Type->isReferenceType()) {
Diag(Var->getLocation(), diag::err_reference_var_requires_init)
<< Var << SourceRange(Var->getLocation(), Var->getLocation());
- Var->setInvalidDecl();
return;
}
@@ -13139,11 +13341,9 @@ void Sema::ActOnCXXForRangeDecl(Decl *D) {
}
}
-StmtResult
-Sema::ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
- IdentifierInfo *Ident,
- ParsedAttributes &Attrs,
- SourceLocation AttrEnd) {
+StmtResult Sema::ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
+ IdentifierInfo *Ident,
+ ParsedAttributes &Attrs) {
// C++1y [stmt.iter]p1:
// A range-based for statement of the form
// for ( for-range-identifier : for-range-initializer ) statement
@@ -13156,9 +13356,9 @@ Sema::ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
DS.SetTypeSpecType(DeclSpec::TST_auto, IdentLoc, PrevSpec, DiagID,
getPrintingPolicy());
- Declarator D(DS, DeclaratorContext::ForInit);
+ Declarator D(DS, ParsedAttributesView::none(), DeclaratorContext::ForInit);
D.SetIdentifier(Ident, IdentLoc);
- D.takeAttributes(Attrs, AttrEnd);
+ D.takeAttributes(Attrs);
D.AddTypeInfo(DeclaratorChunk::getReference(0, IdentLoc, /*lvalue*/ false),
IdentLoc);
@@ -13166,7 +13366,8 @@ Sema::ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
cast<VarDecl>(Var)->setCXXForRangeDecl(true);
FinalizeDeclaration(Var);
return ActOnDeclStmt(FinalizeDeclaratorGroup(S, DS, Var), IdentLoc,
- AttrEnd.isValid() ? AttrEnd : IdentLoc);
+ Attrs.Range.getEnd().isValid() ? Attrs.Range.getEnd()
+ : IdentLoc);
}
void Sema::CheckCompleteVariableDeclaration(VarDecl *var) {
@@ -14119,18 +14320,28 @@ void Sema::ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls) {
DeclaratorChunk::FunctionTypeInfo &FTI = D.getFunctionTypeInfo();
- // Verify 6.9.1p6: 'every identifier in the identifier list shall be declared'
- // for a K&R function.
+ // C99 6.9.1p6 "If a declarator includes an identifier list, each declaration
+ // in the declaration list shall have at least one declarator, those
+ // declarators shall only declare identifiers from the identifier list, and
+  // every identifier in the identifier list shall be declared."
+ //
+ // C89 3.7.1p5 "If a declarator includes an identifier list, only the
+ // identifiers it names shall be declared in the declaration list."
+ //
+ // This is why we only diagnose in C99 and later. Note, the other conditions
+ // listed are checked elsewhere.
if (!FTI.hasPrototype) {
for (int i = FTI.NumParams; i != 0; /* decrement in loop */) {
--i;
if (FTI.Params[i].Param == nullptr) {
- SmallString<256> Code;
- llvm::raw_svector_ostream(Code)
- << " int " << FTI.Params[i].Ident->getName() << ";\n";
- Diag(FTI.Params[i].IdentLoc, diag::ext_param_not_declared)
- << FTI.Params[i].Ident
- << FixItHint::CreateInsertion(LocAfterDecls, Code);
+ if (getLangOpts().C99) {
+ SmallString<256> Code;
+ llvm::raw_svector_ostream(Code)
+ << " int " << FTI.Params[i].Ident->getName() << ";\n";
+ Diag(FTI.Params[i].IdentLoc, diag::ext_param_not_declared)
+ << FTI.Params[i].Ident
+ << FixItHint::CreateInsertion(LocAfterDecls, Code);
+ }
// Implicitly declare the argument as type 'int' for lack of a better
// type.
@@ -14143,7 +14354,8 @@ void Sema::ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
// Use the identifier location for the type source range.
DS.SetRangeStart(FTI.Params[i].IdentLoc);
DS.SetRangeEnd(FTI.Params[i].IdentLoc);
- Declarator ParamD(DS, DeclaratorContext::KNRTypeList);
+ Declarator ParamD(DS, ParsedAttributesView::none(),
+ DeclaratorContext::KNRTypeList);
ParamD.SetIdentifier(FTI.Params[i].Ident, FTI.Params[i].IdentLoc);
FTI.Params[i].Param = ActOnParamDeclarator(S, ParamD);
}
@@ -14154,7 +14366,7 @@ void Sema::ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
Decl *
Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
- SkipBodyInfo *SkipBody) {
+ SkipBodyInfo *SkipBody, FnBodyKind BodyKind) {
assert(getCurFunctionDecl() == nullptr && "Function parsing confused");
assert(D.isFunctionDeclarator() && "Not a function declarator!");
Scope *ParentScope = FnBodyScope->getParent();
@@ -14173,7 +14385,7 @@ Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Declarator &D,
D.setFunctionDefinitionKind(FunctionDefinitionKind::Definition);
Decl *DP = HandleDeclarator(ParentScope, D, TemplateParameterLists);
- Decl *Dcl = ActOnStartOfFunctionDef(FnBodyScope, DP, SkipBody);
+ Decl *Dcl = ActOnStartOfFunctionDef(FnBodyScope, DP, SkipBody, BodyKind);
if (!Bases.empty())
ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(Dcl, Bases);
@@ -14196,9 +14408,6 @@ ShouldWarnAboutMissingPrototype(const FunctionDecl *FD,
if (!FD->isGlobal())
return false;
- if (!FD->isExternallyVisible())
- return false;
-
// Don't warn about C++ member functions.
if (isa<CXXMethodDecl>(FD))
return false;
@@ -14229,6 +14438,11 @@ ShouldWarnAboutMissingPrototype(const FunctionDecl *FD,
if (FD->isDeleted())
return false;
+  // Don't warn on implicitly local functions (such as functions whose
+  // parameters have local types).
+ if (!FD->isExternallyVisible())
+ return false;
+
for (const FunctionDecl *Prev = FD->getPreviousDecl();
Prev; Prev = Prev->getPreviousDecl()) {
// Ignore any declarations that occur in function or method
@@ -14347,7 +14561,8 @@ static void RebuildLambdaScopeInfo(CXXMethodDecl *CallOperator,
}
Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
- SkipBodyInfo *SkipBody) {
+ SkipBodyInfo *SkipBody,
+ FnBodyKind BodyKind) {
if (!D) {
// Parsing the function declaration failed in some way. Push on a fake scope
// anyway so we can try to parse the function body.
@@ -14436,11 +14651,11 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
}
}
- // The return type of a function definition must be complete
- // (C99 6.9.1p3, C++ [dcl.fct]p6).
+ // The return type of a function definition must be complete (C99 6.9.1p3),
+  // unless the function is deleted (C++ specific, C++ [dcl.fct.def.general]p2).
QualType ResultType = FD->getReturnType();
if (!ResultType->isDependentType() && !ResultType->isVoidType() &&
- !FD->isInvalidDecl() &&
+ !FD->isInvalidDecl() && BodyKind != FnBodyKind::Delete &&
RequireCompleteType(FD->getLocation(), ResultType,
diag::err_func_def_incomplete_result))
FD->setInvalidDecl();
@@ -14449,8 +14664,9 @@ Decl *Sema::ActOnStartOfFunctionDef(Scope *FnBodyScope, Decl *D,
PushDeclContext(FnBodyScope, FD);
// Check the validity of our function parameters
- CheckParmsForFunctionDef(FD->parameters(),
- /*CheckParameterNames=*/true);
+ if (BodyKind != FnBodyKind::Delete)
+ CheckParmsForFunctionDef(FD->parameters(),
+ /*CheckParameterNames=*/true);
// Add non-parameter declarations already in the function to the current
// scope.
@@ -14660,18 +14876,20 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
if (getLangOpts().CPlusPlus14) {
if (!FD->isInvalidDecl() && Body && !FD->isDependentContext() &&
FD->getReturnType()->isUndeducedType()) {
- // If the function has a deduced result type but contains no 'return'
- // statements, the result type as written must be exactly 'auto', and
- // the deduced result type is 'void'.
+ // For a function with a deduced result type to return void,
+ // the result type as written must be 'auto' or 'decltype(auto)',
+ // possibly cv-qualified or constrained, but not ref-qualified.
if (!FD->getReturnType()->getAs<AutoType>()) {
Diag(dcl->getLocation(), diag::err_auto_fn_no_return_but_not_auto)
<< FD->getReturnType();
FD->setInvalidDecl();
} else {
- // Substitute 'void' for the 'auto' in the type.
- TypeLoc ResultType = getReturnTypeLoc(FD);
- Context.adjustDeducedFunctionResultType(
- FD, SubstAutoType(ResultType.getType(), Context.VoidTy));
+ // Falling off the end of the function is the same as 'return;'.
+ Expr *Dummy = nullptr;
+ if (DeduceFunctionTypeFromReturnExpr(
+ FD, dcl->getLocation(), Dummy,
+ FD->getReturnType()->getAs<AutoType>()))
+ FD->setInvalidDecl();
}
}
} else if (getLangOpts().CPlusPlus11 && isLambdaCallOperator(FD)) {
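The new path treats flowing off the end of the body as an implicit 'return;', so deduction now fails for return-type specifiers that cannot bind to it. A hedged sketch:

    auto  f() {}  // OK: deduced as 'void', exactly as if the body were 'return;'
    auto& g() {}  // error: 'auto &' cannot be deduced from an implicit 'return;'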
@@ -14796,18 +15014,56 @@ Decl *Sema::ActOnFinishFunctionBody(Decl *dcl, Stmt *Body,
? FixItHint::CreateInsertion(findBeginLoc(), "static ")
: FixItHint{});
}
+ }
- // GNU warning -Wstrict-prototypes
- // Warn if K&R function is defined without a previous declaration.
- // This warning is issued only if the definition itself does not
- // provide a prototype. Only K&R definitions do not provide a
- // prototype.
- if (!FD->hasWrittenPrototype()) {
- TypeSourceInfo *TI = FD->getTypeSourceInfo();
- TypeLoc TL = TI->getTypeLoc();
- FunctionTypeLoc FTL = TL.getAsAdjusted<FunctionTypeLoc>();
- Diag(FTL.getLParenLoc(), diag::warn_strict_prototypes) << 2;
- }
+ // If the function being defined does not have a prototype, then we may
+ // need to diagnose it as changing behavior in C2x because we now know
+ // whether the function accepts arguments or not. This only handles the
+ // case where the definition has no prototype but does have parameters
+ // and either there is no previous potential prototype, or the previous
+ // potential prototype also has no actual prototype. This handles cases
+ // like:
+ // void f(); void f(a) int a; {}
+ // void g(a) int a; {}
+ // See MergeFunctionDecl() for other cases of the behavior change
+ // diagnostic. See GetFullTypeForDeclarator() for handling of a function
+ // type without a prototype.
+ if (!FD->hasWrittenPrototype() && FD->getNumParams() != 0 &&
+ (!PossiblePrototype || (!PossiblePrototype->hasWrittenPrototype() &&
+ !PossiblePrototype->isImplicit()))) {
+ // The function definition has parameters, so this will change behavior
+ // in C2x. If there is a possible prototype, it comes before the
+ // function definition.
+ // FIXME: The declaration may have already been diagnosed as being
+ // deprecated in GetFullTypeForDeclarator() if it had no arguments, but
+ // there's no way to test for the "changes behavior" condition in
+ // SemaType.cpp when forming the declaration's function type. So, we do
+ // this awkward dance instead.
+ //
+ // If we have a possible prototype and it declares a function with a
+ // prototype, we don't want to diagnose it; if we have a possible
+ // prototype and it has no prototype, it may have already been
+ // diagnosed in SemaType.cpp as deprecated depending on whether
+ // -Wstrict-prototypes is enabled. If we already warned about it being
+ // deprecated, add a note that it also changes behavior. If we didn't
+ // warn about it being deprecated (because the diagnostic is not
+ // enabled), warn now that it is deprecated and changes behavior.
+
+ // This K&R C function definition definitely changes behavior in C2x,
+ // so diagnose it.
+ Diag(FD->getLocation(), diag::warn_non_prototype_changes_behavior)
+ << /*definition*/ 1 << /* not supported in C2x */ 0;
+
+ // If we have a possible prototype for the function which is a user-
+ // visible declaration, we already tested that it has no prototype.
+ // This will change behavior in C2x. This gets a warning rather than a
+ // note because it's the same behavior-changing problem as with the
+ // definition.
+ if (PossiblePrototype)
+ Diag(PossiblePrototype->getLocation(),
+ diag::warn_non_prototype_changes_behavior)
+ << /*declaration*/ 0 << /* conflicting */ 1 << /*subsequent*/ 1
+ << /*definition*/ 1;
}
// Warn on CPUDispatch with an actual body.
@@ -15028,6 +15284,10 @@ void Sema::ActOnFinishDelayedAttribute(Scope *S, Decl *D,
/// call, forming a call to an implicitly defined function (per C99 6.5.1p2).
NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
IdentifierInfo &II, Scope *S) {
+ // It is not valid to implicitly define a function in C2x.
+ assert(LangOpts.implicitFunctionsAllowed() &&
+ "Implicit function declarations aren't allowed in this language mode");
+
// Find the scope in which the identifier is injected and the corresponding
// DeclContext.
// FIXME: C89 does not say what happens if there is no enclosing block scope.
@@ -15066,15 +15326,13 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
}
}
- // Extension in C99. Legal in C90, but warn about it.
+ // Extension in C99 (defaults to error). Legal in C89, but warn about it.
unsigned diag_id;
if (II.getName().startswith("__builtin_"))
diag_id = diag::warn_builtin_unknown;
// OpenCL v2.0 s6.9.u - Implicit function declaration is not supported.
- else if (getLangOpts().OpenCL)
- diag_id = diag::err_opencl_implicit_function_decl;
else if (getLangOpts().C99)
- diag_id = diag::ext_implicit_function_decl;
+ diag_id = diag::ext_implicit_function_decl_c99;
else
diag_id = diag::warn_implicit_function_decl;
@@ -15092,9 +15350,16 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
}
Diag(Loc, diag_id) << &II;
- if (Corrected)
- diagnoseTypo(Corrected, PDiag(diag::note_function_suggestion),
- /*ErrorRecovery*/ false);
+ if (Corrected) {
+ // If the correction is going to suggest an implicitly defined function,
+ // skip the correction as not being a particularly good idea.
+ bool Diagnose = true;
+ if (const auto *D = Corrected.getCorrectionDecl())
+ Diagnose = !D->isImplicit();
+ if (Diagnose)
+ diagnoseTypo(Corrected, PDiag(diag::note_function_suggestion),
+ /*ErrorRecovery*/ false);
+ }
// If we found a prior declaration of this function, don't bother building
// another one. We've already pushed that one into scope, so there's nothing
@@ -15112,7 +15377,7 @@ NamedDecl *Sema::ImplicitlyDefineFunction(SourceLocation Loc,
(void)Error; // Silence warning.
assert(!Error && "Error setting up implicit decl!");
SourceLocation NoLoc;
- Declarator D(DS, DeclaratorContext::Block);
+ Declarator D(DS, ParsedAttributesView::none(), DeclaratorContext::Block);
D.AddTypeInfo(DeclaratorChunk::getFunction(/*HasProto=*/false,
/*IsAmbiguous=*/false,
/*LParenLoc=*/NoLoc,
@@ -15197,7 +15462,7 @@ void Sema::AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(
// (3.1) If the allocation function takes an argument of type
// std​::​align_­val_­t, the storage will have the alignment
// specified by the value of this argument.
- if (AlignmentParam.hasValue() && !FD->hasAttr<AllocAlignAttr>()) {
+ if (AlignmentParam && !FD->hasAttr<AllocAlignAttr>()) {
FD->addAttr(AllocAlignAttr::CreateImplicit(
Context, ParamIdx(AlignmentParam.getValue(), FD), FD->getLocation()));
}
@@ -15315,6 +15580,7 @@ void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
// Add known guaranteed alignment for allocation functions.
switch (BuiltinID) {
+ case Builtin::BImemalign:
case Builtin::BIaligned_alloc:
if (!FD->hasAttr<AllocAlignAttr>())
FD->addAttr(AllocAlignAttr::CreateImplicit(Context, ParamIdx(1, FD),
@@ -15323,6 +15589,26 @@ void Sema::AddKnownFunctionAttributes(FunctionDecl *FD) {
default:
break;
}
+
+ // Add allocsize attribute for allocation functions.
+ switch (BuiltinID) {
+ case Builtin::BIcalloc:
+ FD->addAttr(AllocSizeAttr::CreateImplicit(
+ Context, ParamIdx(1, FD), ParamIdx(2, FD), FD->getLocation()));
+ break;
+ case Builtin::BImemalign:
+ case Builtin::BIaligned_alloc:
+ case Builtin::BIrealloc:
+ FD->addAttr(AllocSizeAttr::CreateImplicit(Context, ParamIdx(2, FD),
+ ParamIdx(), FD->getLocation()));
+ break;
+ case Builtin::BImalloc:
+ FD->addAttr(AllocSizeAttr::CreateImplicit(Context, ParamIdx(1, FD),
+ ParamIdx(), FD->getLocation()));
+ break;
+ default:
+ break;
+ }
}
AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction(FD);
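With the implicit allocsize attributes attached above, __builtin_object_size can see through calls to the C allocators. A sketch of the intended effect (the folded values assume the parameter indices used in the switch; the leak is deliberate brevity):

    #include <stdlib.h>
    unsigned long sizes() {
      return __builtin_object_size(malloc(16), 0)     // folds to 16
           + __builtin_object_size(calloc(4, 8), 0);  // folds to 32 (count * element size)
    }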
@@ -16019,9 +16305,20 @@ Decl *Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
// with C structs, unions, and enums when looking for a matching
// tag declaration or definition. See the similar lookup tweak
// in Sema::LookupName; is there a better way to deal with this?
- while (isa<RecordDecl>(SearchDC) || isa<EnumDecl>(SearchDC))
+ while (isa<RecordDecl, EnumDecl, ObjCContainerDecl>(SearchDC))
+ SearchDC = SearchDC->getParent();
+ } else if (getLangOpts().CPlusPlus) {
+      // Inside an ObjCContainer we want to keep it as the lexical decl context
+      // but go
+ // past it (most often to TranslationUnit) to find the semantic decl
+ // context.
+ while (isa<ObjCContainerDecl>(SearchDC))
SearchDC = SearchDC->getParent();
}
+ } else if (getLangOpts().CPlusPlus) {
+    // Don't use an ObjCContainerDecl as the semantic decl context for an
+    // anonymous TagDecl, the same way we skip it for a named TagDecl.
+ while (isa<ObjCContainerDecl>(SearchDC))
+ SearchDC = SearchDC->getParent();
}
if (Previous.isSingleResult() &&
@@ -16671,8 +16968,7 @@ void Sema::ActOnTagStartDefinition(Scope *S, Decl *TagD) {
AddPushedVisibilityAttribute(Tag);
}
-bool Sema::ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
- SkipBodyInfo &SkipBody) {
+bool Sema::ActOnDuplicateDefinition(Decl *Prev, SkipBodyInfo &SkipBody) {
if (!hasStructuralCompatLayout(Prev, SkipBody.New))
return false;
@@ -16681,14 +16977,10 @@ bool Sema::ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
return true;
}
-Decl *Sema::ActOnObjCContainerStartDefinition(Decl *IDecl) {
- assert(isa<ObjCContainerDecl>(IDecl) &&
- "ActOnObjCContainerStartDefinition - Not ObjCContainerDecl");
- DeclContext *OCD = cast<DeclContext>(IDecl);
- assert(OCD->getLexicalParent() == CurContext &&
+void Sema::ActOnObjCContainerStartDefinition(ObjCContainerDecl *IDecl) {
+ assert(IDecl->getLexicalParent() == CurContext &&
"The next DeclContext should be lexically contained in the current one.");
- CurContext = OCD;
- return IDecl;
+ CurContext = IDecl;
}
void Sema::ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagD,
@@ -16796,14 +17088,14 @@ void Sema::ActOnObjCContainerFinishDefinition() {
PopDeclContext();
}
-void Sema::ActOnObjCTemporaryExitContainerContext(DeclContext *DC) {
- assert(DC == CurContext && "Mismatch of container contexts");
- OriginalLexicalContext = DC;
+void Sema::ActOnObjCTemporaryExitContainerContext(ObjCContainerDecl *ObjCCtx) {
+ assert(ObjCCtx == CurContext && "Mismatch of container contexts");
+ OriginalLexicalContext = ObjCCtx;
ActOnObjCContainerFinishDefinition();
}
-void Sema::ActOnObjCReenterContainerContext(DeclContext *DC) {
- ActOnObjCContainerStartDefinition(cast<Decl>(DC));
+void Sema::ActOnObjCReenterContainerContext(ObjCContainerDecl *ObjCCtx) {
+ ActOnObjCContainerStartDefinition(ObjCCtx);
OriginalLexicalContext = nullptr;
}
@@ -16827,17 +17119,12 @@ void Sema::ActOnTagDefinitionError(Scope *S, Decl *TagD) {
// Note that FieldName may be null for anonymous bitfields.
ExprResult Sema::VerifyBitField(SourceLocation FieldLoc,
- IdentifierInfo *FieldName,
- QualType FieldTy, bool IsMsStruct,
- Expr *BitWidth, bool *ZeroWidth) {
+ IdentifierInfo *FieldName, QualType FieldTy,
+ bool IsMsStruct, Expr *BitWidth) {
assert(BitWidth);
if (BitWidth->containsErrors())
return ExprError();
- // Default to true; that shouldn't confuse checks for emptiness
- if (ZeroWidth)
- *ZeroWidth = true;
-
// C99 6.7.2.1p4 - verify the field type.
// C++ 9.6p3: A bit-field shall have integral or enumeration type.
if (!FieldTy->isDependentType() && !FieldTy->isIntegralOrEnumerationType()) {
@@ -16865,9 +17152,6 @@ ExprResult Sema::VerifyBitField(SourceLocation FieldLoc,
return ICE;
BitWidth = ICE.get();
- if (Value != 0 && ZeroWidth)
- *ZeroWidth = false;
-
// Zero-width bitfield is ok for anonymous field.
if (Value == 0 && FieldName)
return Diag(FieldLoc, diag::err_bitfield_has_zero_width) << FieldName;
@@ -17120,17 +17404,15 @@ FieldDecl *Sema::CheckFieldDecl(DeclarationName Name, QualType T,
AbstractFieldType))
InvalidDecl = true;
- bool ZeroWidth = false;
if (InvalidDecl)
BitWidth = nullptr;
// If this is declared as a bit-field, check the bit-field.
if (BitWidth) {
- BitWidth = VerifyBitField(Loc, II, T, Record->isMsStruct(Context), BitWidth,
- &ZeroWidth).get();
+ BitWidth =
+ VerifyBitField(Loc, II, T, Record->isMsStruct(Context), BitWidth).get();
if (!BitWidth) {
InvalidDecl = true;
BitWidth = nullptr;
- ZeroWidth = false;
}
}
@@ -17452,6 +17734,75 @@ void Sema::ActOnLastBitfield(SourceLocation DeclLoc,
AllIvarDecls.push_back(Ivar);
}
+namespace {
+/// [class.dtor]p4:
+/// At the end of the definition of a class, overload resolution is
+/// performed among the prospective destructors declared in that class with
+/// an empty argument list to select the destructor for the class, also
+/// known as the selected destructor.
+///
+/// We do the overload resolution here, then mark the selected destructor in the AST.
+/// Later, CXXRecordDecl::getDestructor() will return the selected destructor.
+void ComputeSelectedDestructor(Sema &S, CXXRecordDecl *Record) {
+ if (!Record->hasUserDeclaredDestructor()) {
+ return;
+ }
+
+ SourceLocation Loc = Record->getLocation();
+ OverloadCandidateSet OCS(Loc, OverloadCandidateSet::CSK_Normal);
+
+ for (auto *Decl : Record->decls()) {
+ if (auto *DD = dyn_cast<CXXDestructorDecl>(Decl)) {
+ if (DD->isInvalidDecl())
+ continue;
+ S.AddOverloadCandidate(DD, DeclAccessPair::make(DD, DD->getAccess()), {},
+ OCS);
+ assert(DD->isIneligibleOrNotSelected() && "Selecting a destructor but a destructor was already selected.");
+ }
+ }
+
+ if (OCS.empty()) {
+ return;
+ }
+ OverloadCandidateSet::iterator Best;
+ unsigned Msg = 0;
+ OverloadCandidateDisplayKind DisplayKind;
+
+ switch (OCS.BestViableFunction(S, Loc, Best)) {
+ case OR_Success:
+ case OR_Deleted:
+ Record->addedSelectedDestructor(dyn_cast<CXXDestructorDecl>(Best->Function));
+ break;
+
+ case OR_Ambiguous:
+ Msg = diag::err_ambiguous_destructor;
+ DisplayKind = OCD_AmbiguousCandidates;
+ break;
+
+ case OR_No_Viable_Function:
+ Msg = diag::err_no_viable_destructor;
+ DisplayKind = OCD_AllCandidates;
+ break;
+ }
+
+ if (Msg) {
+    // OpenCL has its own thing going on with destructors. It's slightly
+    // broken, but we allow it.
+ if (!S.LangOpts.OpenCL) {
+ PartialDiagnostic Diag = S.PDiag(Msg) << Record;
+ OCS.NoteCandidates(PartialDiagnosticAt(Loc, Diag), S, DisplayKind, {});
+ Record->setInvalidDecl();
+ }
+    // It's a bit hacky: at this point we've raised an error, but we want the
+    // rest of the compiler to keep working somehow. However, almost everything
+    // we'll try to do with the class will depend on there being a destructor,
+    // so let's pretend the first one is selected and hope for the best.
+ Record->addedSelectedDestructor(dyn_cast<CXXDestructorDecl>(OCS.begin()->Function));
+ }
+}
+} // namespace
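The overload-resolution step above exists for C++20 prospective destructors ([class.dtor]p4). A hedged sketch of a class where a destructor actually has to be selected at the closing brace:

    #include <type_traits>
    template <typename T>
    struct Box {
      ~Box() requires std::is_trivially_destructible_v<T> = default;  // prospective
      ~Box() { /* non-trivial cleanup */ }                            // prospective
    };  // here ComputeSelectedDestructor picks exactly one of the two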
+
void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac,
@@ -17478,6 +17829,9 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
RecordDecl *Record = dyn_cast<RecordDecl>(EnclosingDecl);
CXXRecordDecl *CXXRecord = dyn_cast<CXXRecordDecl>(EnclosingDecl);
+ if (CXXRecord && !CXXRecord->isDependentType())
+ ComputeSelectedDestructor(*this, CXXRecord);
+
// Start counting up the number of named members; make sure to include
// members of anonymous structs and unions in the total.
unsigned NumNamedMembers = 0;
@@ -17772,6 +18126,33 @@ void Sema::ActOnFields(Scope *S, SourceLocation RecLoc, Decl *EnclosingDecl,
// Handle attributes before checking the layout.
ProcessDeclAttributeList(S, Record, Attrs);
+ // Check to see if a FieldDecl is a pointer to a function.
+ auto IsFunctionPointer = [&](const Decl *D) {
+ const FieldDecl *FD = dyn_cast<FieldDecl>(D);
+ if (!FD)
+ return false;
+ QualType FieldType = FD->getType().getDesugaredType(Context);
+ if (isa<PointerType>(FieldType)) {
+ QualType PointeeType = cast<PointerType>(FieldType)->getPointeeType();
+ return PointeeType.getDesugaredType(Context)->isFunctionType();
+ }
+ return false;
+ };
+
+ // Maybe randomize the record's decls. We automatically randomize a record
+ // of function pointers, unless it has the "no_randomize_layout" attribute.
+ if (!getLangOpts().CPlusPlus &&
+ (Record->hasAttr<RandomizeLayoutAttr>() ||
+ (!Record->hasAttr<NoRandomizeLayoutAttr>() &&
+ llvm::all_of(Record->decls(), IsFunctionPointer))) &&
+ !Record->isUnion() && !getLangOpts().RandstructSeed.empty() &&
+ !Record->isRandomized()) {
+ SmallVector<Decl *, 32> NewDeclOrdering;
+ if (randstruct::randomizeStructureLayout(Context, Record,
+ NewDeclOrdering))
+ Record->reorderDecls(NewDeclOrdering);
+ }
+
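A sketch of the records this automatic path matches; note the feature is C-only (the !CPlusPlus guard above) and only active when a -frandomize-layout-seed is supplied. Names are illustrative.

    struct ops {                      // every member is a function pointer,
      int  (*open)(const char *);     // so the layout is randomized
      void (*close)(int);             // automatically under randstruct...
    };
    struct fixed_ops {
      int (*open)(const char *);
    } __attribute__((no_randomize_layout));  // ...unless opted out like this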
// We may have deferred checking for a deleted destructor. Check now.
if (CXXRecord) {
auto *Dtor = CXXRecord->getDestructor();
@@ -18390,7 +18771,7 @@ bool Sema::IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
const auto &EVal = E->getInitVal();
// Only single-bit enumerators introduce new flag values.
if (EVal.isPowerOf2())
- FlagBits = FlagBits.zextOrSelf(EVal.getBitWidth()) | EVal;
+ FlagBits = FlagBits.zext(EVal.getBitWidth()) | EVal;
}
}
@@ -18439,9 +18820,6 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
unsigned NumNegativeBits = 0;
unsigned NumPositiveBits = 0;
- // Keep track of whether all elements have type int.
- bool AllElementsInt = true;
-
for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
EnumConstantDecl *ECD =
cast_or_null<EnumConstantDecl>(Elements[i]);
@@ -18456,10 +18834,6 @@ void Sema::ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
else
NumNegativeBits = std::max(NumNegativeBits,
(unsigned)InitVal.getMinSignedBits());
-
- // Keep track of whether every enum element has type int (very common).
- if (AllElementsInt)
- AllElementsInt = ECD->getType() == Context.IntTy;
}
// Figure out the type that should be used for this enum.
@@ -18687,9 +19061,7 @@ void Sema::ActOnPragmaWeakID(IdentifierInfo* Name,
if (PrevDecl) {
PrevDecl->addAttr(WeakAttr::CreateImplicit(Context, PragmaLoc, AttributeCommonInfo::AS_Pragma));
} else {
- (void)WeakUndeclaredIdentifiers.insert(
- std::pair<IdentifierInfo*,WeakInfo>
- (Name, WeakInfo((IdentifierInfo*)nullptr, NameLoc)));
+ (void)WeakUndeclaredIdentifiers[Name].insert(WeakInfo(nullptr, NameLoc));
}
}
@@ -18707,12 +19079,11 @@ void Sema::ActOnPragmaWeakAlias(IdentifierInfo* Name,
if (NamedDecl *ND = dyn_cast<NamedDecl>(PrevDecl))
DeclApplyPragmaWeak(TUScope, ND, W);
} else {
- (void)WeakUndeclaredIdentifiers.insert(
- std::pair<IdentifierInfo*,WeakInfo>(AliasName, W));
+ (void)WeakUndeclaredIdentifiers[AliasName].insert(W);
}
}
-Decl *Sema::getObjCDeclContext() const {
+ObjCContainerDecl *Sema::getObjCDeclContext() const {
return (dyn_cast_or_null<ObjCContainerDecl>(CurContext));
}
@@ -18749,12 +19120,12 @@ Sema::FunctionEmissionStatus Sema::getEmissionStatus(FunctionDecl *FD,
// #pragma omp declare target to(*) device_type(*).
// Therefore DevTy having no value does not imply host. The emission status
  // will be checked again at the end of the compilation unit with Final = true.
- if (DevTy.hasValue())
+ if (DevTy)
if (*DevTy == OMPDeclareTargetDeclAttr::DT_Host)
return FunctionEmissionStatus::OMPDiscarded;
// If we have an explicit value for the device type, or we are in a target
// declare context, we need to emit all extern and used symbols.
- if (isInOpenMPDeclareTargetContext() || DevTy.hasValue())
+ if (isInOpenMPDeclareTargetContext() || DevTy)
if (IsEmittedForExternalSymbol())
return FunctionEmissionStatus::Emitted;
// Device mode only emits what it must, if it wasn't tagged yet and needed,
@@ -18767,7 +19138,7 @@ Sema::FunctionEmissionStatus Sema::getEmissionStatus(FunctionDecl *FD,
  // be omitted.
Optional<OMPDeclareTargetDeclAttr::DevTypeTy> DevTy =
OMPDeclareTargetDeclAttr::getDeviceType(FD->getCanonicalDecl());
- if (DevTy.hasValue())
+ if (DevTy)
if (*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost)
return FunctionEmissionStatus::OMPDiscarded;
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp
index e76e7c608e0c..f79523983ed8 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDeclAttr.cpp
@@ -24,6 +24,7 @@
#include "clang/AST/Type.h"
#include "clang/Basic/CharInfo.h"
#include "clang/Basic/DarwinSDKInfo.h"
+#include "clang/Basic/LangOptions.h"
#include "clang/Basic/SourceLocation.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetBuiltins.h"
@@ -334,6 +335,26 @@ static bool checkFunctionOrMethodParameterIndex(
return true;
}
+/// Check if the argument \p E is an ASCII string literal. If not, emit an
+/// error and return false; otherwise set \p Str to the value of the string
+/// literal and return true.
+bool Sema::checkStringLiteralArgumentAttr(const AttributeCommonInfo &CI,
+ const Expr *E, StringRef &Str,
+ SourceLocation *ArgLocation) {
+ const auto *Literal = dyn_cast<StringLiteral>(E->IgnoreParenCasts());
+ if (ArgLocation)
+ *ArgLocation = E->getBeginLoc();
+
+ if (!Literal || !Literal->isOrdinary()) {
+ Diag(E->getBeginLoc(), diag::err_attribute_argument_type)
+ << CI << AANT_ArgumentString;
+ return false;
+ }
+
+ Str = Literal->getString();
+ return true;
+}
+
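A hedged usage sketch of the new Expr-based overload (caller names illustrative); the ArgNum-based overload below now simply forwards to it:

    StringRef Str;
    SourceLocation Loc;
    if (!S.checkStringLiteralArgumentAttr(CI, AL.getArgAsExpr(0), Str, &Loc))
      return;  // not an ordinary string literal: error already emitted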
/// Check if the argument \p ArgNum of \p Attr is an ASCII string literal.
/// If not emit an error and return false. If the argument is an identifier it
/// will emit an error with a fixit hint and treat it as if it was a string
@@ -356,18 +377,7 @@ bool Sema::checkStringLiteralArgumentAttr(const ParsedAttr &AL, unsigned ArgNum,
// Now check for an actual string literal.
Expr *ArgExpr = AL.getArgAsExpr(ArgNum);
- const auto *Literal = dyn_cast<StringLiteral>(ArgExpr->IgnoreParenCasts());
- if (ArgLocation)
- *ArgLocation = ArgExpr->getBeginLoc();
-
- if (!Literal || !Literal->isAscii()) {
- Diag(ArgExpr->getBeginLoc(), diag::err_attribute_argument_type)
- << AL << AANT_ArgumentString;
- return false;
- }
-
- Str = Literal->getString();
- return true;
+ return checkStringLiteralArgumentAttr(AL, ArgExpr, Str, ArgLocation);
}
/// Applies the given attribute to the Decl without performing any
@@ -610,7 +620,7 @@ static void checkAttrArgsAreCapabilityObjs(Sema &S, Decl *D,
if (const auto *StrLit = dyn_cast<StringLiteral>(ArgExp)) {
if (StrLit->getLength() == 0 ||
- (StrLit->isAscii() && StrLit->getString() == StringRef("*"))) {
+ (StrLit->isOrdinary() && StrLit->getString() == StringRef("*"))) {
// Pass empty strings to the analyzer without warnings.
// Treat "*" as the universal lock.
Args.push_back(ArgExp);
@@ -1394,9 +1404,9 @@ static void handlePackedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
FD->isBitField() &&
S.Context.getTypeAlign(FD->getType()) <= 8);
- if (S.getASTContext().getTargetInfo().getTriple().isPS4()) {
+ if (S.getASTContext().getTargetInfo().getTriple().isPS()) {
if (BitfieldByteAligned)
- // The PS4 target needs to maintain ABI backwards compatibility.
+ // The PS4/PS5 targets need to maintain ABI backwards compatibility.
S.Diag(AL.getLoc(), diag::warn_attribute_ignored_for_field_of_type)
<< AL << FD->getType();
else
@@ -2149,6 +2159,14 @@ static void handleNakedAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
<< AL << Triple.getArchName();
return;
}
+
+ // This form is not allowed to be written on a member function (static or
+ // nonstatic) when in Microsoft compatibility mode.
+ if (S.getLangOpts().MSVCCompat && isa<CXXMethodDecl>(D)) {
+ S.Diag(AL.getLoc(), diag::err_attribute_wrong_decl_type_str)
+ << AL << "non-member functions";
+ return;
+ }
}
D->addAttr(::new (S.Context) NakedAttr(S.Context, AL));
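A sketch of the newly rejected form; under -fms-compatibility, MSVC itself disallows naked member functions, so clang now follows suit:

    struct S {
      __attribute__((naked)) void f();  // error with -fms-compatibility:
    };                                  // attribute only applies to non-member functions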
@@ -2166,6 +2184,21 @@ static void handleNoReturnAttr(Sema &S, Decl *D, const ParsedAttr &Attrs) {
D->addAttr(::new (S.Context) NoReturnAttr(S.Context, Attrs));
}
+static void handleStandardNoReturnAttr(Sema &S, Decl *D, const ParsedAttr &A) {
+ // The [[_Noreturn]] spelling is deprecated in C2x, so if that was used,
+ // issue an appropriate diagnostic. However, don't issue a diagnostic if the
+ // attribute name comes from a macro expansion. We don't want to punish users
+ // who write [[noreturn]] after including <stdnoreturn.h> (where 'noreturn'
+ // is defined as a macro which expands to '_Noreturn').
+ if (!S.getLangOpts().CPlusPlus &&
+ A.getSemanticSpelling() == CXX11NoReturnAttr::C2x_Noreturn &&
+ !(A.getLoc().isMacroID() &&
+ S.getSourceManager().isInSystemMacro(A.getLoc())))
+ S.Diag(A.getLoc(), diag::warn_deprecated_noreturn_spelling) << A.getRange();
+
+ D->addAttr(::new (S.Context) CXX11NoReturnAttr(S.Context, A));
+}
+
static void handleNoCfCheckAttr(Sema &S, Decl *D, const ParsedAttr &Attrs) {
if (!S.getLangOpts().CFProtectionBranch)
S.Diag(Attrs.getLoc(), diag::warn_nocf_check_attribute_ignored);
@@ -2647,8 +2680,8 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
auto Major = Version.getMajor();
auto NewMajor = Major >= 9 ? Major - 7 : 0;
if (NewMajor >= 2) {
- if (Version.getMinor().hasValue()) {
- if (Version.getSubminor().hasValue())
+ if (Version.getMinor()) {
+ if (Version.getSubminor())
return VersionTuple(NewMajor, Version.getMinor().getValue(),
Version.getSubminor().getValue());
else
@@ -2696,7 +2729,7 @@ static void handleAvailabilityAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (IOSToTvOSMapping) {
if (auto MappedVersion =
IOSToTvOSMapping->map(Version, VersionTuple(0, 0), None)) {
- return MappedVersion.getValue();
+ return *MappedVersion;
}
}
return Version;
@@ -3123,6 +3156,14 @@ static void handleWarnUnusedResult(Sema &S, Decl *D, const ParsedAttr &AL) {
S.Diag(AL.getLoc(), diag::ext_cxx17_attr) << AL;
}
+ if ((!AL.isGNUAttribute() &&
+ !(AL.isStandardAttributeSyntax() && AL.isClangScope())) &&
+ isa<TypedefNameDecl>(D)) {
+ S.Diag(AL.getLoc(), diag::warn_unused_result_typedef_unsupported_spelling)
+ << AL.isGNUScope();
+ return;
+ }
+
D->addAttr(::new (S.Context) WarnUnusedResultAttr(S.Context, AL, Str));
}
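The check above keeps the GNU and [[clang::]] spellings working on typedefs while warning on other spellings. A hedged sketch:

    typedef int fd_t  __attribute__((warn_unused_result));  // still supported
    typedef int fd2_t [[clang::warn_unused_result]];        // still supported
    typedef int fd3_t [[nodiscard]];                        // warns: spelling unsupported on a typedef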
@@ -3457,7 +3498,7 @@ bool Sema::checkTargetClonesAttrString(SourceLocation LiteralLoc, StringRef Str,
return Diag(CurLoc, diag::warn_unsupported_target_attribute)
<< Unsupported << None << Cur << TargetClones;
- if (llvm::find(Strings, Cur) != Strings.end() || DefaultIsDupe)
+ if (llvm::is_contained(Strings, Cur) || DefaultIsDupe)
Diag(CurLoc, diag::warn_target_clone_duplicate_options);
// Note: Add even if there are duplicates, since it changes name mangling.
Strings.push_back(Cur);
@@ -3628,8 +3669,7 @@ static void handleFormatArgAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
(!Ty->isPointerType() ||
!Ty->castAs<PointerType>()->getPointeeType()->isCharType())) {
S.Diag(AL.getLoc(), diag::err_format_attribute_not)
- << "a string type" << IdxExpr->getSourceRange()
- << getFunctionOrMethodParamRange(D, 0);
+ << IdxExpr->getSourceRange() << getFunctionOrMethodParamRange(D, 0);
return;
}
Ty = getFunctionOrMethodResultType(D);
@@ -3829,27 +3869,12 @@ static void handleFormatAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// make sure the format string is really a string
QualType Ty = getFunctionOrMethodParamType(D, ArgIdx);
- if (Kind == CFStringFormat) {
- if (!isCFStringType(Ty, S.Context)) {
- S.Diag(AL.getLoc(), diag::err_format_attribute_not)
- << "a CFString" << IdxExpr->getSourceRange()
- << getFunctionOrMethodParamRange(D, ArgIdx);
- return;
- }
- } else if (Kind == NSStringFormat) {
- // FIXME: do we need to check if the type is NSString*? What are the
- // semantics?
- if (!isNSStringType(Ty, S.Context, /*AllowNSAttributedString=*/true)) {
- S.Diag(AL.getLoc(), diag::err_format_attribute_not)
- << "an NSString" << IdxExpr->getSourceRange()
- << getFunctionOrMethodParamRange(D, ArgIdx);
- return;
- }
- } else if (!Ty->isPointerType() ||
- !Ty->castAs<PointerType>()->getPointeeType()->isCharType()) {
+ if (!isNSStringType(Ty, S.Context, true) &&
+ !isCFStringType(Ty, S.Context) &&
+ (!Ty->isPointerType() ||
+ !Ty->castAs<PointerType>()->getPointeeType()->isCharType())) {
S.Diag(AL.getLoc(), diag::err_format_attribute_not)
- << "a string type" << IdxExpr->getSourceRange()
- << getFunctionOrMethodParamRange(D, ArgIdx);
+ << IdxExpr->getSourceRange() << getFunctionOrMethodParamRange(D, ArgIdx);
return;
}
@@ -4122,48 +4147,10 @@ static void handleTransparentUnionAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
void Sema::AddAnnotationAttr(Decl *D, const AttributeCommonInfo &CI,
StringRef Str, MutableArrayRef<Expr *> Args) {
auto *Attr = AnnotateAttr::Create(Context, Str, Args.data(), Args.size(), CI);
- llvm::SmallVector<PartialDiagnosticAt, 8> Notes;
- for (unsigned Idx = 0; Idx < Attr->args_size(); Idx++) {
- Expr *&E = Attr->args_begin()[Idx];
- assert(E && "error are handled before");
- if (E->isValueDependent() || E->isTypeDependent())
- continue;
-
- if (E->getType()->isArrayType())
- E = ImpCastExprToType(E, Context.getPointerType(E->getType()),
- clang::CK_ArrayToPointerDecay)
- .get();
- if (E->getType()->isFunctionType())
- E = ImplicitCastExpr::Create(Context,
- Context.getPointerType(E->getType()),
- clang::CK_FunctionToPointerDecay, E, nullptr,
- VK_PRValue, FPOptionsOverride());
- if (E->isLValue())
- E = ImplicitCastExpr::Create(Context, E->getType().getNonReferenceType(),
- clang::CK_LValueToRValue, E, nullptr,
- VK_PRValue, FPOptionsOverride());
-
- Expr::EvalResult Eval;
- Notes.clear();
- Eval.Diag = &Notes;
-
- bool Result =
- E->EvaluateAsConstantExpr(Eval, Context);
-
- /// Result means the expression can be folded to a constant.
- /// Note.empty() means the expression is a valid constant expression in the
- /// current language mode.
- if (!Result || !Notes.empty()) {
- Diag(E->getBeginLoc(), diag::err_attribute_argument_n_type)
- << CI << (Idx + 1) << AANT_ArgumentConstantExpr;
- for (auto &Note : Notes)
- Diag(Note.first, Note.second);
- return;
- }
- assert(Eval.Val.hasValue());
- E = ConstantExpr::Create(Context, E, Eval.Val);
+ if (ConstantFoldAttrArgs(
+ CI, MutableArrayRef<Expr *>(Attr->args_begin(), Attr->args_end()))) {
+ D->addAttr(Attr);
}
- D->addAttr(Attr);
}
static void handleAnnotateAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
@@ -4266,6 +4253,9 @@ void Sema::AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
// declared with the register storage class specifier. An
// alignment-specifier may also be applied to the declaration of a class
// or enumeration type.
+ // CWG 2354:
+ // CWG agreed to remove permission for alignas to be applied to
+ // enumerations.
// C11 6.7.5/2:
// An alignment attribute shall not be specified in a declaration of
// a typedef, or a bit-field, or a function, or a parameter, or an
@@ -4281,6 +4271,9 @@ void Sema::AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
} else if (const auto *FD = dyn_cast<FieldDecl>(D)) {
if (FD->isBitField())
DiagKind = 3;
+ } else if (const auto *ED = dyn_cast<EnumDecl>(D)) {
+ if (ED->getLangOpts().CPlusPlus)
+ DiagKind = 4;
} else if (!isa<TagDecl>(D)) {
Diag(AttrLoc, diag::err_attribute_wrong_decl_type) << &TmpAttr
<< (TmpAttr.isC11() ? ExpectedVariableOrField
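A one-line sketch of the CWG 2354 case the new EnumDecl branch diagnoses (C++ only, per the getLangOpts().CPlusPlus check):

    enum alignas(8) E { A };  // error in C++ since CWG 2354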
@@ -4985,6 +4978,12 @@ static void handleCallConvAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
case ParsedAttr::AT_AArch64VectorPcs:
D->addAttr(::new (S.Context) AArch64VectorPcsAttr(S.Context, AL));
return;
+ case ParsedAttr::AT_AArch64SVEPcs:
+ D->addAttr(::new (S.Context) AArch64SVEPcsAttr(S.Context, AL));
+ return;
+ case ParsedAttr::AT_AMDGPUKernelCall:
+ D->addAttr(::new (S.Context) AMDGPUKernelCallAttr(S.Context, AL));
+ return;
case ParsedAttr::AT_IntelOclBicc:
D->addAttr(::new (S.Context) IntelOclBiccAttr(S.Context, AL));
return;
@@ -5082,6 +5081,21 @@ static void handleLifetimeCategoryAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
}
}
+static void handleRandomizeLayoutAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ if (checkAttrMutualExclusion<NoRandomizeLayoutAttr>(S, D, AL))
+ return;
+ if (!D->hasAttr<RandomizeLayoutAttr>())
+ D->addAttr(::new (S.Context) RandomizeLayoutAttr(S.Context, AL));
+}
+
+static void handleNoRandomizeLayoutAttr(Sema &S, Decl *D,
+ const ParsedAttr &AL) {
+ if (checkAttrMutualExclusion<RandomizeLayoutAttr>(S, D, AL))
+ return;
+ if (!D->hasAttr<NoRandomizeLayoutAttr>())
+ D->addAttr(::new (S.Context) NoRandomizeLayoutAttr(S.Context, AL));
+}
+
bool Sema::CheckCallingConvAttr(const ParsedAttr &Attrs, CallingConv &CC,
const FunctionDecl *FD) {
if (Attrs.isInvalid())
@@ -5127,6 +5141,12 @@ bool Sema::CheckCallingConvAttr(const ParsedAttr &Attrs, CallingConv &CC,
case ParsedAttr::AT_AArch64VectorPcs:
CC = CC_AArch64VectorCall;
break;
+ case ParsedAttr::AT_AArch64SVEPcs:
+ CC = CC_AArch64SVEPCS;
+ break;
+ case ParsedAttr::AT_AMDGPUKernelCall:
+ CC = CC_AMDGPUKernelCall;
+ break;
case ParsedAttr::AT_RegCall:
CC = CC_X86RegCall;
break;
@@ -5609,11 +5629,12 @@ static void handleBuiltinAliasAttr(Sema &S, Decl *D,
bool IsAArch64 = S.Context.getTargetInfo().getTriple().isAArch64();
bool IsARM = S.Context.getTargetInfo().getTriple().isARM();
bool IsRISCV = S.Context.getTargetInfo().getTriple().isRISCV();
+ bool IsHLSL = S.Context.getLangOpts().HLSL;
if ((IsAArch64 && !ArmSveAliasValid(S.Context, BuiltinID, AliasName)) ||
(IsARM && !ArmMveAliasValid(BuiltinID, AliasName) &&
!ArmCdeAliasValid(BuiltinID, AliasName)) ||
(IsRISCV && !RISCVAliasValid(BuiltinID, AliasName)) ||
- (!IsAArch64 && !IsARM && !IsRISCV)) {
+ (!IsAArch64 && !IsARM && !IsRISCV && !IsHLSL)) {
S.Diag(AL.getLoc(), diag::err_attribute_builtin_alias) << AL;
return;
}
@@ -6798,6 +6819,127 @@ static void handleUuidAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(UA);
}
+static void handleHLSLNumThreadsAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ using llvm::Triple;
+ Triple Target = S.Context.getTargetInfo().getTriple();
+ if (!llvm::is_contained({Triple::Compute, Triple::Mesh, Triple::Amplification,
+ Triple::Library},
+ Target.getEnvironment())) {
+ uint32_t Pipeline =
+ (uint32_t)S.Context.getTargetInfo().getTriple().getEnvironment() -
+ (uint32_t)llvm::Triple::Pixel;
+ S.Diag(AL.getLoc(), diag::err_hlsl_attr_unsupported_in_stage)
+ << AL << Pipeline << "Compute, Amplification, Mesh or Library";
+ return;
+ }
+
+ llvm::VersionTuple SMVersion = Target.getOSVersion();
+ uint32_t ZMax = 1024;
+ uint32_t ThreadMax = 1024;
+ if (SMVersion.getMajor() <= 4) {
+ ZMax = 1;
+ ThreadMax = 768;
+ } else if (SMVersion.getMajor() == 5) {
+ ZMax = 64;
+ ThreadMax = 1024;
+ }
+
+ uint32_t X;
+ if (!checkUInt32Argument(S, AL, AL.getArgAsExpr(0), X))
+ return;
+ if (X > 1024) {
+ S.Diag(AL.getArgAsExpr(0)->getExprLoc(),
+ diag::err_hlsl_numthreads_argument_oor) << 0 << 1024;
+ return;
+ }
+ uint32_t Y;
+ if (!checkUInt32Argument(S, AL, AL.getArgAsExpr(1), Y))
+ return;
+ if (Y > 1024) {
+ S.Diag(AL.getArgAsExpr(1)->getExprLoc(),
+ diag::err_hlsl_numthreads_argument_oor) << 1 << 1024;
+ return;
+ }
+ uint32_t Z;
+ if (!checkUInt32Argument(S, AL, AL.getArgAsExpr(2), Z))
+ return;
+ if (Z > ZMax) {
+ S.Diag(AL.getArgAsExpr(2)->getExprLoc(),
+ diag::err_hlsl_numthreads_argument_oor) << 2 << ZMax;
+ return;
+ }
+
+ if (X * Y * Z > ThreadMax) {
+ S.Diag(AL.getLoc(), diag::err_hlsl_numthreads_invalid) << ThreadMax;
+ return;
+ }
+
+ HLSLNumThreadsAttr *NewAttr = S.mergeHLSLNumThreadsAttr(D, AL, X, Y, Z);
+ if (NewAttr)
+ D->addAttr(NewAttr);
+}
+
+HLSLNumThreadsAttr *Sema::mergeHLSLNumThreadsAttr(Decl *D,
+ const AttributeCommonInfo &AL,
+ int X, int Y, int Z) {
+ if (HLSLNumThreadsAttr *NT = D->getAttr<HLSLNumThreadsAttr>()) {
+ if (NT->getX() != X || NT->getY() != Y || NT->getZ() != Z) {
+ Diag(NT->getLocation(), diag::err_hlsl_attribute_param_mismatch) << AL;
+ Diag(AL.getLoc(), diag::note_conflicting_attribute);
+ }
+ return nullptr;
+ }
+ return ::new (Context) HLSLNumThreadsAttr(Context, AL, X, Y, Z);
+}
+
+static void handleHLSLSVGroupIndexAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ using llvm::Triple;
+ Triple Target = S.Context.getTargetInfo().getTriple();
+ if (Target.getEnvironment() != Triple::Compute) {
+ uint32_t Pipeline =
+ (uint32_t)S.Context.getTargetInfo().getTriple().getEnvironment() -
+ (uint32_t)llvm::Triple::Pixel;
+ S.Diag(AL.getLoc(), diag::err_hlsl_attr_unsupported_in_stage)
+ << AL << Pipeline << "Compute";
+ return;
+ }
+
+ D->addAttr(::new (S.Context) HLSLSV_GroupIndexAttr(S.Context, AL));
+}
+
+static void handleHLSLShaderAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ StringRef Str;
+ SourceLocation ArgLoc;
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, Str, &ArgLoc))
+ return;
+
+ HLSLShaderAttr::ShaderType ShaderType;
+ if (!HLSLShaderAttr::ConvertStrToShaderType(Str, ShaderType)) {
+ S.Diag(AL.getLoc(), diag::warn_attribute_type_not_supported)
+ << AL << Str << ArgLoc;
+ return;
+ }
+
+ // FIXME: check function match the shader stage.
+
+ HLSLShaderAttr *NewAttr = S.mergeHLSLShaderAttr(D, AL, ShaderType);
+ if (NewAttr)
+ D->addAttr(NewAttr);
+}
+
+HLSLShaderAttr *
+Sema::mergeHLSLShaderAttr(Decl *D, const AttributeCommonInfo &AL,
+ HLSLShaderAttr::ShaderType ShaderType) {
+ if (HLSLShaderAttr *NT = D->getAttr<HLSLShaderAttr>()) {
+ if (NT->getType() != ShaderType) {
+ Diag(NT->getLocation(), diag::err_hlsl_attribute_param_mismatch) << AL;
+ Diag(AL.getLoc(), diag::note_conflicting_attribute);
+ }
+ return nullptr;
+ }
+ return HLSLShaderAttr::Create(Context, ShaderType, AL);
+}
+
static void handleMSInheritanceAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!S.LangOpts.CPlusPlus) {
S.Diag(AL.getLoc(), diag::err_attribute_not_supported_in_lang)
@@ -7718,6 +7860,11 @@ static bool isGlobalVar(const Decl *D) {
return false;
}
+static bool isSanitizerAttributeAllowedOnGlobals(StringRef Sanitizer) {
+ return Sanitizer == "address" || Sanitizer == "hwaddress" ||
+ Sanitizer == "memtag";
+}
+
static void handleNoSanitizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
if (!AL.checkAtLeastNumArgs(S, 1))
return;
@@ -7735,7 +7882,7 @@ static void handleNoSanitizeAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
SanitizerMask() &&
SanitizerName != "coverage")
S.Diag(LiteralLoc, diag::warn_unknown_sanitizer_ignored) << SanitizerName;
- else if (isGlobalVar(D) && SanitizerName != "address")
+ else if (isGlobalVar(D) && !isSanitizerAttributeAllowedOnGlobals(SanitizerName))
S.Diag(D->getLocation(), diag::err_attribute_wrong_decl_type)
<< AL << ExpectedFunctionOrMethod;
Sanitizers.push_back(SanitizerName);
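With the helper above, globals may now opt out of hwaddress and memtag in addition to address; other sanitizer names on a global still diagnose. A sketch:

    __attribute__((no_sanitize("address"))) int g1;  // accepted before and after
    __attribute__((no_sanitize("memtag")))  int g2;  // newly accepted on a global
    __attribute__((no_sanitize("thread")))  int g3;  // still an error on a global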
@@ -7837,6 +7984,24 @@ static void handleOpenCLAccessAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
D->addAttr(::new (S.Context) OpenCLAccessAttr(S.Context, AL));
}
+static void handleZeroCallUsedRegsAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
+ // Check that the argument is a string literal.
+ StringRef KindStr;
+ SourceLocation LiteralLoc;
+ if (!S.checkStringLiteralArgumentAttr(AL, 0, KindStr, &LiteralLoc))
+ return;
+
+ ZeroCallUsedRegsAttr::ZeroCallUsedRegsKind Kind;
+ if (!ZeroCallUsedRegsAttr::ConvertStrToZeroCallUsedRegsKind(KindStr, Kind)) {
+ S.Diag(LiteralLoc, diag::warn_attribute_type_not_supported)
+ << AL << KindStr;
+ return;
+ }
+
+ D->dropAttr<ZeroCallUsedRegsAttr>();
+ D->addAttr(ZeroCallUsedRegsAttr::Create(S.Context, Kind, AL));
+}
+
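A sketch of the accepted spellings; the dropAttr call above makes the last attribute written win rather than diagnosing a duplicate. Kind strings mirror -fzero-call-used-regs:

    __attribute__((zero_call_used_regs("used-gpr")))
    void handle_secret(void);   // zero used general-purpose regs on return
    __attribute__((zero_call_used_regs("bogus")))
    void bad(void);             // warns: kind string not supported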
static void handleSYCLKernelAttr(Sema &S, Decl *D, const ParsedAttr &AL) {
// The 'sycl_kernel' attribute applies only to function templates.
const auto *FD = cast<FunctionDecl>(D);
@@ -8123,18 +8288,55 @@ EnforceTCBLeafAttr *Sema::mergeEnforceTCBLeafAttr(
// Top Level Sema Entry Points
//===----------------------------------------------------------------------===//
+// Returns true if the attribute must delay setting its arguments until after
+// template instantiation, and false otherwise.
+static bool MustDelayAttributeArguments(const ParsedAttr &AL) {
+ // Only attributes that accept expression parameter packs can delay arguments.
+ if (!AL.acceptsExprPack())
+ return false;
+
+ bool AttrHasVariadicArg = AL.hasVariadicArg();
+ unsigned AttrNumArgs = AL.getNumArgMembers();
+ for (size_t I = 0; I < std::min(AL.getNumArgs(), AttrNumArgs); ++I) {
+ bool IsLastAttrArg = I == (AttrNumArgs - 1);
+ // If the argument is the last argument and it is variadic it can contain
+ // any expression.
+ if (IsLastAttrArg && AttrHasVariadicArg)
+ return false;
+ Expr *E = AL.getArgAsExpr(I);
+ bool ArgMemberCanHoldExpr = AL.isParamExpr(I);
+ // If the expression is a pack expansion then arguments must be delayed
+ // unless the argument is an expression and it is the last argument of the
+ // attribute.
+ if (isa<PackExpansionExpr>(E))
+ return !(IsLastAttrArg && ArgMemberCanHoldExpr);
+    // Lastly, if the expression is value-dependent, arguments must be delayed
+    // unless the corresponding argument member is able to hold the expression.
+ if (E->isValueDependent() && !ArgMemberCanHoldExpr)
+ return true;
+ }
+ return false;
+}
+
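An uninstantiated sketch of the shape that takes the delayed path: a pack expansion that may land in a non-expression argument slot cannot be checked until instantiation. Only attributes marked as accepting expression packs (such as clang::annotate) reach this logic, and whether a given instantiation is then valid depends on what the pack expands to.

    template <typename... Ts>
    struct W {
      // Delayed: the pack could expand into the string slot, so argument
      // checking waits until W<...> is instantiated.
      [[clang::annotate(Ts::value...)]] void f();
    };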
/// ProcessDeclAttribute - Apply the specific attribute to the specified decl if
/// the attribute applies to decls. If the attribute is a type attribute, just
/// silently ignore it if it is a GNU attribute.
-static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
- const ParsedAttr &AL,
- bool IncludeCXX11Attributes) {
+static void
+ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D, const ParsedAttr &AL,
+ const Sema::ProcessDeclAttributeOptions &Options) {
if (AL.isInvalid() || AL.getKind() == ParsedAttr::IgnoredAttribute)
return;
// Ignore C++11 attributes on declarator chunks: they appertain to the type
// instead.
- if (AL.isCXX11Attribute() && !IncludeCXX11Attributes)
+ // FIXME: We currently check the attribute syntax directly instead of using
+ // isCXX11Attribute(), which currently erroneously classifies the C11
+ // `_Alignas` attribute as a C++11 attribute. `_Alignas` can appear on the
+ // `DeclSpec`, so we need to let it through here to make sure it is processed
+ // appropriately. Once the behavior of isCXX11Attribute() is fixed, we can
+ // go back to using that here.
+ if (AL.getSyntax() == ParsedAttr::AS_CXX11 && !Options.IncludeCXX11Attributes)
return;
// Unknown attributes are automatically warned on. Target-specific attributes
@@ -8150,22 +8352,93 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
return;
}
- if (S.checkCommonAttributeFeatures(D, AL))
+  // Check if argument population must be delayed until after template
+  // instantiation.
+ bool MustDelayArgs = MustDelayAttributeArguments(AL);
+
+ // Argument number check must be skipped if arguments are delayed.
+ if (S.checkCommonAttributeFeatures(D, AL, MustDelayArgs))
+ return;
+
+ if (MustDelayArgs) {
+ AL.handleAttrWithDelayedArgs(S, D);
return;
+ }
switch (AL.getKind()) {
default:
if (AL.getInfo().handleDeclAttribute(S, D, AL) != ParsedAttrInfo::NotHandled)
break;
if (!AL.isStmtAttr()) {
- // Type attributes are handled elsewhere; silently move on.
assert(AL.isTypeAttr() && "Non-type attribute not handled");
- break;
+ }
+ if (AL.isTypeAttr()) {
+ if (Options.IgnoreTypeAttributes)
+ break;
+ if (!AL.isStandardAttributeSyntax()) {
+ // Non-[[]] type attributes are handled in processTypeAttrs(); silently
+ // move on.
+ break;
+ }
+
+ // According to the C and C++ standards, we should never see a
+ // [[]] type attribute on a declaration. However, we have in the past
+ // allowed some type attributes to "slide" to the `DeclSpec`, so we need
+ // to continue to support this legacy behavior. We only do this, however,
+ // if
+ // - we actually have a `DeclSpec`, i.e. if we're looking at a
+ // `DeclaratorDecl`, or
+ // - we are looking at an alias-declaration, where historically we have
+ // allowed type attributes after the identifier to slide to the type.
+ if (AL.slidesFromDeclToDeclSpecLegacyBehavior() &&
+ isa<DeclaratorDecl, TypeAliasDecl>(D)) {
+ // Suggest moving the attribute to the type instead, but only for our
+ // own vendor attributes; moving other vendors' attributes might hurt
+ // portability.
+ if (AL.isClangScope()) {
+ S.Diag(AL.getLoc(), diag::warn_type_attribute_deprecated_on_decl)
+ << AL << D->getLocation();
+ }
+
+ // Allow this type attribute to be handled in processTypeAttrs();
+ // silently move on.
+ break;
+ }
+
+ if (AL.getKind() == ParsedAttr::AT_Regparm) {
+ // `regparm` is a special case: It's a type attribute but we still want
+ // to treat it as if it had been written on the declaration because that
+ // way we'll be able to handle it directly in `processTypeAttrs()`.
+ // If we treated `regparm` as if it had been written on the
+ // `DeclSpec`, the logic in `distributeFunctionTypeAttrFromDeclSpec()`
+ // would try to move it to the declarator, but that doesn't work: We
+ // can't remove the attribute from the list of declaration attributes
+ // because it might be needed by other declarators in the same
+ // declaration.
+ break;
+ }
+
+ if (AL.getKind() == ParsedAttr::AT_VectorSize) {
+ // `vector_size` is a special case: It's a type attribute semantically,
+ // but GCC expects the [[]] syntax to be written on the declaration (and
+ // warns that the attribute has no effect if it is placed on the
+ // decl-specifier-seq).
+ // Silently move on and allow the attribute to be handled in
+ // processTypeAttrs().
+ break;
+ }
+
+ if (AL.getKind() == ParsedAttr::AT_NoDeref) {
+ // FIXME: `noderef` currently doesn't work correctly in [[]] syntax.
+ // See https://github.com/llvm/llvm-project/issues/55790 for details.
+ // We allow processTypeAttrs() to emit a warning and silently move on.
+ break;
+ }
}
// N.B., ClangAttrEmitter.cpp emits a diagnostic helper that ensures a
// statement attribute is not written on a declaration, but this code is
- // needed for attributes in Attr.td that do not list any subjects.
- S.Diag(AL.getLoc(), diag::err_stmt_attribute_invalid_on_decl)
+ // needed for type attributes as well as statement attributes in Attr.td
+ // that do not list any subjects.
+ S.Diag(AL.getLoc(), diag::err_attribute_invalid_on_decl)
<< AL << D->getLocation();
break;
case ParsedAttr::AT_Interrupt:
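A short C++ sketch of the vector_size position rule described above:

    // Written on the declaration, matching GCC's expectation: allowed to
    // fall through to processTypeAttrs() by the case above.
    [[gnu::vector_size(16)]] int v;

    // Written on the decl-specifier-seq instead: GCC warns that the
    // attribute has no effect in this position.
    int [[gnu::vector_size(16)]] w;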
@@ -8365,6 +8638,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_NoReturn:
handleNoReturnAttr(S, D, AL);
break;
+ case ParsedAttr::AT_CXX11NoReturn:
+ handleStandardNoReturnAttr(S, D, AL);
+ break;
case ParsedAttr::AT_AnyX86NoCfCheck:
handleNoCfCheckAttr(S, D, AL);
break;
@@ -8462,6 +8738,12 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_Section:
handleSectionAttr(S, D, AL);
break;
+ case ParsedAttr::AT_RandomizeLayout:
+ handleRandomizeLayoutAttr(S, D, AL);
+ break;
+ case ParsedAttr::AT_NoRandomizeLayout:
+ handleNoRandomizeLayoutAttr(S, D, AL);
+ break;
case ParsedAttr::AT_CodeSeg:
handleCodeSegAttr(S, D, AL);
break;
@@ -8551,6 +8833,8 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_PreserveMost:
case ParsedAttr::AT_PreserveAll:
case ParsedAttr::AT_AArch64VectorPcs:
+ case ParsedAttr::AT_AArch64SVEPcs:
+ case ParsedAttr::AT_AMDGPUKernelCall:
handleCallConvAttr(S, D, AL);
break;
case ParsedAttr::AT_Suppress:
@@ -8581,6 +8865,9 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
case ParsedAttr::AT_InternalLinkage:
handleInternalLinkageAttr(S, D, AL);
break;
+ case ParsedAttr::AT_ZeroCallUsedRegs:
+ handleZeroCallUsedRegsAttr(S, D, AL);
+ break;
// Microsoft attributes:
case ParsedAttr::AT_LayoutVersion:
@@ -8596,6 +8883,17 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
handleDeclspecThreadAttr(S, D, AL);
break;
+ // HLSL attributes:
+ case ParsedAttr::AT_HLSLNumThreads:
+ handleHLSLNumThreadsAttr(S, D, AL);
+ break;
+ case ParsedAttr::AT_HLSLSV_GroupIndex:
+ handleHLSLSVGroupIndexAttr(S, D, AL);
+ break;
+ case ParsedAttr::AT_HLSLShader:
+ handleHLSLShaderAttr(S, D, AL);
+ break;
+
case ParsedAttr::AT_AbiTag:
handleAbiTagAttr(S, D, AL);
break;
@@ -8786,14 +9084,14 @@ static void ProcessDeclAttribute(Sema &S, Scope *scope, Decl *D,
/// ProcessDeclAttributeList - Apply all the decl attributes in the specified
/// attribute list to the specified decl, ignoring any type attributes.
-void Sema::ProcessDeclAttributeList(Scope *S, Decl *D,
- const ParsedAttributesView &AttrList,
- bool IncludeCXX11Attributes) {
+void Sema::ProcessDeclAttributeList(
+ Scope *S, Decl *D, const ParsedAttributesView &AttrList,
+ const ProcessDeclAttributeOptions &Options) {
if (AttrList.empty())
return;
for (const ParsedAttr &AL : AttrList)
- ProcessDeclAttribute(*this, S, D, AL, IncludeCXX11Attributes);
+ ProcessDeclAttribute(*this, S, D, AL, Options);
// FIXME: We should be able to handle these cases in TableGen.
// GCC accepts
@@ -8881,7 +9179,8 @@ bool Sema::ProcessAccessDeclAttributeList(
AccessSpecDecl *ASDecl, const ParsedAttributesView &AttrList) {
for (const ParsedAttr &AL : AttrList) {
if (AL.getKind() == ParsedAttr::AT_Annotate) {
- ProcessDeclAttribute(*this, nullptr, ASDecl, AL, AL.isCXX11Attribute());
+ ProcessDeclAttribute(*this, nullptr, ASDecl, AL,
+ ProcessDeclAttributeOptions());
} else {
Diag(AL.getLoc(), diag::err_only_annotate_after_access_spec);
return true;
@@ -8914,6 +9213,7 @@ static void checkUnusedDeclAttributes(Sema &S, const ParsedAttributesView &A) {
/// used to build a declaration, complain about any decl attributes
/// which might be lying around on it.
void Sema::checkUnusedDeclAttributes(Declarator &D) {
+ ::checkUnusedDeclAttributes(*this, D.getDeclarationAttributes());
::checkUnusedDeclAttributes(*this, D.getDeclSpec().getAttributes());
::checkUnusedDeclAttributes(*this, D.getAttributes());
for (unsigned i = 0, e = D.getNumTypeObjects(); i != e; ++i)
@@ -8922,8 +9222,8 @@ void Sema::checkUnusedDeclAttributes(Declarator &D) {
/// DeclClonePragmaWeak - clone existing decl (maybe definition),
/// \#pragma weak needs a non-definition decl and source may not have one.
-NamedDecl * Sema::DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
- SourceLocation Loc) {
+NamedDecl *Sema::DeclClonePragmaWeak(NamedDecl *ND, const IdentifierInfo *II,
+ SourceLocation Loc) {
assert(isa<FunctionDecl>(ND) || isa<VarDecl>(ND));
NamedDecl *NewD = nullptr;
if (auto *FD = dyn_cast<FunctionDecl>(ND)) {
@@ -8968,9 +9268,7 @@ NamedDecl * Sema::DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
/// DeclApplyPragmaWeak - A declaration (maybe definition) needs \#pragma weak
/// applied to it, possibly with an alias.
-void Sema::DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W) {
- if (W.getUsed()) return; // only do this once
- W.setUsed(true);
+void Sema::DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, const WeakInfo &W) {
if (W.getAlias()) { // clone decl, impersonate __attribute(weak,alias(...))
IdentifierInfo *NDId = ND->getIdentifier();
NamedDecl *NewD = DeclClonePragmaWeak(ND, W.getAlias(), W.getLocation());
@@ -8997,23 +9295,25 @@ void Sema::ProcessPragmaWeak(Scope *S, Decl *D) {
// It's valid to "forward-declare" #pragma weak, in which case we
// have to do this.
LoadExternalWeakUndeclaredIdentifiers();
- if (!WeakUndeclaredIdentifiers.empty()) {
- NamedDecl *ND = nullptr;
- if (auto *VD = dyn_cast<VarDecl>(D))
- if (VD->isExternC())
- ND = VD;
- if (auto *FD = dyn_cast<FunctionDecl>(D))
- if (FD->isExternC())
- ND = FD;
- if (ND) {
- if (IdentifierInfo *Id = ND->getIdentifier()) {
- auto I = WeakUndeclaredIdentifiers.find(Id);
- if (I != WeakUndeclaredIdentifiers.end()) {
- WeakInfo W = I->second;
- DeclApplyPragmaWeak(S, ND, W);
- WeakUndeclaredIdentifiers[Id] = W;
- }
- }
+ if (WeakUndeclaredIdentifiers.empty())
+ return;
+ NamedDecl *ND = nullptr;
+ if (auto *VD = dyn_cast<VarDecl>(D))
+ if (VD->isExternC())
+ ND = VD;
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
+ if (FD->isExternC())
+ ND = FD;
+ if (!ND)
+ return;
+ if (IdentifierInfo *Id = ND->getIdentifier()) {
+ auto I = WeakUndeclaredIdentifiers.find(Id);
+ if (I != WeakUndeclaredIdentifiers.end()) {
+ auto &WeakInfos = I->second;
+ for (const auto &W : WeakInfos)
+ DeclApplyPragmaWeak(S, ND, W);
+ std::remove_reference_t<decltype(WeakInfos)> EmptyWeakInfos;
+ WeakInfos.swap(EmptyWeakInfos);
}
}
}
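A small C sketch of the pattern this rework enables, assuming (as the new per-identifier vector of WeakInfo suggests) that several #pragma weak directives may alias the same symbol:

    /* Both aliases are applied when the definition of impl appears;
       previously only one WeakInfo per identifier was retained. */
    #pragma weak alias_a = impl
    #pragma weak alias_b = impl
    void impl(void) {}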
@@ -9022,17 +9322,43 @@ void Sema::ProcessPragmaWeak(Scope *S, Decl *D) {
/// it, apply them to D. This is a bit tricky because PD can have attributes
/// specified in many different places, and we need to find and apply them all.
void Sema::ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD) {
+ // Ordering of attributes can be important, so we take care to process
+ // attributes in the order in which they appeared in the source code.
+
+ // First, process attributes that appeared on the declaration itself (but
+ // only if they don't have the legacy behavior of "sliding" to the DeclSpec).
+ ParsedAttributesView NonSlidingAttrs;
+ for (ParsedAttr &AL : PD.getDeclarationAttributes()) {
+ if (AL.slidesFromDeclToDeclSpecLegacyBehavior()) {
+ // Skip processing the attribute, but do check if it appertains to the
+ // declaration. This is needed for the `MatrixType` attribute, which,
+ // despite being a type attribute, defines a `SubjectList` that only
+ // allows it to be used on typedef declarations.
+ AL.diagnoseAppertainsTo(*this, D);
+ } else {
+ NonSlidingAttrs.addAtEnd(&AL);
+ }
+ }
+ ProcessDeclAttributeList(S, D, NonSlidingAttrs);
+
// Apply decl attributes from the DeclSpec if present.
- if (!PD.getDeclSpec().getAttributes().empty())
- ProcessDeclAttributeList(S, D, PD.getDeclSpec().getAttributes());
+ if (!PD.getDeclSpec().getAttributes().empty()) {
+ ProcessDeclAttributeList(S, D, PD.getDeclSpec().getAttributes(),
+ ProcessDeclAttributeOptions()
+ .WithIncludeCXX11Attributes(false)
+ .WithIgnoreTypeAttributes(true));
+ }
// Walk the declarator structure, applying decl attributes that were in a type
// position to the decl itself. This handles cases like:
// int *__attr__(x)** D;
// when X is a decl attribute.
- for (unsigned i = 0, e = PD.getNumTypeObjects(); i != e; ++i)
+ for (unsigned i = 0, e = PD.getNumTypeObjects(); i != e; ++i) {
ProcessDeclAttributeList(S, D, PD.getTypeObject(i).getAttrs(),
- /*IncludeCXX11Attributes=*/false);
+ ProcessDeclAttributeOptions()
+ .WithIncludeCXX11Attributes(false)
+ .WithIgnoreTypeAttributes(true));
+ }
// Finally, apply any attributes on the decl itself.
ProcessDeclAttributeList(S, D, PD.getAttributes());
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp
index 16cdb7e57723..221cbd14da97 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDeclCXX.cpp
@@ -1892,16 +1892,26 @@ static bool CheckConstexprDeclStmt(Sema &SemaRef, const FunctionDecl *Dcl,
if (VD->isStaticLocal()) {
if (Kind == Sema::CheckConstexprKind::Diagnose) {
SemaRef.Diag(VD->getLocation(),
- diag::err_constexpr_local_var_static)
- << isa<CXXConstructorDecl>(Dcl)
- << (VD->getTLSKind() == VarDecl::TLS_Dynamic);
+ SemaRef.getLangOpts().CPlusPlus2b
+ ? diag::warn_cxx20_compat_constexpr_var
+ : diag::ext_constexpr_static_var)
+ << isa<CXXConstructorDecl>(Dcl)
+ << (VD->getTLSKind() == VarDecl::TLS_Dynamic);
+ } else if (!SemaRef.getLangOpts().CPlusPlus2b) {
+ return false;
}
- return false;
}
- if (CheckLiteralType(SemaRef, Kind, VD->getLocation(), VD->getType(),
- diag::err_constexpr_local_var_non_literal_type,
- isa<CXXConstructorDecl>(Dcl)))
+ if (SemaRef.LangOpts.CPlusPlus2b) {
+ CheckLiteralType(SemaRef, Kind, VD->getLocation(), VD->getType(),
+ diag::warn_cxx20_compat_constexpr_var,
+ isa<CXXConstructorDecl>(Dcl),
+ /*variable of non-literal type*/ 2);
+ } else if (CheckLiteralType(
+ SemaRef, Kind, VD->getLocation(), VD->getType(),
+ diag::err_constexpr_local_var_non_literal_type,
+ isa<CXXConstructorDecl>(Dcl))) {
return false;
+ }
if (!VD->getType()->isDependentType() &&
!VD->hasInit() && !VD->isCXXForRangeDecl()) {
if (Kind == Sema::CheckConstexprKind::Diagnose) {
@@ -2021,6 +2031,7 @@ static bool
CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
SmallVectorImpl<SourceLocation> &ReturnStmts,
SourceLocation &Cxx1yLoc, SourceLocation &Cxx2aLoc,
+ SourceLocation &Cxx2bLoc,
Sema::CheckConstexprKind Kind) {
// - its function-body shall be [...] a compound-statement that contains only
switch (S->getStmtClass()) {
@@ -2053,9 +2064,9 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
case Stmt::AttributedStmtClass:
// Attributes on a statement don't affect its formal kind and hence don't
// affect its validity in a constexpr function.
- return CheckConstexprFunctionStmt(SemaRef, Dcl,
- cast<AttributedStmt>(S)->getSubStmt(),
- ReturnStmts, Cxx1yLoc, Cxx2aLoc, Kind);
+ return CheckConstexprFunctionStmt(
+ SemaRef, Dcl, cast<AttributedStmt>(S)->getSubStmt(), ReturnStmts,
+ Cxx1yLoc, Cxx2aLoc, Cxx2bLoc, Kind);
case Stmt::CompoundStmtClass: {
// C++1y allows compound-statements.
@@ -2065,7 +2076,7 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
CompoundStmt *CompStmt = cast<CompoundStmt>(S);
for (auto *BodyIt : CompStmt->body()) {
if (!CheckConstexprFunctionStmt(SemaRef, Dcl, BodyIt, ReturnStmts,
- Cxx1yLoc, Cxx2aLoc, Kind))
+ Cxx1yLoc, Cxx2aLoc, Cxx2bLoc, Kind))
return false;
}
return true;
@@ -2078,11 +2089,11 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
IfStmt *If = cast<IfStmt>(S);
if (!CheckConstexprFunctionStmt(SemaRef, Dcl, If->getThen(), ReturnStmts,
- Cxx1yLoc, Cxx2aLoc, Kind))
+ Cxx1yLoc, Cxx2aLoc, Cxx2bLoc, Kind))
return false;
if (If->getElse() &&
!CheckConstexprFunctionStmt(SemaRef, Dcl, If->getElse(), ReturnStmts,
- Cxx1yLoc, Cxx2aLoc, Kind))
+ Cxx1yLoc, Cxx2aLoc, Cxx2bLoc, Kind))
return false;
return true;
}
@@ -2098,11 +2109,12 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
break;
if (!Cxx1yLoc.isValid())
Cxx1yLoc = S->getBeginLoc();
- for (Stmt *SubStmt : S->children())
+ for (Stmt *SubStmt : S->children()) {
if (SubStmt &&
!CheckConstexprFunctionStmt(SemaRef, Dcl, SubStmt, ReturnStmts,
- Cxx1yLoc, Cxx2aLoc, Kind))
+ Cxx1yLoc, Cxx2aLoc, Cxx2bLoc, Kind))
return false;
+ }
return true;
case Stmt::SwitchStmtClass:
@@ -2113,11 +2125,24 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
// mutation, we can reasonably allow them in C++11 as an extension.
if (!Cxx1yLoc.isValid())
Cxx1yLoc = S->getBeginLoc();
- for (Stmt *SubStmt : S->children())
+ for (Stmt *SubStmt : S->children()) {
+ if (SubStmt &&
+ !CheckConstexprFunctionStmt(SemaRef, Dcl, SubStmt, ReturnStmts,
+ Cxx1yLoc, Cxx2aLoc, Cxx2bLoc, Kind))
+ return false;
+ }
+ return true;
+
+ case Stmt::LabelStmtClass:
+ case Stmt::GotoStmtClass:
+ if (Cxx2bLoc.isInvalid())
+ Cxx2bLoc = S->getBeginLoc();
+ for (Stmt *SubStmt : S->children()) {
if (SubStmt &&
!CheckConstexprFunctionStmt(SemaRef, Dcl, SubStmt, ReturnStmts,
- Cxx1yLoc, Cxx2aLoc, Kind))
+ Cxx1yLoc, Cxx2aLoc, Cxx2bLoc, Kind))
return false;
+ }
return true;
case Stmt::GCCAsmStmtClass:
@@ -2129,7 +2154,7 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
for (Stmt *SubStmt : S->children()) {
if (SubStmt &&
!CheckConstexprFunctionStmt(SemaRef, Dcl, SubStmt, ReturnStmts,
- Cxx1yLoc, Cxx2aLoc, Kind))
+ Cxx1yLoc, Cxx2aLoc, Cxx2bLoc, Kind))
return false;
}
return true;
@@ -2137,9 +2162,9 @@ CheckConstexprFunctionStmt(Sema &SemaRef, const FunctionDecl *Dcl, Stmt *S,
case Stmt::CXXCatchStmtClass:
// Do not bother checking the language mode (already covered by the
// try block check).
- if (!CheckConstexprFunctionStmt(SemaRef, Dcl,
- cast<CXXCatchStmt>(S)->getHandlerBlock(),
- ReturnStmts, Cxx1yLoc, Cxx2aLoc, Kind))
+ if (!CheckConstexprFunctionStmt(
+ SemaRef, Dcl, cast<CXXCatchStmt>(S)->getHandlerBlock(), ReturnStmts,
+ Cxx1yLoc, Cxx2aLoc, Cxx2bLoc, Kind))
return false;
return true;
@@ -2204,20 +2229,27 @@ static bool CheckConstexprFunctionBody(Sema &SemaRef, const FunctionDecl *Dcl,
//
// Note that walking the children here is enough to properly check for
// CompoundStmt and CXXTryStmt body.
- SourceLocation Cxx1yLoc, Cxx2aLoc;
+ SourceLocation Cxx1yLoc, Cxx2aLoc, Cxx2bLoc;
for (Stmt *SubStmt : Body->children()) {
if (SubStmt &&
!CheckConstexprFunctionStmt(SemaRef, Dcl, SubStmt, ReturnStmts,
- Cxx1yLoc, Cxx2aLoc, Kind))
+ Cxx1yLoc, Cxx2aLoc, Cxx2bLoc, Kind))
return false;
}
if (Kind == Sema::CheckConstexprKind::CheckValid) {
// If this is only valid as an extension, report that we don't satisfy the
// constraints of the current language.
- if ((Cxx2aLoc.isValid() && !SemaRef.getLangOpts().CPlusPlus20) ||
+ if ((Cxx2bLoc.isValid() && !SemaRef.getLangOpts().CPlusPlus2b) ||
+ (Cxx2aLoc.isValid() && !SemaRef.getLangOpts().CPlusPlus20) ||
(Cxx1yLoc.isValid() && !SemaRef.getLangOpts().CPlusPlus17))
return false;
+ } else if (Cxx2bLoc.isValid()) {
+ SemaRef.Diag(Cxx2bLoc,
+ SemaRef.getLangOpts().CPlusPlus2b
+ ? diag::warn_cxx20_compat_constexpr_body_invalid_stmt
+ : diag::ext_constexpr_body_invalid_stmt_cxx2b)
+ << isa<CXXConstructorDecl>(Dcl);
} else if (Cxx2aLoc.isValid()) {
SemaRef.Diag(Cxx2aLoc,
SemaRef.getLangOpts().CPlusPlus20
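A C++ sketch of the bodies the new Cxx2bLoc tracking covers (P2242-style labels and gotos; in pre-C++2b modes these hunks emit ext_constexpr_body_invalid_stmt_cxx2b rather than an error):

    constexpr int f(bool b) {
      if (!b)
        goto skip;   // goto/labels now permitted in a constexpr body
      return 1;
    skip:
      return 0;
    }
    static_assert(f(true) == 1);  // the goto is never constant-evaluated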
@@ -2469,6 +2501,11 @@ Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc) {
+ // In HLSL, unspecified class access is public rather than private.
+ if (getLangOpts().HLSL && Class->getTagKind() == TTK_Class &&
+ Access == AS_none)
+ Access = AS_public;
+
QualType BaseType = TInfo->getType();
if (BaseType->containsErrors()) {
// Already emitted a diagnostic when parsing the error type.
@@ -2615,12 +2652,11 @@ Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
/// example:
/// class foo : public bar, virtual private baz {
/// 'public bar' and 'virtual private baz' are each base-specifiers.
-BaseResult
-Sema::ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange,
- ParsedAttributes &Attributes,
- bool Virtual, AccessSpecifier Access,
- ParsedType basetype, SourceLocation BaseLoc,
- SourceLocation EllipsisLoc) {
+BaseResult Sema::ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange,
+ const ParsedAttributesView &Attributes,
+ bool Virtual, AccessSpecifier Access,
+ ParsedType basetype, SourceLocation BaseLoc,
+ SourceLocation EllipsisLoc) {
if (!classdecl)
return true;
@@ -3395,6 +3431,15 @@ Sema::ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D,
return nullptr;
}
+ if (D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId) {
+ Diag(D.getIdentifierLoc(), diag::err_member_with_template_arguments)
+ << II
+ << SourceRange(D.getName().TemplateId->LAngleLoc,
+ D.getName().TemplateId->RAngleLoc)
+ << D.getName().TemplateId->LAngleLoc;
+ D.SetIdentifier(II, Loc);
+ }
+
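A tiny C++ sketch of the declarator shape this new check rejects (hypothetical member; recovery drops the template arguments):

    struct S {
      int v<int>;   // error: member cannot be declared with template
                    // arguments; treated as 'int v;' afterwards
    };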
if (SS.isSet() && !SS.isInvalid()) {
// The user provided a superfluous scope specifier inside a class
// definition:
@@ -4262,6 +4307,15 @@ Sema::BuildMemInitializer(Decl *ConstructorD,
}
}
+ if (getLangOpts().MSVCCompat && !getLangOpts().CPlusPlus20) {
+ auto UnqualifiedBase = R.getAsSingle<ClassTemplateDecl>();
+ if (UnqualifiedBase) {
+ Diag(IdLoc, diag::ext_unqualified_base_class)
+ << SourceRange(IdLoc, Init->getSourceRange().getEnd());
+ BaseType = UnqualifiedBase->getInjectedClassNameSpecialization();
+ }
+ }
+
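A C++ sketch of the Microsoft extension accepted here under -fms-compatibility before C++20, where an unqualified base-class template name in a mem-initializer denotes the injected-class-name specialization:

    template <typename T> struct Base { Base() {} };
    template <typename T> struct Derived : Base<T> {
      Derived() : Base() {}  // 'Base' taken to mean Base<T>; now warned via
                             // ext_unqualified_base_class instead of rejected
    };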
// If no results were found, try to correct typos.
TypoCorrection Corr;
MemInitializerValidatorCCC CCC(ClassDecl);
@@ -5376,8 +5430,7 @@ static void DiagnoseBaseOrMemInitializerOrder(
return;
// Sort based on the ideal order, first in the pair.
- llvm::sort(CorrelatedInitOrder,
- [](auto &LHS, auto &RHS) { return LHS.first < RHS.first; });
+ llvm::sort(CorrelatedInitOrder, llvm::less_first());
// Introduce a new scope as SemaDiagnosticBuilder needs to be destroyed to
// emit the diagnostic before we can try adding notes.
@@ -6640,7 +6693,7 @@ static bool canPassInRegisters(Sema &S, CXXRecordDecl *D,
return false;
for (const CXXMethodDecl *MD : D->methods()) {
- if (MD->isDeleted())
+ if (MD->isDeleted() || MD->isIneligibleOrNotSelected())
continue;
auto *CD = dyn_cast<CXXConstructorDecl>(MD);
@@ -8564,10 +8617,10 @@ bool Sema::CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *FD,
int(1)))
return true;
- if (llvm::find_if(RD->friends(), [&](const FriendDecl *F) {
+ if (llvm::none_of(RD->friends(), [&](const FriendDecl *F) {
return FD->getCanonicalDecl() ==
F->getFriendDecl()->getCanonicalDecl();
- }) == RD->friends().end()) {
+ })) {
Diag(FD->getLocation(), diag::err_defaulted_comparison_not_friend)
<< int(DCK) << int(0) << RD;
Diag(RD->getCanonicalDecl()->getLocation(), diag::note_declared_at);
@@ -9159,13 +9212,12 @@ bool SpecialMemberDeletionInfo::shouldDeleteForField(FieldDecl *FD) {
<< !!ICI << MD->getParent() << FD << FieldType << /*Reference*/0;
return true;
}
- // C++11 [class.ctor]p5: any non-variant non-static data member of
- // const-qualified type (or array thereof) with no
- // brace-or-equal-initializer does not have a user-provided default
- // constructor.
+ // C++11 [class.ctor]p5 (modified by DR2394): any non-variant non-static
+ // data member of const-qualified type (or array thereof) with no
+ // brace-or-equal-initializer is not const-default-constructible.
if (!inUnion() && FieldType.isConstQualified() &&
!FD->hasInClassInitializer() &&
- (!FieldRecord || !FieldRecord->hasUserProvidedDefaultConstructor())) {
+ (!FieldRecord || !FieldRecord->allowConstDefaultInit())) {
if (Diagnose)
S.Diag(FD->getLocation(), diag::note_deleted_default_ctor_uninit_field)
<< !!ICI << MD->getParent() << FD << FD->getType() << /*Const*/1;
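A C++ sketch of the DR2394 change: a const member no longer needs a user-provided default constructor, only a const-default-constructible type:

    struct Empty {};    // implicit (not user-provided) default constructor
    struct Holder {
      const Empty e;    // OK per DR2394: Empty is const-default-constructible
    };
    Holder h;           // previously this deleted Holder's default constructor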
@@ -9720,11 +9772,22 @@ bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
case CXXCopyConstructor:
case CXXCopyAssignment: {
- // Trivial copy operations always have const, non-volatile parameter types.
- ConstArg = true;
const ParmVarDecl *Param0 = MD->getParamDecl(0);
const ReferenceType *RT = Param0->getType()->getAs<ReferenceType>();
- if (!RT || RT->getPointeeType().getCVRQualifiers() != Qualifiers::Const) {
+
+ // When ClangABICompat14 is true, C++ copy constructors are only trivial if
+ // they are not user-provided and their parameter-type-list is equivalent to
+ // the parameter-type-list of an implicit declaration. This maintains the
+ // behavior from before DR2171 was implemented.
+ //
+ // Otherwise, if ClangABICompat14 is false, all copy constructors that are
+ // not user-provided can be trivial, regardless of the qualifiers on the
+ // reference type.
+ const bool ClangABICompat14 = Context.getLangOpts().getClangABICompat() <=
+ LangOptions::ClangABI::Ver14;
+ if (!RT ||
+ ((RT->getPointeeType().getCVRQualifiers() != Qualifiers::Const) &&
+ ClangABICompat14)) {
if (Diagnose)
Diag(Param0->getLocation(), diag::note_nontrivial_param_type)
<< Param0->getSourceRange() << Param0->getType()
@@ -9732,6 +9795,8 @@ bool Sema::SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
Context.getRecordType(RD).withConst());
return false;
}
+
+ ConstArg = RT->getPointeeType().isConstQualified();
break;
}
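A C++ sketch of the copy-constructor shape affected, assuming the DR2171 rule that a defaulted copy constructor taking a non-const reference can now be trivial:

    struct X {
      X() = default;
      X(X &) = default;   // non-const copy constructor, not user-provided
    };
    // Under -fclang-abi-compat=14 or earlier this stays non-trivial to
    // preserve the old ABI; otherwise it may now be treated as trivial.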
@@ -10983,6 +11048,8 @@ void Sema::CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
TemplateName SpecifiedName = RetTST.getTypePtr()->getTemplateName();
bool TemplateMatches =
Context.hasSameTemplateName(SpecifiedName, GuidedTemplate);
+ // FIXME: We should consider other template kinds (using, qualified),
+ // otherwise we will emit bogus diagnostics.
if (SpecifiedName.getKind() == TemplateName::Template && TemplateMatches)
AcceptableReturnType = true;
else {
@@ -11025,6 +11092,11 @@ static void DiagnoseNamespaceInlineMismatch(Sema &S, SourceLocation KeywordLoc,
NamespaceDecl *PrevNS) {
assert(*IsInline != PrevNS->isInline());
+ // 'inline' must appear on the original definition, but not necessarily
+ // on all extension definitions, so the note should point to the first
+ // definition to avoid confusion.
+ PrevNS = PrevNS->getFirstDecl();
+
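A C++ sketch of why the note is redirected to the first definition:

    namespace N {}          // first definition: N is not inline
    namespace N {}          // an extension definition
    inline namespace N {}   // error: inline mismatch; the note now points at
                            // the first definition of N rather than the most
                            // recent redeclaration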
if (PrevNS->isInline())
// The user probably just forgot the 'inline', so suggest that it
// be added back.
@@ -13306,7 +13378,8 @@ void Sema::CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD) {
R.resolveKind();
R.suppressDiagnostics();
- CheckFunctionDeclaration(S, FD, R, /*IsMemberSpecialization*/false);
+ CheckFunctionDeclaration(S, FD, R, /*IsMemberSpecialization*/ false,
+ FD->isThisDeclarationADefinition());
}
void Sema::setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
@@ -13370,14 +13443,13 @@ CXXConstructorDecl *Sema::DeclareImplicitDefaultConstructor(
DefaultCon->setAccess(AS_public);
DefaultCon->setDefaulted();
- if (getLangOpts().CUDA) {
+ setupImplicitSpecialMemberType(DefaultCon, Context.VoidTy, None);
+
+ if (getLangOpts().CUDA)
inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXDefaultConstructor,
DefaultCon,
/* ConstRHS */ false,
/* Diagnose */ false);
- }
-
- setupImplicitSpecialMemberType(DefaultCon, Context.VoidTy, None);
// We don't need to use SpecialMemberIsTrivial here; triviality for default
// constructors is easy to compute.
@@ -13651,14 +13723,13 @@ CXXDestructorDecl *Sema::DeclareImplicitDestructor(CXXRecordDecl *ClassDecl) {
Destructor->setAccess(AS_public);
Destructor->setDefaulted();
- if (getLangOpts().CUDA) {
+ setupImplicitSpecialMemberType(Destructor, Context.VoidTy, None);
+
+ if (getLangOpts().CUDA)
inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXDestructor,
Destructor,
/* ConstRHS */ false,
/* Diagnose */ false);
- }
-
- setupImplicitSpecialMemberType(Destructor, Context.VoidTy, None);
// We don't need to use SpecialMemberIsTrivial here; triviality for
// destructors is easy to compute.
@@ -14291,14 +14362,13 @@ CXXMethodDecl *Sema::DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl) {
CopyAssignment->setDefaulted();
CopyAssignment->setImplicit();
- if (getLangOpts().CUDA) {
+ setupImplicitSpecialMemberType(CopyAssignment, RetType, ArgType);
+
+ if (getLangOpts().CUDA)
inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXCopyAssignment,
CopyAssignment,
/* ConstRHS */ Const,
/* Diagnose */ false);
- }
-
- setupImplicitSpecialMemberType(CopyAssignment, RetType, ArgType);
// Add the parameter to the operator.
ParmVarDecl *FromParam = ParmVarDecl::Create(Context, CopyAssignment,
@@ -14626,14 +14696,13 @@ CXXMethodDecl *Sema::DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl) {
MoveAssignment->setDefaulted();
MoveAssignment->setImplicit();
- if (getLangOpts().CUDA) {
+ setupImplicitSpecialMemberType(MoveAssignment, RetType, ArgType);
+
+ if (getLangOpts().CUDA)
inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXMoveAssignment,
MoveAssignment,
/* ConstRHS */ false,
/* Diagnose */ false);
- }
-
- setupImplicitSpecialMemberType(MoveAssignment, RetType, ArgType);
// Add the parameter to the operator.
ParmVarDecl *FromParam = ParmVarDecl::Create(Context, MoveAssignment,
@@ -14743,8 +14812,7 @@ static void checkMoveAssignmentForRepeatedMove(Sema &S, CXXRecordDecl *Class,
continue;
// We're going to move the base classes of Base. Add them to the list.
- for (auto &BI : Base->bases())
- Worklist.push_back(&BI);
+ llvm::append_range(Worklist, llvm::make_pointer_range(Base->bases()));
}
}
}
@@ -15006,14 +15074,13 @@ CXXConstructorDecl *Sema::DeclareImplicitCopyConstructor(
CopyConstructor->setAccess(AS_public);
CopyConstructor->setDefaulted();
- if (getLangOpts().CUDA) {
+ setupImplicitSpecialMemberType(CopyConstructor, Context.VoidTy, ArgType);
+
+ if (getLangOpts().CUDA)
inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXCopyConstructor,
CopyConstructor,
/* ConstRHS */ Const,
/* Diagnose */ false);
- }
-
- setupImplicitSpecialMemberType(CopyConstructor, Context.VoidTy, ArgType);
// During template instantiation of special member functions we need a
// reliable TypeSourceInfo for the parameter types in order to allow functions
@@ -15146,14 +15213,13 @@ CXXConstructorDecl *Sema::DeclareImplicitMoveConstructor(
MoveConstructor->setAccess(AS_public);
MoveConstructor->setDefaulted();
- if (getLangOpts().CUDA) {
+ setupImplicitSpecialMemberType(MoveConstructor, Context.VoidTy, ArgType);
+
+ if (getLangOpts().CUDA)
inferCUDATargetForImplicitSpecialMember(ClassDecl, CXXMoveConstructor,
MoveConstructor,
/* ConstRHS */ false,
/* Diagnose */ false);
- }
-
- setupImplicitSpecialMemberType(MoveConstructor, Context.VoidTy, ArgType);
// Add the parameter to the constructor.
ParmVarDecl *FromParam = ParmVarDecl::Create(Context, MoveConstructor,
@@ -15287,8 +15353,8 @@ void Sema::DefineImplicitLambdaToFunctionPointerConversion(
VK_LValue, Conv->getLocation());
assert(FunctionRef && "Can't refer to __invoke function?");
Stmt *Return = BuildReturnStmt(Conv->getLocation(), FunctionRef).get();
- Conv->setBody(CompoundStmt::Create(Context, Return, Conv->getLocation(),
- Conv->getLocation()));
+ Conv->setBody(CompoundStmt::Create(Context, Return, FPOptionsOverride(),
+ Conv->getLocation(), Conv->getLocation()));
Conv->markUsed(Context);
Conv->setReferenced();
@@ -15342,8 +15408,8 @@ void Sema::DefineImplicitLambdaToBlockPointerConversion(
// Set the body of the conversion function.
Stmt *ReturnS = Return.get();
- Conv->setBody(CompoundStmt::Create(Context, ReturnS, Conv->getLocation(),
- Conv->getLocation()));
+ Conv->setBody(CompoundStmt::Create(Context, ReturnS, FPOptionsOverride(),
+ Conv->getLocation(), Conv->getLocation()));
Conv->markUsed(Context);
// We're done; notify the mutation listener, if any.
@@ -15865,14 +15931,29 @@ bool Sema::CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl) {
// An operator function cannot have default arguments (8.3.6),
// except where explicitly stated below.
//
- // Only the function-call operator allows default arguments
- // (C++ [over.call]p1).
+ // Only the function-call operator (C++ [over.call]p1) and the subscript
+ // operator (CWG2507) allow default arguments.
if (Op != OO_Call) {
+ ParmVarDecl *FirstDefaultedParam = nullptr;
for (auto Param : FnDecl->parameters()) {
- if (Param->hasDefaultArg())
- return Diag(Param->getLocation(),
+ if (Param->hasDefaultArg()) {
+ FirstDefaultedParam = Param;
+ break;
+ }
+ }
+ if (FirstDefaultedParam) {
+ if (Op == OO_Subscript) {
+ Diag(FnDecl->getLocation(), LangOpts.CPlusPlus2b
+ ? diag::ext_subscript_overload
+ : diag::error_subscript_overload)
+ << FnDecl->getDeclName() << 1
+ << FirstDefaultedParam->getDefaultArgRange();
+ } else {
+ return Diag(FirstDefaultedParam->getLocation(),
diag::err_operator_overload_default_arg)
- << FnDecl->getDeclName() << Param->getDefaultArgRange();
+ << FnDecl->getDeclName()
+ << FirstDefaultedParam->getDefaultArgRange();
+ }
}
}
@@ -15893,10 +15974,10 @@ bool Sema::CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl) {
// described in the rest of this subclause.
unsigned NumParams = FnDecl->getNumParams()
+ (isa<CXXMethodDecl>(FnDecl)? 1 : 0);
- if (Op != OO_Call &&
+ if (Op != OO_Call && Op != OO_Subscript &&
((NumParams == 1 && !CanBeUnaryOperator) ||
- (NumParams == 2 && !CanBeBinaryOperator) ||
- (NumParams < 1) || (NumParams > 2))) {
+ (NumParams == 2 && !CanBeBinaryOperator) || (NumParams < 1) ||
+ (NumParams > 2))) {
// We have the wrong number of parameters.
unsigned ErrorKind;
if (CanBeUnaryOperator && CanBeBinaryOperator) {
@@ -15908,16 +15989,23 @@ bool Sema::CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl) {
"All non-call overloaded operators are unary or binary!");
ErrorKind = 1; // 1 -> binary
}
-
return Diag(FnDecl->getLocation(), diag::err_operator_overload_must_be)
<< FnDecl->getDeclName() << NumParams << ErrorKind;
}
- // Overloaded operators other than operator() cannot be variadic.
+ if (Op == OO_Subscript && NumParams != 2) {
+ Diag(FnDecl->getLocation(), LangOpts.CPlusPlus2b
+ ? diag::ext_subscript_overload
+ : diag::error_subscript_overload)
+ << FnDecl->getDeclName() << (NumParams == 1 ? 0 : 2);
+ }
+
+ // Overloaded operators other than operator() and operator[] cannot be
+ // variadic.
if (Op != OO_Call &&
FnDecl->getType()->castAs<FunctionProtoType>()->isVariadic()) {
return Diag(FnDecl->getLocation(), diag::err_operator_overload_variadic)
- << FnDecl->getDeclName();
+ << FnDecl->getDeclName();
}
// Some operators must be non-static member functions.
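A C++ sketch of the subscript forms these hunks admit in C++2b (multi-parameter operator[] per P2128 and default arguments per CWG2507); earlier modes receive ext_subscript_overload / error_subscript_overload:

    struct Matrix {
      float &operator[](int row, int col = 0);  // two parameters, one defaulted
    };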
@@ -16186,7 +16274,7 @@ Decl *Sema::ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc) {
StringLiteral *Lit = cast<StringLiteral>(LangStr);
- if (!Lit->isAscii()) {
+ if (!Lit->isOrdinary()) {
Diag(LangStr->getExprLoc(), diag::err_language_linkage_spec_not_ascii)
<< LangStr->getSourceRange();
return nullptr;
@@ -16222,7 +16310,12 @@ Decl *Sema::ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc,
if (getLangOpts().CPlusPlusModules && isCurrentModulePurview()) {
Module *GlobalModule =
PushGlobalModuleFragment(ExternLoc, /*IsImplicit=*/true);
- D->setModuleOwnershipKind(Decl::ModuleOwnershipKind::ModulePrivate);
+ /// According to [module.reach]p3.2, a declaration in the global module
+ /// fragment is reachable if it is not discarded, and discarded
+ /// declarations are deleted. It is therefore harmless to mark declarations
+ /// in the global module fragment as reachable here.
+ D->setModuleOwnershipKind(Decl::ModuleOwnershipKind::ReachableWhenImported);
D->setLocalOwningModule(GlobalModule);
}
@@ -16499,8 +16592,13 @@ Decl *Sema::BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
if (!Failed && !Cond) {
SmallString<256> MsgBuffer;
llvm::raw_svector_ostream Msg(MsgBuffer);
- if (AssertMessage)
- AssertMessage->printPretty(Msg, nullptr, getPrintingPolicy());
+ if (AssertMessage) {
+ const auto *MsgStr = cast<StringLiteral>(AssertMessage);
+ if (MsgStr->isOrdinary())
+ Msg << MsgStr->getString();
+ else
+ MsgStr->printPretty(Msg, nullptr, getPrintingPolicy());
+ }
Expr *InnerCond = nullptr;
std::string InnerCondDescription;
@@ -16785,7 +16883,8 @@ Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
// Try to convert the decl specifier to a type. This works for
// friend templates because ActOnTag never produces a ClassTemplateDecl
// for a TUK_Friend.
- Declarator TheDeclarator(DS, DeclaratorContext::Member);
+ Declarator TheDeclarator(DS, ParsedAttributesView::none(),
+ DeclaratorContext::Member);
TypeSourceInfo *TSI = GetTypeForDeclarator(TheDeclarator, S);
QualType T = TSI->getType();
if (TheDeclarator.isInvalidType())
@@ -17319,6 +17418,21 @@ void Sema::DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock) {
}
}
+void Sema::SetFunctionBodyKind(Decl *D, SourceLocation Loc,
+ FnBodyKind BodyKind) {
+ switch (BodyKind) {
+ case FnBodyKind::Delete:
+ SetDeclDeleted(D, Loc);
+ break;
+ case FnBodyKind::Default:
+ SetDeclDefaulted(D, Loc);
+ break;
+ case FnBodyKind::Other:
+ llvm_unreachable(
+ "Parsed function body should be '= delete;' or '= default;'");
+ }
+}
+
bool Sema::CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old) {
const auto *NewFT = New->getType()->castAs<FunctionProtoType>();
@@ -18265,8 +18379,7 @@ void Sema::ActOnStartFunctionDeclarationDeclarator(
}
if (ExplicitParams) {
Info.AutoTemplateParameterDepth = ExplicitParams->getDepth();
- for (NamedDecl *Param : *ExplicitParams)
- Info.TemplateParams.push_back(Param);
+ llvm::append_range(Info.TemplateParams, *ExplicitParams);
Info.NumExplicitTemplateParams = ExplicitParams->size();
} else {
Info.AutoTemplateParameterDepth = TemplateParameterDepth;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaDeclObjC.cpp b/contrib/llvm-project/clang/lib/Sema/SemaDeclObjC.cpp
index d4fefc3d18d8..a574a5539330 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaDeclObjC.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaDeclObjC.cpp
@@ -971,7 +971,7 @@ static bool checkTypeParamListConsistency(Sema &S,
return false;
}
-Decl *Sema::ActOnStartClassInterface(
+ObjCInterfaceDecl *Sema::ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
@@ -1100,7 +1100,8 @@ Decl *Sema::ActOnStartClassInterface(
}
CheckObjCDeclScope(IDecl);
- return ActOnObjCContainerStartDefinition(IDecl);
+ ActOnObjCContainerStartDefinition(IDecl);
+ return IDecl;
}
/// ActOnTypedefedProtocols - this action finds protocol list as part of the
@@ -1208,7 +1209,7 @@ bool Sema::CheckForwardProtocolDeclarationForCircularDependency(
return res;
}
-Decl *Sema::ActOnStartProtocolInterface(
+ObjCProtocolDecl *Sema::ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
@@ -1272,7 +1273,8 @@ Decl *Sema::ActOnStartProtocolInterface(
}
CheckObjCDeclScope(PDecl);
- return ActOnObjCContainerStartDefinition(PDecl);
+ ActOnObjCContainerStartDefinition(PDecl);
+ return PDecl;
}
static bool NestedProtocolHasNoDefinition(ObjCProtocolDecl *PDecl,
@@ -1586,7 +1588,7 @@ void Sema::actOnObjCTypeArgsOrProtocolQualifiers(
DS.SetRangeEnd(loc);
// Form the declarator.
- Declarator D(DS, DeclaratorContext::TypeName);
+ Declarator D(DS, ParsedAttributesView::none(), DeclaratorContext::TypeName);
// If we have a typedef of an Objective-C class type that is missing a '*',
// add the '*'.
@@ -1799,7 +1801,7 @@ Sema::ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
return BuildDeclaratorGroup(DeclsInGroup);
}
-Decl *Sema::ActOnStartCategoryInterface(
+ObjCCategoryDecl *Sema::ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
@@ -1826,7 +1828,8 @@ Decl *Sema::ActOnStartCategoryInterface(
if (!IDecl)
Diag(ClassLoc, diag::err_undef_interface) << ClassName;
- return ActOnObjCContainerStartDefinition(CDecl);
+ ActOnObjCContainerStartDefinition(CDecl);
+ return CDecl;
}
if (!CategoryName && IDecl->getImplementation()) {
@@ -1889,17 +1892,17 @@ Decl *Sema::ActOnStartCategoryInterface(
}
CheckObjCDeclScope(CDecl);
- return ActOnObjCContainerStartDefinition(CDecl);
+ ActOnObjCContainerStartDefinition(CDecl);
+ return CDecl;
}
/// ActOnStartCategoryImplementation - Perform semantic checks on the
/// category implementation declaration and build an ObjCCategoryImplDecl
/// object.
-Decl *Sema::ActOnStartCategoryImplementation(
- SourceLocation AtCatImplLoc,
- IdentifierInfo *ClassName, SourceLocation ClassLoc,
- IdentifierInfo *CatName, SourceLocation CatLoc,
- const ParsedAttributesView &Attrs) {
+ObjCCategoryImplDecl *Sema::ActOnStartCategoryImplementation(
+ SourceLocation AtCatImplLoc, IdentifierInfo *ClassName,
+ SourceLocation ClassLoc, IdentifierInfo *CatName, SourceLocation CatLoc,
+ const ParsedAttributesView &Attrs) {
ObjCInterfaceDecl *IDecl = getObjCInterfaceDecl(ClassName, ClassLoc, true);
ObjCCategoryDecl *CatIDecl = nullptr;
if (IDecl && IDecl->hasDefinition()) {
@@ -1958,15 +1961,14 @@ Decl *Sema::ActOnStartCategoryImplementation(
}
CheckObjCDeclScope(CDecl);
- return ActOnObjCContainerStartDefinition(CDecl);
+ ActOnObjCContainerStartDefinition(CDecl);
+ return CDecl;
}
-Decl *Sema::ActOnStartClassImplementation(
- SourceLocation AtClassImplLoc,
- IdentifierInfo *ClassName, SourceLocation ClassLoc,
- IdentifierInfo *SuperClassname,
- SourceLocation SuperClassLoc,
- const ParsedAttributesView &Attrs) {
+ObjCImplementationDecl *Sema::ActOnStartClassImplementation(
+ SourceLocation AtClassImplLoc, IdentifierInfo *ClassName,
+ SourceLocation ClassLoc, IdentifierInfo *SuperClassname,
+ SourceLocation SuperClassLoc, const ParsedAttributesView &Attrs) {
ObjCInterfaceDecl *IDecl = nullptr;
// Check for another declaration kind with the same name.
NamedDecl *PrevDecl
@@ -2063,8 +2065,10 @@ Decl *Sema::ActOnStartClassImplementation(
ProcessDeclAttributeList(TUScope, IMPDecl, Attrs);
AddPragmaAttributes(TUScope, IMPDecl);
- if (CheckObjCDeclScope(IMPDecl))
- return ActOnObjCContainerStartDefinition(IMPDecl);
+ if (CheckObjCDeclScope(IMPDecl)) {
+ ActOnObjCContainerStartDefinition(IMPDecl);
+ return IMPDecl;
+ }
// Check that there is no duplicate implementation of this class.
if (IDecl->getImplementation()) {
@@ -2090,7 +2094,8 @@ Decl *Sema::ActOnStartClassImplementation(
<< IDecl->getSuperClass()->getDeclName();
}
- return ActOnObjCContainerStartDefinition(IMPDecl);
+ ActOnObjCContainerStartDefinition(IMPDecl);
+ return IMPDecl;
}
Sema::DeclGroupPtrTy
@@ -4532,7 +4537,7 @@ static QualType mergeTypeNullabilityForRedecl(Sema &S, SourceLocation loc,
auto prevNullability = prevType->getNullability(S.Context);
// Easy case: both have nullability.
- if (nullability.hasValue() == prevNullability.hasValue()) {
+ if (nullability.has_value() == prevNullability.has_value()) {
// Neither has nullability; continue.
if (!nullability)
return type;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp
index 29cb4be7b1ba..d8344cfd01f9 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExceptionSpec.cpp
@@ -342,8 +342,7 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
if (!MissingExceptionSpecification)
return ReturnValueOnError;
- const FunctionProtoType *NewProto =
- New->getType()->castAs<FunctionProtoType>();
+ const auto *NewProto = New->getType()->castAs<FunctionProtoType>();
// The new function declaration is only missing an empty exception
// specification "throw()". If the throw() specification came from a
@@ -353,7 +352,7 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
// specifications.
//
// Likewise if the old function is a builtin.
- if (MissingEmptyExceptionSpecification && NewProto &&
+ if (MissingEmptyExceptionSpecification &&
(Old->getLocation().isInvalid() ||
Context.getSourceManager().isInSystemHeader(Old->getLocation()) ||
Old->getBuiltinID()) &&
@@ -364,8 +363,7 @@ bool Sema::CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New) {
return false;
}
- const FunctionProtoType *OldProto =
- Old->getType()->castAs<FunctionProtoType>();
+ const auto *OldProto = Old->getType()->castAs<FunctionProtoType>();
FunctionProtoType::ExceptionSpecInfo ESI = OldProto->getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
@@ -1455,15 +1453,20 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
case Stmt::OMPForSimdDirectiveClass:
case Stmt::OMPMasterDirectiveClass:
case Stmt::OMPMasterTaskLoopDirectiveClass:
+ case Stmt::OMPMaskedTaskLoopDirectiveClass:
case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
+ case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
case Stmt::OMPOrderedDirectiveClass:
case Stmt::OMPCanonicalLoopClass:
case Stmt::OMPParallelDirectiveClass:
case Stmt::OMPParallelForDirectiveClass:
case Stmt::OMPParallelForSimdDirectiveClass:
case Stmt::OMPParallelMasterDirectiveClass:
+ case Stmt::OMPParallelMaskedDirectiveClass:
case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
+ case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
+ case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
case Stmt::OMPParallelSectionsDirectiveClass:
case Stmt::OMPSectionDirectiveClass:
case Stmt::OMPSectionsDirectiveClass:
@@ -1501,6 +1504,10 @@ CanThrowResult Sema::canThrow(const Stmt *S) {
case Stmt::OMPMaskedDirectiveClass:
case Stmt::OMPMetaDirectiveClass:
case Stmt::OMPGenericLoopDirectiveClass:
+ case Stmt::OMPTeamsGenericLoopDirectiveClass:
+ case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
+ case Stmt::OMPParallelGenericLoopDirectiveClass:
+ case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
case Stmt::ReturnStmtClass:
case Stmt::SEHExceptStmtClass:
case Stmt::SEHFinallyStmtClass:
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp
index 85553eccde83..b9ecde6f20a0 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExpr.cpp
@@ -27,11 +27,13 @@
#include "clang/AST/OperationKinds.h"
#include "clang/AST/ParentMapContext.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/Type.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/SourceManager.h"
+#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/Lex/LiteralSupport.h"
#include "clang/Lex/Preprocessor.h"
@@ -50,8 +52,10 @@
#include "clang/Sema/Template.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringExtras.h"
+#include "llvm/Support/Casting.h"
#include "llvm/Support/ConvertUTF.h"
#include "llvm/Support/SaveAndRestore.h"
+#include "llvm/Support/TypeSize.h"
using namespace clang;
using namespace sema;
@@ -773,6 +777,40 @@ ExprResult Sema::UsualUnaryConversions(Expr *E) {
QualType Ty = E->getType();
assert(!Ty.isNull() && "UsualUnaryConversions - missing type");
+ LangOptions::FPEvalMethodKind EvalMethod = CurFPFeatures.getFPEvalMethod();
+ if (EvalMethod != LangOptions::FEM_Source && Ty->isFloatingType() &&
+ (getLangOpts().getFPEvalMethod() !=
+ LangOptions::FPEvalMethodKind::FEM_UnsetOnCommandLine ||
+ PP.getLastFPEvalPragmaLocation().isValid())) {
+ switch (EvalMethod) {
+ default:
+ llvm_unreachable("Unrecognized float evaluation method");
+ break;
+ case LangOptions::FEM_UnsetOnCommandLine:
+ llvm_unreachable("Float evaluation method should be set by now");
+ break;
+ case LangOptions::FEM_Double:
+ if (Context.getFloatingTypeOrder(Context.DoubleTy, Ty) > 0)
+ // Widen the expression to double.
+ return Ty->isComplexType()
+ ? ImpCastExprToType(E,
+ Context.getComplexType(Context.DoubleTy),
+ CK_FloatingComplexCast)
+ : ImpCastExprToType(E, Context.DoubleTy, CK_FloatingCast);
+ break;
+ case LangOptions::FEM_Extended:
+ if (Context.getFloatingTypeOrder(Context.LongDoubleTy, Ty) > 0)
+ // Widen the expression to long double.
+ return Ty->isComplexType()
+ ? ImpCastExprToType(
+ E, Context.getComplexType(Context.LongDoubleTy),
+ CK_FloatingComplexCast)
+ : ImpCastExprToType(E, Context.LongDoubleTy,
+ CK_FloatingCast);
+ break;
+ }
+ }
+
// Half FP have to be promoted to float unless it is natively supported
if (Ty->isHalfType() && !getLangOpts().NativeHalfType)
return ImpCastExprToType(Res.get(), Context.FloatTy, CK_FloatingCast);
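A C sketch of the widening the new block performs, assuming the #pragma clang fp eval_method spelling:

    #pragma clang fp eval_method(double)
    float mul(float a, float b) {
      // FEM_Double: both float operands widen to double, so the multiply is
      // performed in double precision before the result converts back to
      // float for the return.
      return a * b;
    }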
@@ -1615,18 +1653,18 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
ControllingExpr = R.get();
}
+ bool TypeErrorFound = false,
+ IsResultDependent = ControllingExpr->isTypeDependent(),
+ ContainsUnexpandedParameterPack
+ = ControllingExpr->containsUnexpandedParameterPack();
+
// The controlling expression is an unevaluated operand, so side effects are
// likely unintended.
- if (!inTemplateInstantiation() &&
+ if (!inTemplateInstantiation() && !IsResultDependent &&
ControllingExpr->HasSideEffects(Context, false))
Diag(ControllingExpr->getExprLoc(),
diag::warn_side_effects_unevaluated_context);
- bool TypeErrorFound = false,
- IsResultDependent = ControllingExpr->isTypeDependent(),
- ContainsUnexpandedParameterPack
- = ControllingExpr->containsUnexpandedParameterPack();
-
for (unsigned i = 0; i < NumAssocs; ++i) {
if (Exprs[i]->containsUnexpandedParameterPack())
ContainsUnexpandedParameterPack = true;
@@ -1647,6 +1685,34 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
D = diag::err_assoc_type_nonobject;
else if (Types[i]->getType()->isVariablyModifiedType())
D = diag::err_assoc_type_variably_modified;
+ else {
+ // Because the controlling expression undergoes lvalue conversion,
+ // array conversion, and function conversion, an association whose type
+ // is an array type, a function type, or a qualified type can never be
+ // reached. We warn about this so users are less surprised by the
+ // unreachable association. However, we don't have to handle function
+ // types here; a function type is not an object type, so it's handled
+ // above.
+ //
+ // The logic is somewhat different for C++ because C++ has different
+ // lvalue to rvalue conversion rules than C. [conv.lvalue]p1 says,
+ // If T is a non-class type, the type of the prvalue is the cv-
+ // unqualified version of T. Otherwise, the type of the prvalue is T.
+ // The result of these rules is that all qualified types in an
+ // association in C are unreachable, and in C++, only qualified non-
+ // class types are unreachable.
+ unsigned Reason = 0;
+ QualType QT = Types[i]->getType();
+ if (QT->isArrayType())
+ Reason = 1;
+ else if (QT.hasQualifiers() &&
+ (!LangOpts.CPlusPlus || !QT->isRecordType()))
+ Reason = 2;
+
+ if (Reason)
+ Diag(Types[i]->getTypeLoc().getBeginLoc(),
+ diag::warn_unreachable_association)
+ << QT << (Reason - 1);
+ }
if (D != 0) {
Diag(Types[i]->getTypeLoc().getBeginLoc(), D)
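A C sketch of an association the new warning flags as unreachable:

    int pick(int x) {
      return _Generic(x,
          const int: 1,   /* unreachable in C: lvalue conversion strips
                             qualifiers from the controlling expression */
          int: 2);        /* always chosen for an int argument */
    }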
@@ -1687,10 +1753,14 @@ Sema::CreateGenericSelectionExpr(SourceLocation KeyLoc,
SmallVector<unsigned, 1> CompatIndices;
unsigned DefaultIndex = -1U;
+ // Look at the canonical type of the controlling expression in case it was a
+ // deduced type like __auto_type. However, when issuing diagnostics, use the
+ // type the user wrote in source rather than the canonical one.
for (unsigned i = 0; i < NumAssocs; ++i) {
if (!Types[i])
DefaultIndex = i;
- else if (Context.typesAreCompatible(ControllingExpr->getType(),
+ else if (Context.typesAreCompatible(
+ ControllingExpr->getType().getCanonicalType(),
Types[i]->getType()))
CompatIndices.push_back(i);
}
@@ -1797,7 +1867,7 @@ Sema::ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope) {
StringTokLocs.push_back(Tok.getLocation());
QualType CharTy = Context.CharTy;
- StringLiteral::StringKind Kind = StringLiteral::Ascii;
+ StringLiteral::StringKind Kind = StringLiteral::Ordinary;
if (Literal.isWide()) {
CharTy = Context.getWideCharType();
Kind = StringLiteral::Wide;
@@ -2515,9 +2585,10 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
if (R.isAmbiguous())
return ExprError();
- // This could be an implicitly declared function reference (legal in C90,
- // extension in C99, forbidden in C++).
- if (R.empty() && HasTrailingLParen && II && !getLangOpts().CPlusPlus) {
+ // This could be an implicitly declared function reference if the language
+ // mode allows implicitly declared functions.
+ if (R.empty() && HasTrailingLParen && II &&
+ getLangOpts().implicitFunctionsAllowed()) {
NamedDecl *D = ImplicitlyDefineFunction(NameLoc, *II, S);
if (D) R.addDecl(D);
}
@@ -3022,7 +3093,7 @@ Sema::PerformObjectMemberConversion(Expr *From,
QualType QType = QualType(Qualifier->getAsType(), 0);
assert(QType->isRecordType() && "lookup done with non-record type");
- QualType QRecordType = QualType(QType->getAs<RecordType>(), 0);
+ QualType QRecordType = QualType(QType->castAs<RecordType>(), 0);
// In C++98, the qualifier type doesn't actually have to be a base
// type of the object type, in which case we just ignore it.
@@ -3194,8 +3265,12 @@ ExprResult Sema::BuildDeclarationNameExpr(
"Cannot refer unambiguously to a function template");
SourceLocation Loc = NameInfo.getLoc();
- if (CheckDeclInExpr(*this, Loc, D))
- return ExprError();
+ if (CheckDeclInExpr(*this, Loc, D)) {
+ // Recovery from invalid cases (e.g. D is an invalid Decl).
+ // We use the dependent type for the RecoveryExpr to prevent bogus follow-up
+ // diagnostics, as invalid decls use int as a fallback type.
+ return CreateRecoveryExpr(NameInfo.getBeginLoc(), NameInfo.getEndLoc(), {});
+ }
if (TemplateDecl *Template = dyn_cast<TemplateDecl>(D)) {
// Specifically diagnose references to class templates that are missing
@@ -3353,7 +3428,7 @@ ExprResult Sema::BuildDeclarationNameExpr(
case Decl::Function: {
if (unsigned BID = cast<FunctionDecl>(VD)->getBuiltinID()) {
- if (!Context.BuiltinInfo.isPredefinedLibFunction(BID)) {
+ if (!Context.BuiltinInfo.isDirectlyAddressable(BID)) {
type = Context.BuiltinFnTy;
valueKind = VK_PRValue;
break;
@@ -3401,6 +3476,10 @@ ExprResult Sema::BuildDeclarationNameExpr(
valueKind = VK_LValue;
break;
+ case Decl::UnnamedGlobalConstant:
+ valueKind = VK_LValue;
+ break;
+
case Decl::CXXMethod:
// If we're referring to a method with an __unknown_anytype
// result type, make the entire expression __unknown_anytype.
@@ -3489,7 +3568,7 @@ ExprResult Sema::BuildPredefinedExpr(SourceLocation Loc,
ResTy = Context.getConstantArrayType(ResTy, LengthI, nullptr,
ArrayType::Normal,
/*IndexTypeQuals*/ 0);
- SL = StringLiteral::Create(Context, Str, StringLiteral::Ascii,
+ SL = StringLiteral::Create(Context, Str, StringLiteral::Ordinary,
/*Pascal*/ false, ResTy, Loc);
}
}
@@ -3551,6 +3630,8 @@ ExprResult Sema::ActOnCharacterConstant(const Token &Tok, Scope *UDLScope) {
QualType Ty;
if (Literal.isWide())
Ty = Context.WideCharTy; // L'x' -> wchar_t in C and C++.
+ else if (Literal.isUTF8() && getLangOpts().C2x)
+ Ty = Context.UnsignedCharTy; // u8'x' -> unsigned char in C2x
else if (Literal.isUTF8() && getLangOpts().Char8)
Ty = Context.Char8Ty; // u8'x' -> char8_t when it exists.
else if (Literal.isUTF16())
@@ -3560,7 +3641,8 @@ ExprResult Sema::ActOnCharacterConstant(const Token &Tok, Scope *UDLScope) {
else if (!getLangOpts().CPlusPlus || Literal.isMultiChar())
Ty = Context.IntTy; // 'x' -> int in C, 'wxyz' -> int in C++.
else
- Ty = Context.CharTy; // 'x' -> char in C++
+ Ty = Context.CharTy; // 'x' -> char in C++;
+ // u8'x' -> char in C11-C17 and in C++ without char8_t.
CharacterLiteral::CharacterKind Kind = CharacterLiteral::Ascii;
if (Literal.isWide())
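A C sketch of the C2x branch added above:

    /* In C2x, u8'x' has type unsigned char: */
    int is_uchar = _Generic(u8'x', unsigned char: 1, default: 0);  /* 1 in C2x */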
@@ -3752,9 +3834,10 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
QualType StrTy = Context.getConstantArrayType(
Context.adjustStringLiteralBaseType(Context.CharTy.withConst()),
llvm::APInt(32, Length + 1), nullptr, ArrayType::Normal, 0);
- Expr *Lit = StringLiteral::Create(
- Context, StringRef(TokSpelling.data(), Length), StringLiteral::Ascii,
- /*Pascal*/false, StrTy, &TokLoc, 1);
+ Expr *Lit =
+ StringLiteral::Create(Context, StringRef(TokSpelling.data(), Length),
+ StringLiteral::Ordinary,
+ /*Pascal*/ false, StrTy, &TokLoc, 1);
return BuildLiteralOperatorCall(R, OpNameInfo, Lit, TokLoc);
}
@@ -3884,9 +3967,27 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
: diag::ext_cxx2b_size_t_suffix
: diag::err_cxx2b_size_t_suffix);
- // Get the value in the widest-possible width.
- unsigned MaxWidth = Context.getTargetInfo().getIntMaxTWidth();
- llvm::APInt ResultVal(MaxWidth, 0);
+ // 'wb/uwb' literals are a C2x feature. We support _BitInt as a type in C++,
+ // but we do not currently support the suffix in C++ mode because it's not
+ // entirely clear whether WG21 will prefer this suffix to return a library
+ // type such as std::bit_int instead of returning a _BitInt.
+ if (Literal.isBitInt && !getLangOpts().CPlusPlus)
+ PP.Diag(Tok.getLocation(), getLangOpts().C2x
+ ? diag::warn_c2x_compat_bitint_suffix
+ : diag::ext_c2x_bitint_suffix);
+
+ // Get the value in the widest possible width. What is "widest" depends on
+ // whether the literal is a bit-precise integer or not. For a bit-precise
+ // integer type, try to scan the source to determine how many bits are
+ // needed to represent the value. This may seem a bit expensive, but trying
+ // to get the integer value from an overly-wide APInt is *extremely*
+ // expensive, so the naive approach of assuming
+ // llvm::IntegerType::MAX_INT_BITS is a big performance hit.
+ unsigned BitsNeeded =
+ Literal.isBitInt ? llvm::APInt::getSufficientBitsNeeded(
+ Literal.getLiteralDigits(), Literal.getRadix())
+ : Context.getTargetInfo().getIntMaxTWidth();
+ llvm::APInt ResultVal(BitsNeeded, 0);
if (Literal.GetIntegerValue(ResultVal)) {
// If this value didn't fit into uintmax_t, error and force to ull.
@@ -3918,6 +4019,32 @@ ExprResult Sema::ActOnNumericConstant(const Token &Tok, Scope *UDLScope) {
}
}
+ // Bit-precise integer literals are automagically-sized based on the
+ // width required by the literal.
+ if (Literal.isBitInt) {
+ // The signed version has one more bit for the sign value. There are no
+ // zero-width bit-precise integers, even if the literal value is 0.
+ Width = std::max(ResultVal.getActiveBits(), 1u) +
+ (Literal.isUnsigned ? 0u : 1u);
+
+ // Diagnose if the width of the constant is larger than BITINT_MAXWIDTH,
+ // and reset the type to the largest supported width.
+ unsigned int MaxBitIntWidth =
+ Context.getTargetInfo().getMaxBitIntWidth();
+ if (Width > MaxBitIntWidth) {
+ Diag(Tok.getLocation(), diag::err_integer_literal_too_large)
+ << Literal.isUnsigned;
+ Width = MaxBitIntWidth;
+ }
+
+ // Reset the result value to the smaller APInt and select the correct
+ // type to be used. Note, we zext even for signed values because the
+      // literal itself is always an unsigned value (a preceding - is a
+ // unary operator, not part of the literal).
+ ResultVal = ResultVal.zextOrTrunc(Width);
+ Ty = Context.getBitIntType(Literal.isUnsigned, Width);
+ }
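// [Editorial sketch, illustrative only; not part of the patch.] The widths
// computed above give, for example:
//   3wb  -> _BitInt(3)           (two value bits plus a sign bit)
//   3uwb -> unsigned _BitInt(2)  (no sign bit)
//   0uwb -> unsigned _BitInt(1)  (zero-width bit-precise ints don't exist)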
+
// Check C++2b size_t literals.
if (Literal.isSizeT) {
assert(!Literal.MicrosoftInteger &&
@@ -4151,6 +4278,7 @@ bool Sema::CheckUnaryExprOrTypeTraitOperand(Expr *E,
// FIXME: Should we consider instantiation-dependent operands to 'alignof'?
if (IsUnevaluatedOperand && !inTemplateInstantiation() &&
!E->isInstantiationDependent() &&
+ !E->getType()->isVariableArrayType() &&
E->HasSideEffects(Context, false))
Diag(E->getExprLoc(), diag::warn_side_effects_unevaluated_context);
@@ -4431,6 +4559,7 @@ static void captureVariablyModifiedType(ASTContext &Context, QualType T,
case Type::TypeOf:
case Type::UnaryTransform:
case Type::Attributed:
+ case Type::BTFTagAttributed:
case Type::SubstTemplateTypeParm:
case Type::MacroQualified:
// Keep walking after single level desugaring.
@@ -4681,19 +4810,24 @@ static QualType getDependentArraySubscriptType(Expr *LHS, Expr *RHS,
return Result->isDependentType() ? Result : Ctx.DependentTy;
}
-ExprResult
-Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
- Expr *idx, SourceLocation rbLoc) {
+static bool checkArgsForPlaceholders(Sema &S, MultiExprArg args);
+
+ExprResult Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base,
+ SourceLocation lbLoc,
+ MultiExprArg ArgExprs,
+ SourceLocation rbLoc) {
+
if (base && !base->getType().isNull() &&
base->hasPlaceholderType(BuiltinType::OMPArraySection))
- return ActOnOMPArraySectionExpr(base, lbLoc, idx, SourceLocation(),
+ return ActOnOMPArraySectionExpr(base, lbLoc, ArgExprs.front(), SourceLocation(),
SourceLocation(), /*Length*/ nullptr,
/*Stride=*/nullptr, rbLoc);
// Since this might be a postfix expression, get rid of ParenListExprs.
if (isa<ParenListExpr>(base)) {
ExprResult result = MaybeConvertParenListExprToParenExpr(S, base);
- if (result.isInvalid()) return ExprError();
+ if (result.isInvalid())
+ return ExprError();
base = result.get();
}
@@ -4721,13 +4855,15 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
// MatrixSubscriptExpr.
auto *matSubscriptE = dyn_cast<MatrixSubscriptExpr>(base);
if (matSubscriptE) {
- if (CheckAndReportCommaError(idx))
+ assert(ArgExprs.size() == 1);
+ if (CheckAndReportCommaError(ArgExprs.front()))
return ExprError();
assert(matSubscriptE->isIncomplete() &&
"base has to be an incomplete matrix subscript");
- return CreateBuiltinMatrixSubscriptExpr(
- matSubscriptE->getBase(), matSubscriptE->getRowIdx(), idx, rbLoc);
+ return CreateBuiltinMatrixSubscriptExpr(matSubscriptE->getBase(),
+ matSubscriptE->getRowIdx(),
+ ArgExprs.front(), rbLoc);
}
// Handle any non-overload placeholder types in the base and index
@@ -4748,32 +4884,42 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
// If the base is a matrix type, try to create a new MatrixSubscriptExpr.
if (base->getType()->isMatrixType()) {
- if (CheckAndReportCommaError(idx))
+ assert(ArgExprs.size() == 1);
+ if (CheckAndReportCommaError(ArgExprs.front()))
return ExprError();
- return CreateBuiltinMatrixSubscriptExpr(base, idx, nullptr, rbLoc);
+ return CreateBuiltinMatrixSubscriptExpr(base, ArgExprs.front(), nullptr,
+ rbLoc);
}
- // A comma-expression as the index is deprecated in C++2a onwards.
- if (getLangOpts().CPlusPlus20 &&
- ((isa<BinaryOperator>(idx) && cast<BinaryOperator>(idx)->isCommaOp()) ||
- (isa<CXXOperatorCallExpr>(idx) &&
- cast<CXXOperatorCallExpr>(idx)->getOperator() == OO_Comma))) {
- Diag(idx->getExprLoc(), diag::warn_deprecated_comma_subscript)
- << SourceRange(base->getBeginLoc(), rbLoc);
+ if (ArgExprs.size() == 1 && getLangOpts().CPlusPlus20) {
+ Expr *idx = ArgExprs[0];
+ if ((isa<BinaryOperator>(idx) && cast<BinaryOperator>(idx)->isCommaOp()) ||
+ (isa<CXXOperatorCallExpr>(idx) &&
+ cast<CXXOperatorCallExpr>(idx)->getOperator() == OO_Comma)) {
+ Diag(idx->getExprLoc(), diag::warn_deprecated_comma_subscript)
+ << SourceRange(base->getBeginLoc(), rbLoc);
+ }
}
- if (idx->getType()->isNonOverloadPlaceholderType()) {
- ExprResult result = CheckPlaceholderExpr(idx);
- if (result.isInvalid()) return ExprError();
- idx = result.get();
+ if (ArgExprs.size() == 1 &&
+ ArgExprs[0]->getType()->isNonOverloadPlaceholderType()) {
+ ExprResult result = CheckPlaceholderExpr(ArgExprs[0]);
+ if (result.isInvalid())
+ return ExprError();
+ ArgExprs[0] = result.get();
+ } else {
+ if (checkArgsForPlaceholders(*this, ArgExprs))
+ return ExprError();
}
// Build an unanalyzed expression if either operand is type-dependent.
- if (getLangOpts().CPlusPlus &&
- (base->isTypeDependent() || idx->isTypeDependent())) {
+ if (getLangOpts().CPlusPlus && ArgExprs.size() == 1 &&
+ (base->isTypeDependent() ||
+ Expr::hasAnyTypeDependentArguments(ArgExprs))) {
return new (Context) ArraySubscriptExpr(
- base, idx, getDependentArraySubscriptType(base, idx, getASTContext()),
+ base, ArgExprs.front(),
+ getDependentArraySubscriptType(base, ArgExprs.front(), getASTContext()),
VK_LValue, OK_Ordinary, rbLoc);
}
@@ -4786,10 +4932,12 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
// indices. In this case, i=p->x[a][b] will be turned into i=p->GetX(a, b),
// and p->x[a][b] = i will be turned into p->PutX(a, b, i);
if (IsMSPropertySubscript) {
+ assert(ArgExprs.size() == 1);
// Build MS property subscript expression if base is MS property reference
// or MS property subscript.
- return new (Context) MSPropertySubscriptExpr(
- base, idx, Context.PseudoObjectTy, VK_LValue, OK_Ordinary, rbLoc);
+ return new (Context)
+ MSPropertySubscriptExpr(base, ArgExprs.front(), Context.PseudoObjectTy,
+ VK_LValue, OK_Ordinary, rbLoc);
}
// Use C++ overloaded-operator rules if either operand has record
@@ -4800,14 +4948,14 @@ Sema::ActOnArraySubscriptExpr(Scope *S, Expr *base, SourceLocation lbLoc,
//
// ObjC pointers have their own subscripting logic that is not tied
// to overload resolution and so should not take this path.
- if (getLangOpts().CPlusPlus &&
- (base->getType()->isRecordType() ||
- (!base->getType()->isObjCObjectPointerType() &&
- idx->getType()->isRecordType()))) {
- return CreateOverloadedArraySubscriptExpr(lbLoc, rbLoc, base, idx);
+ if (getLangOpts().CPlusPlus && !base->getType()->isObjCObjectPointerType() &&
+ ((base->getType()->isRecordType() ||
+ (ArgExprs.size() != 1 || ArgExprs[0]->getType()->isRecordType())))) {
+ return CreateOverloadedArraySubscriptExpr(lbLoc, rbLoc, base, ArgExprs);
}
- ExprResult Res = CreateBuiltinArraySubscriptExpr(base, lbLoc, idx, rbLoc);
+ ExprResult Res =
+ CreateBuiltinArraySubscriptExpr(base, lbLoc, ArgExprs.front(), rbLoc);
if (!Res.isInvalid() && isa<ArraySubscriptExpr>(Res.get()))
CheckSubscriptAccessOfNoDeref(cast<ArraySubscriptExpr>(Res.get()));
@@ -5581,6 +5729,33 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Qualifiers Combined = BaseQuals + MemberQuals;
if (Combined != MemberQuals)
ResultType = Context.getQualifiedType(ResultType, Combined);
+ } else if (LHSTy->isBuiltinType() &&
+ LHSTy->getAs<BuiltinType>()->isVLSTBuiltinType()) {
+ const BuiltinType *BTy = LHSTy->getAs<BuiltinType>();
+ if (BTy->isSVEBool())
+ return ExprError(Diag(LLoc, diag::err_subscript_svbool_t)
+ << LHSExp->getSourceRange() << RHSExp->getSourceRange());
+
+ BaseExpr = LHSExp;
+ IndexExpr = RHSExp;
+ if (getLangOpts().CPlusPlus11 && LHSExp->isPRValue()) {
+ ExprResult Materialized = TemporaryMaterializationConversion(LHSExp);
+ if (Materialized.isInvalid())
+ return ExprError();
+ LHSExp = Materialized.get();
+ }
+ VK = LHSExp->getValueKind();
+ if (VK != VK_PRValue)
+ OK = OK_VectorComponent;
+
+ ResultType = BTy->getSveEltType(Context);
+
+ QualType BaseType = BaseExpr->getType();
+ Qualifiers BaseQuals = BaseType.getQualifiers();
+ Qualifiers MemberQuals = ResultType.getQualifiers();
+ Qualifiers Combined = BaseQuals + MemberQuals;
+ if (Combined != MemberQuals)
+ ResultType = Context.getQualifiedType(ResultType, Combined);
} else if (LHSTy->isArrayType()) {
// If we see an array that wasn't promoted by
// DefaultFunctionArrayLvalueConversion, it must be an array that
@@ -6397,6 +6572,37 @@ tryImplicitlyCaptureThisIfImplicitMemberFunctionAccessWithDependentArgs(
}
}
+// Once a call is fully resolved, warn for unqualified calls to specific
+// C++ standard functions, like move and forward.
+static void DiagnosedUnqualifiedCallsToStdFunctions(Sema &S, CallExpr *Call) {
+ // We are only checking unary move and forward so exit early here.
+ if (Call->getNumArgs() != 1)
+ return;
+
+ Expr *E = Call->getCallee()->IgnoreParenImpCasts();
+ if (!E || isa<UnresolvedLookupExpr>(E))
+ return;
+ DeclRefExpr *DRE = dyn_cast_or_null<DeclRefExpr>(E);
+ if (!DRE || !DRE->getLocation().isValid())
+ return;
+
+ if (DRE->getQualifier())
+ return;
+
+ const FunctionDecl *FD = Call->getDirectCallee();
+ if (!FD)
+ return;
+
+ // Only warn for some functions deemed more frequent or problematic.
+ unsigned BuiltinID = FD->getBuiltinID();
+ if (BuiltinID != Builtin::BImove && BuiltinID != Builtin::BIforward)
+ return;
+
+ S.Diag(DRE->getLocation(), diag::warn_unqualified_call_to_std_cast_function)
+ << FD->getQualifiedNameAsString()
+ << FixItHint::CreateInsertion(DRE->getLocation(), "std::");
+}
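// [Editorial sketch, illustrative only; not part of the patch.] Code reaching
// the warning above:
//   using namespace std;
//   void g(int &&);
//   void f(int x) { g(move(x)); }       // warned; fix-it inserts "std::"
//   void h(int x) { g(std::move(x)); }  // qualified, so no diagnostic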
+
ExprResult Sema::ActOnCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig) {
@@ -6421,7 +6627,11 @@ ExprResult Sema::ActOnCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
if (LangOpts.OpenMP)
Call = ActOnOpenMPCall(Call, Scope, LParenLoc, ArgExprs, RParenLoc,
ExecConfig);
-
+ if (LangOpts.CPlusPlus) {
+ CallExpr *CE = dyn_cast<CallExpr>(Call.get());
+ if (CE)
+ DiagnosedUnqualifiedCallsToStdFunctions(*this, CE);
+ }
return Call;
}
@@ -6582,9 +6792,9 @@ ExprResult Sema::BuildCallExpr(Scope *Scope, Expr *Fn, SourceLocation LParenLoc,
auto ArgAS = ArgPtTy.getAddressSpace();
// Add address space cast if target address spaces are different
- bool NeedImplicitASC =
+ bool NeedImplicitASC =
ParamAS != LangAS::Default && // Pointer params in generic AS don't need special handling.
- ( ArgAS == LangAS::Default || // We do allow implicit conversion from generic AS
+ ( ArgAS == LangAS::Default || // We do allow implicit conversion from generic AS
// or from specific AS which has target AS matching that of Param.
getASTContext().getTargetAddressSpace(ArgAS) == getASTContext().getTargetAddressSpace(ParamAS));
if (!NeedImplicitASC)
@@ -6903,6 +7113,23 @@ ExprResult Sema::BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl,
Proto = FDecl->getType()->getAs<FunctionProtoType>();
}
+ // If we still haven't found a prototype to use but there are arguments to
+ // the call, diagnose this as calling a function without a prototype.
+ // However, if we found a function declaration, check to see if
+ // -Wdeprecated-non-prototype was disabled where the function was declared.
+ // If so, we will silence the diagnostic here on the assumption that this
+ // interface is intentional and the user knows what they're doing. We will
+ // also silence the diagnostic if there is a function declaration but it
+ // was implicitly defined (the user already gets diagnostics about the
+ // creation of the implicit function declaration, so the additional warning
+ // is not helpful).
+ if (!Proto && !Args.empty() &&
+ (!FDecl || (!FDecl->isImplicit() &&
+ !Diags.isIgnored(diag::warn_strict_uses_without_prototype,
+ FDecl->getLocation()))))
+ Diag(LParenLoc, diag::warn_strict_uses_without_prototype)
+ << (FDecl != nullptr) << FDecl;
+
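// [Editorial sketch, illustrative only; not part of the patch.] C code that
// hits the new diagnostic:
//   void f();                 // declares f without a prototype
//   void g(void) { f(42); }   // arguments but no prototype: warned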
// Promote the arguments (C99 6.5.2.2p6).
for (unsigned i = 0, e = Args.size(); i != e; i++) {
Expr *Arg = Args[i];
@@ -7507,6 +7734,36 @@ bool Sema::areVectorTypesSameSize(QualType SrcTy, QualType DestTy) {
return (SrcLen * SrcEltSize == DestLen * DestEltSize);
}
+// This returns true if at least one of the types is an AltiVec vector.
+bool Sema::anyAltivecTypes(QualType SrcTy, QualType DestTy) {
+ assert((DestTy->isVectorType() || SrcTy->isVectorType()) &&
+ "expected at least one type to be a vector here");
+
+ bool IsSrcTyAltivec =
+ SrcTy->isVectorType() && (SrcTy->castAs<VectorType>()->getVectorKind() ==
+ VectorType::AltiVecVector);
+ bool IsDestTyAltivec = DestTy->isVectorType() &&
+ (DestTy->castAs<VectorType>()->getVectorKind() ==
+ VectorType::AltiVecVector);
+
+ return (IsSrcTyAltivec || IsDestTyAltivec);
+}
+
+// This returns true if both vectors have the same element type.
+bool Sema::areSameVectorElemTypes(QualType SrcTy, QualType DestTy) {
+ assert((DestTy->isVectorType() || SrcTy->isVectorType()) &&
+ "expected at least one type to be a vector here");
+
+ uint64_t SrcLen, DestLen;
+ QualType SrcEltTy, DestEltTy;
+ if (!breakDownVectorType(SrcTy, SrcLen, SrcEltTy))
+ return false;
+ if (!breakDownVectorType(DestTy, DestLen, DestEltTy))
+ return false;
+
+ return (SrcEltTy == DestEltTy);
+}
+
/// Are the two types lax-compatible vector types? That is, given
/// that one of them is a vector, do they have equal storage sizes,
/// where the storage size is the number of elements times the element
@@ -8316,11 +8573,17 @@ OpenCLCheckVectorConditional(Sema &S, ExprResult &Cond,
// result as specified in OpenCL v1.1 s6.3.i.
if (LHS.get()->getType()->isVectorType() ||
RHS.get()->getType()->isVectorType()) {
- QualType VecResTy = S.CheckVectorOperands(LHS, RHS, QuestionLoc,
- /*isCompAssign*/false,
- /*AllowBothBool*/true,
- /*AllowBoolConversions*/false);
- if (VecResTy.isNull()) return QualType();
+ bool IsBoolVecLang =
+ !S.getLangOpts().OpenCL && !S.getLangOpts().OpenCLCPlusPlus;
+ QualType VecResTy =
+ S.CheckVectorOperands(LHS, RHS, QuestionLoc,
+ /*isCompAssign*/ false,
+ /*AllowBothBool*/ true,
+ /*AllowBoolConversions*/ false,
+ /*AllowBooleanOperation*/ IsBoolVecLang,
+ /*ReportInvalid*/ true);
+ if (VecResTy.isNull())
+ return QualType();
// The result type must match the condition type as specified in
// OpenCL v1.1 s6.11.6.
if (checkVectorResult(S, CondTy, VecResTy, QuestionLoc))
@@ -8393,9 +8656,11 @@ QualType Sema::CheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
// Now check the two expressions.
if (LHS.get()->getType()->isVectorType() ||
RHS.get()->getType()->isVectorType())
- return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/false,
- /*AllowBothBool*/true,
- /*AllowBoolConversions*/false);
+ return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/ false,
+ /*AllowBothBool*/ true,
+ /*AllowBoolConversions*/ false,
+ /*AllowBooleanOperation*/ false,
+ /*ReportInvalid*/ true);
QualType ResTy =
UsualArithmeticConversions(LHS, RHS, QuestionLoc, ACK_Conditional);
@@ -9253,6 +9518,15 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
return Compatible;
}
+ // If the LHS has an __auto_type, there are no additional type constraints
+ // to be worried about.
+ if (const auto *AT = dyn_cast<AutoType>(LHSType)) {
+ if (AT->isGNUAutoType()) {
+ Kind = CK_NoOp;
+ return Compatible;
+ }
+ }
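// [Editorial sketch; assumes, as the comment above suggests, that this path is
// reached while the LHS still carries GNU C's undeduced __auto_type.]
//   __auto_type x = compute();  // initializer accepted with no further
//                               // constraint checks, just the CK_NoOp above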
+
// If we have an atomic type, try a non-atomic assignment, then just add an
// atomic qualification step.
if (const AtomicType *AtomicTy = dyn_cast<AtomicType>(LHSType)) {
@@ -9309,6 +9583,13 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
// vectors, the total size only needs to be the same. This is a bitcast;
// no bits are changed but the result type is different.
if (isLaxVectorConversion(RHSType, LHSType)) {
+ // The default for lax vector conversions with Altivec vectors will
+ // change, so if we are converting between vector types where
+ // at least one is an Altivec vector, emit a warning.
+ if (anyAltivecTypes(RHSType, LHSType) &&
+ !areSameVectorElemTypes(RHSType, LHSType))
+ Diag(RHS.get()->getExprLoc(), diag::warn_deprecated_lax_vec_conv_all)
+ << RHSType << LHSType;
Kind = CK_BitCast;
return IncompatibleVectors;
}
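// [Editorial sketch, illustrative only; not part of the patch.] With AltiVec
// enabled:
//   vector signed int vsi;  vector unsigned int vui;
//   vsi = vui;   // same size, different element type -> deprecation warning,
//                // then the usual CK_BitCast / IncompatibleVectors result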
@@ -9322,6 +9603,9 @@ Sema::CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS,
const VectorType *VecType = RHSType->getAs<VectorType>();
if (VecType && VecType->getNumElements() == 1 &&
isLaxVectorConversion(RHSType, LHSType)) {
+ if (VecType->getVectorKind() == VectorType::AltiVecVector)
+ Diag(RHS.get()->getExprLoc(), diag::warn_deprecated_lax_vec_conv_all)
+ << RHSType << LHSType;
ExprResult *VecExpr = &RHS;
*VecExpr = ImpCastExprToType(VecExpr->get(), LHSType, CK_BitCast);
Kind = CK_BitCast;
@@ -9914,9 +10198,11 @@ static bool tryVectorConvertAndSplat(Sema &S, ExprResult *scalar,
static ExprResult convertVector(Expr *E, QualType ElementType, Sema &S) {
const auto *VecTy = E->getType()->getAs<VectorType>();
assert(VecTy && "Expression E must be a vector");
- QualType NewVecTy = S.Context.getVectorType(ElementType,
- VecTy->getNumElements(),
- VecTy->getVectorKind());
+ QualType NewVecTy =
+ VecTy->isExtVectorType()
+ ? S.Context.getExtVectorType(ElementType, VecTy->getNumElements())
+ : S.Context.getVectorType(ElementType, VecTy->getNumElements(),
+ VecTy->getVectorKind());
// Look through the implicit cast. Return the subexpression if its type is
// NewVecTy.
@@ -10016,12 +10302,18 @@ static bool tryGCCVectorConvertAndSplat(Sema &S, ExprResult *Scalar,
ExprResult *Vector) {
QualType ScalarTy = Scalar->get()->getType().getUnqualifiedType();
QualType VectorTy = Vector->get()->getType().getUnqualifiedType();
- const VectorType *VT = VectorTy->getAs<VectorType>();
-
- assert(!isa<ExtVectorType>(VT) &&
- "ExtVectorTypes should not be handled here!");
-
- QualType VectorEltTy = VT->getElementType();
+ QualType VectorEltTy;
+
+ if (const auto *VT = VectorTy->getAs<VectorType>()) {
+ assert(!isa<ExtVectorType>(VT) &&
+ "ExtVectorTypes should not be handled here!");
+ VectorEltTy = VT->getElementType();
+ } else if (VectorTy->isVLSTBuiltinType()) {
+ VectorEltTy =
+ VectorTy->castAs<BuiltinType>()->getSveEltType(S.getASTContext());
+ } else {
+ llvm_unreachable("Only Fixed-Length and SVE Vector types are handled here");
+ }
// Reject cases where the vector element type or the scalar element type are
// not integral or floating point types.
@@ -10103,7 +10395,9 @@ static bool tryGCCVectorConvertAndSplat(Sema &S, ExprResult *Scalar,
QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool,
- bool AllowBoolConversions) {
+ bool AllowBoolConversions,
+ bool AllowBoolOperation,
+ bool ReportInvalid) {
if (!IsCompAssign) {
LHS = DefaultFunctionArrayLvalueConversion(LHS.get());
if (LHS.isInvalid())
@@ -10124,14 +10418,19 @@ QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
if ((LHSVecType && LHSVecType->getElementType()->isBFloat16Type()) ||
(RHSVecType && RHSVecType->getElementType()->isBFloat16Type()))
- return InvalidOperands(Loc, LHS, RHS);
+ return ReportInvalid ? InvalidOperands(Loc, LHS, RHS) : QualType();
// AltiVec-style "vector bool op vector bool" combinations are allowed
// for some operators but not others.
if (!AllowBothBool &&
LHSVecType && LHSVecType->getVectorKind() == VectorType::AltiVecBool &&
RHSVecType && RHSVecType->getVectorKind() == VectorType::AltiVecBool)
- return InvalidOperands(Loc, LHS, RHS);
+ return ReportInvalid ? InvalidOperands(Loc, LHS, RHS) : QualType();
+
+ // This operation may not be performed on boolean vectors.
+ if (!AllowBoolOperation &&
+ (LHSType->isExtVectorBoolType() || RHSType->isExtVectorBoolType()))
+ return ReportInvalid ? InvalidOperands(Loc, LHS, RHS) : QualType();
// If the vector types are identical, return.
if (Context.hasSameType(LHSType, RHSType))
@@ -10245,6 +10544,9 @@ QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
QualType OtherType = LHSVecType ? RHSType : LHSType;
ExprResult *OtherExpr = LHSVecType ? &RHS : &LHS;
if (isLaxVectorConversion(OtherType, VecType)) {
+ if (anyAltivecTypes(RHSType, LHSType) &&
+ !areSameVectorElemTypes(RHSType, LHSType))
+ Diag(Loc, diag::warn_deprecated_lax_vec_conv_all) << RHSType << LHSType;
// If we're allowing lax vector conversions, only the total (data) size
// needs to be the same. For non compound assignment, if one of the types is
// scalar, the result is always the vector type.
@@ -10310,6 +10612,81 @@ QualType Sema::CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
return QualType();
}
+QualType Sema::CheckSizelessVectorOperands(ExprResult &LHS, ExprResult &RHS,
+ SourceLocation Loc,
+ bool IsCompAssign,
+ ArithConvKind OperationKind) {
+ if (!IsCompAssign) {
+ LHS = DefaultFunctionArrayLvalueConversion(LHS.get());
+ if (LHS.isInvalid())
+ return QualType();
+ }
+ RHS = DefaultFunctionArrayLvalueConversion(RHS.get());
+ if (RHS.isInvalid())
+ return QualType();
+
+ QualType LHSType = LHS.get()->getType().getUnqualifiedType();
+ QualType RHSType = RHS.get()->getType().getUnqualifiedType();
+
+ const BuiltinType *LHSBuiltinTy = LHSType->getAs<BuiltinType>();
+ const BuiltinType *RHSBuiltinTy = RHSType->getAs<BuiltinType>();
+
+ unsigned DiagID = diag::err_typecheck_invalid_operands;
+ if ((OperationKind == ACK_Arithmetic) &&
+ ((LHSBuiltinTy && LHSBuiltinTy->isSVEBool()) ||
+ (RHSBuiltinTy && RHSBuiltinTy->isSVEBool()))) {
+ Diag(Loc, DiagID) << LHSType << RHSType << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ if (Context.hasSameType(LHSType, RHSType))
+ return LHSType;
+
+ if (LHSType->isVLSTBuiltinType() && !RHSType->isVLSTBuiltinType()) {
+ if (!tryGCCVectorConvertAndSplat(*this, &RHS, &LHS))
+ return LHSType;
+ }
+ if (RHSType->isVLSTBuiltinType() && !LHSType->isVLSTBuiltinType()) {
+ if (LHS.get()->isLValue() ||
+ !tryGCCVectorConvertAndSplat(*this, &LHS, &RHS))
+ return RHSType;
+ }
+
+ if ((!LHSType->isVLSTBuiltinType() && !LHSType->isRealType()) ||
+ (!RHSType->isVLSTBuiltinType() && !RHSType->isRealType())) {
+ Diag(Loc, diag::err_typecheck_vector_not_convertable_non_scalar)
+ << LHSType << RHSType << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ if (LHSType->isVLSTBuiltinType() && RHSType->isVLSTBuiltinType() &&
+ Context.getBuiltinVectorTypeInfo(LHSBuiltinTy).EC !=
+ Context.getBuiltinVectorTypeInfo(RHSBuiltinTy).EC) {
+ Diag(Loc, diag::err_typecheck_vector_lengths_not_equal)
+ << LHSType << RHSType << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ if (LHSType->isVLSTBuiltinType() || RHSType->isVLSTBuiltinType()) {
+ QualType Scalar = LHSType->isVLSTBuiltinType() ? RHSType : LHSType;
+ QualType Vector = LHSType->isVLSTBuiltinType() ? LHSType : RHSType;
+ bool ScalarOrVector =
+ LHSType->isVLSTBuiltinType() && RHSType->isVLSTBuiltinType();
+
+ Diag(Loc, diag::err_typecheck_vector_not_convertable_implict_truncation)
+ << ScalarOrVector << Scalar << Vector;
+
+ return QualType();
+ }
+
+ Diag(Loc, DiagID) << LHSType << RHSType << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ return QualType();
+}
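// [Editorial sketch, illustrative only; not part of the patch.] With ARM SVE
// types from <arm_sve.h>:
//   svint32_t v;  int s;
//   v + s;        // OK: s is converted and splatted to match v
//   svbool_t p, q;
//   p + q;        // rejected above: no arithmetic on predicate vectors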
+
// checkArithmeticNull - Detect when a NULL constant is used improperly in an
// expression. These are mainly cases where the null pointer is used as an
// integer instead of a pointer.
@@ -10419,8 +10796,13 @@ QualType Sema::CheckMultiplyDivideOperands(ExprResult &LHS, ExprResult &RHS,
QualType RHSTy = RHS.get()->getType();
if (LHSTy->isVectorType() || RHSTy->isVectorType())
return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign,
- /*AllowBothBool*/getLangOpts().AltiVec,
- /*AllowBoolConversions*/false);
+ /*AllowBothBool*/ getLangOpts().AltiVec,
+ /*AllowBoolConversions*/ false,
+ /*AllowBooleanOperation*/ false,
+ /*ReportInvalid*/ true);
+ if (LHSTy->isVLSTBuiltinType() || RHSTy->isVLSTBuiltinType())
+ return CheckSizelessVectorOperands(LHS, RHS, Loc, IsCompAssign,
+ ACK_Arithmetic);
if (!IsDiv &&
(LHSTy->isConstantMatrixType() || RHSTy->isConstantMatrixType()))
return CheckMatrixMultiplyOperands(LHS, RHS, Loc, IsCompAssign);
@@ -10453,8 +10835,20 @@ QualType Sema::CheckRemainderOperands(
if (LHS.get()->getType()->hasIntegerRepresentation() &&
RHS.get()->getType()->hasIntegerRepresentation())
return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign,
- /*AllowBothBool*/getLangOpts().AltiVec,
- /*AllowBoolConversions*/false);
+ /*AllowBothBool*/ getLangOpts().AltiVec,
+ /*AllowBoolConversions*/ false,
+ /*AllowBooleanOperation*/ false,
+ /*ReportInvalid*/ true);
+ return InvalidOperands(Loc, LHS, RHS);
+ }
+
+ if (LHS.get()->getType()->isVLSTBuiltinType() ||
+ RHS.get()->getType()->isVLSTBuiltinType()) {
+ if (LHS.get()->getType()->hasIntegerRepresentation() &&
+ RHS.get()->getType()->hasIntegerRepresentation())
+ return CheckSizelessVectorOperands(LHS, RHS, Loc, IsCompAssign,
+ ACK_Arithmetic);
+
return InvalidOperands(Loc, LHS, RHS);
}
@@ -10515,8 +10909,10 @@ static void diagnoseSubtractionOnNullPointer(Sema &S, SourceLocation Loc,
if (S.Diags.getSuppressSystemWarnings() && S.SourceMgr.isInSystemMacro(Loc))
return;
- S.Diag(Loc, diag::warn_pointer_sub_null_ptr)
- << S.getLangOpts().CPlusPlus << Pointer->getSourceRange();
+ S.DiagRuntimeBehavior(Loc, Pointer,
+ S.PDiag(diag::warn_pointer_sub_null_ptr)
+ << S.getLangOpts().CPlusPlus
+ << Pointer->getSourceRange());
}
/// Diagnose invalid arithmetic on two function pointers.
@@ -10755,14 +11151,25 @@ QualType Sema::CheckAdditionOperands(ExprResult &LHS, ExprResult &RHS,
if (LHS.get()->getType()->isVectorType() ||
RHS.get()->getType()->isVectorType()) {
- QualType compType = CheckVectorOperands(
- LHS, RHS, Loc, CompLHSTy,
- /*AllowBothBool*/getLangOpts().AltiVec,
- /*AllowBoolConversions*/getLangOpts().ZVector);
+ QualType compType =
+ CheckVectorOperands(LHS, RHS, Loc, CompLHSTy,
+ /*AllowBothBool*/ getLangOpts().AltiVec,
+ /*AllowBoolConversions*/ getLangOpts().ZVector,
+ /*AllowBooleanOperation*/ false,
+ /*ReportInvalid*/ true);
if (CompLHSTy) *CompLHSTy = compType;
return compType;
}
+ if (LHS.get()->getType()->isVLSTBuiltinType() ||
+ RHS.get()->getType()->isVLSTBuiltinType()) {
+ QualType compType =
+ CheckSizelessVectorOperands(LHS, RHS, Loc, CompLHSTy, ACK_Arithmetic);
+ if (CompLHSTy)
+ *CompLHSTy = compType;
+ return compType;
+ }
+
if (LHS.get()->getType()->isConstantMatrixType() ||
RHS.get()->getType()->isConstantMatrixType()) {
QualType compType =
@@ -10859,14 +11266,25 @@ QualType Sema::CheckSubtractionOperands(ExprResult &LHS, ExprResult &RHS,
if (LHS.get()->getType()->isVectorType() ||
RHS.get()->getType()->isVectorType()) {
- QualType compType = CheckVectorOperands(
- LHS, RHS, Loc, CompLHSTy,
- /*AllowBothBool*/getLangOpts().AltiVec,
- /*AllowBoolConversions*/getLangOpts().ZVector);
+ QualType compType =
+ CheckVectorOperands(LHS, RHS, Loc, CompLHSTy,
+ /*AllowBothBool*/ getLangOpts().AltiVec,
+ /*AllowBoolConversions*/ getLangOpts().ZVector,
+ /*AllowBooleanOperation*/ false,
+ /*ReportInvalid*/ true);
if (CompLHSTy) *CompLHSTy = compType;
return compType;
}
+ if (LHS.get()->getType()->isVLSTBuiltinType() ||
+ RHS.get()->getType()->isVLSTBuiltinType()) {
+ QualType compType =
+ CheckSizelessVectorOperands(LHS, RHS, Loc, CompLHSTy, ACK_Arithmetic);
+ if (CompLHSTy)
+ *CompLHSTy = compType;
+ return compType;
+ }
+
if (LHS.get()->getType()->isConstantMatrixType() ||
RHS.get()->getType()->isConstantMatrixType()) {
QualType compType =
@@ -11042,10 +11460,16 @@ static void DiagnoseBadShiftValues(Sema& S, ExprResult &LHS, ExprResult &RHS,
return;
llvm::APSInt Left = LHSResult.Val.getInt();
- // If LHS does not have a signed type and non-negative value
- // then, the behavior is undefined before C++2a. Warn about it.
- if (Left.isNegative() && !S.getLangOpts().isSignedOverflowDefined() &&
- !S.getLangOpts().CPlusPlus20) {
+  // Don't warn if signed overflow is defined; in that case none of the
+  // diagnostics below can trigger, because the behavior is defined.
+ // Also don't warn in C++20 mode (and newer), as signed left shifts
+ // always wrap and never overflow.
+ if (S.getLangOpts().isSignedOverflowDefined() || S.getLangOpts().CPlusPlus20)
+ return;
+
+  // If the LHS has a negative value, the behavior is undefined
+  // before C++2a. Warn about it.
+ if (Left.isNegative()) {
S.DiagRuntimeBehavior(Loc, LHS.get(),
S.PDiag(diag::warn_shift_lhs_negative)
<< LHS.get()->getSourceRange());
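// [Editorial sketch, illustrative only; not part of the patch.]
//   int n = (-1) << 3;   // warned here prior to C++20
// With -fwrapv, or in C++20 and newer, the early return above suppresses this
// and the remaining shift diagnostics.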
@@ -11113,6 +11537,15 @@ static QualType checkVectorShift(Sema &S, ExprResult &LHS, ExprResult &RHS,
const VectorType *RHSVecTy = RHSType->getAs<VectorType>();
QualType RHSEleType = RHSVecTy ? RHSVecTy->getElementType() : RHSType;
+ // Do not allow shifts for boolean vectors.
+ if ((LHSVecTy && LHSVecTy->isExtVectorBoolType()) ||
+ (RHSVecTy && RHSVecTy->isExtVectorBoolType())) {
+ S.Diag(Loc, diag::err_typecheck_invalid_operands)
+ << LHS.get()->getType() << RHS.get()->getType()
+ << LHS.get()->getSourceRange();
+ return QualType();
+ }
+
// The operands need to be integers.
if (!LHSEleType->isIntegerType()) {
S.Diag(Loc, diag::err_typecheck_expect_int)
@@ -11168,6 +11601,97 @@ static QualType checkVectorShift(Sema &S, ExprResult &LHS, ExprResult &RHS,
return LHSType;
}
+static QualType checkSizelessVectorShift(Sema &S, ExprResult &LHS,
+ ExprResult &RHS, SourceLocation Loc,
+ bool IsCompAssign) {
+ if (!IsCompAssign) {
+ LHS = S.UsualUnaryConversions(LHS.get());
+ if (LHS.isInvalid())
+ return QualType();
+ }
+
+ RHS = S.UsualUnaryConversions(RHS.get());
+ if (RHS.isInvalid())
+ return QualType();
+
+ QualType LHSType = LHS.get()->getType();
+ const BuiltinType *LHSBuiltinTy = LHSType->getAs<BuiltinType>();
+ QualType LHSEleType = LHSType->isVLSTBuiltinType()
+ ? LHSBuiltinTy->getSveEltType(S.getASTContext())
+ : LHSType;
+
+ // Note that RHS might not be a vector
+ QualType RHSType = RHS.get()->getType();
+ const BuiltinType *RHSBuiltinTy = RHSType->getAs<BuiltinType>();
+ QualType RHSEleType = RHSType->isVLSTBuiltinType()
+ ? RHSBuiltinTy->getSveEltType(S.getASTContext())
+ : RHSType;
+
+ if ((LHSBuiltinTy && LHSBuiltinTy->isSVEBool()) ||
+ (RHSBuiltinTy && RHSBuiltinTy->isSVEBool())) {
+ S.Diag(Loc, diag::err_typecheck_invalid_operands)
+ << LHSType << RHSType << LHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ if (!LHSEleType->isIntegerType()) {
+ S.Diag(Loc, diag::err_typecheck_expect_int)
+ << LHS.get()->getType() << LHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ if (!RHSEleType->isIntegerType()) {
+ S.Diag(Loc, diag::err_typecheck_expect_int)
+ << RHS.get()->getType() << RHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ if (LHSType->isVLSTBuiltinType() && RHSType->isVLSTBuiltinType() &&
+ (S.Context.getBuiltinVectorTypeInfo(LHSBuiltinTy).EC !=
+ S.Context.getBuiltinVectorTypeInfo(RHSBuiltinTy).EC)) {
+ S.Diag(Loc, diag::err_typecheck_invalid_operands)
+ << LHSType << RHSType << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ return QualType();
+ }
+
+ if (!LHSType->isVLSTBuiltinType()) {
+ assert(RHSType->isVLSTBuiltinType());
+ if (IsCompAssign)
+ return RHSType;
+ if (LHSEleType != RHSEleType) {
+ LHS = S.ImpCastExprToType(LHS.get(), RHSEleType, clang::CK_IntegralCast);
+ LHSEleType = RHSEleType;
+ }
+ const llvm::ElementCount VecSize =
+ S.Context.getBuiltinVectorTypeInfo(RHSBuiltinTy).EC;
+ QualType VecTy =
+ S.Context.getScalableVectorType(LHSEleType, VecSize.getKnownMinValue());
+ LHS = S.ImpCastExprToType(LHS.get(), VecTy, clang::CK_VectorSplat);
+ LHSType = VecTy;
+ } else if (RHSBuiltinTy && RHSBuiltinTy->isVLSTBuiltinType()) {
+ if (S.Context.getTypeSize(RHSBuiltinTy) !=
+ S.Context.getTypeSize(LHSBuiltinTy)) {
+ S.Diag(Loc, diag::err_typecheck_vector_lengths_not_equal)
+ << LHSType << RHSType << LHS.get()->getSourceRange()
+ << RHS.get()->getSourceRange();
+ return QualType();
+ }
+ } else {
+ const llvm::ElementCount VecSize =
+ S.Context.getBuiltinVectorTypeInfo(LHSBuiltinTy).EC;
+ if (LHSEleType != RHSEleType) {
+ RHS = S.ImpCastExprToType(RHS.get(), LHSEleType, clang::CK_IntegralCast);
+ RHSEleType = LHSEleType;
+ }
+ QualType VecTy =
+ S.Context.getScalableVectorType(RHSEleType, VecSize.getKnownMinValue());
+ RHS = S.ImpCastExprToType(RHS.get(), VecTy, CK_VectorSplat);
+ }
+
+ return LHSType;
+}
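// [Editorial sketch, illustrative only; not part of the patch.] For an SVE
// vector v of type svint32_t:
//   v << 2;   // 2 is integral-cast and splatted to an svint32_t above
//   svbool_t p;
//   v << p;   // rejected by the SVEBool check at the top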
+
// C99 6.5.7
QualType Sema::CheckShiftOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, BinaryOperatorKind Opc,
@@ -11191,6 +11715,10 @@ QualType Sema::CheckShiftOperands(ExprResult &LHS, ExprResult &RHS,
return checkVectorShift(*this, LHS, RHS, Loc, IsCompAssign);
}
+ if (LHS.get()->getType()->isVLSTBuiltinType() ||
+ RHS.get()->getType()->isVLSTBuiltinType())
+ return checkSizelessVectorShift(*this, LHS, RHS, Loc, IsCompAssign);
+
// Shifts don't perform usual arithmetic conversions, they just do integer
// promotions on each operand. C99 6.5.7p3
@@ -11796,7 +12324,7 @@ static QualType checkArithmeticOrEnumeralCompare(Sema &S, ExprResult &LHS,
// Check for comparisons of floating point operands using != and ==.
if (Type->hasFloatingRepresentation() && BinaryOperator::isEqualityOp(Opc))
- S.CheckFloatComparison(Loc, LHS.get(), RHS.get());
+ S.CheckFloatComparison(Loc, LHS.get(), RHS.get(), Opc);
// The result of comparisons is 'bool' in C++, 'int' in C.
return S.Context.getLogicalOperationType();
@@ -11872,6 +12400,10 @@ QualType Sema::CheckCompareOperands(ExprResult &LHS, ExprResult &RHS,
RHS.get()->getType()->isVectorType())
return CheckVectorCompareOperands(LHS, RHS, Loc, Opc);
+ if (LHS.get()->getType()->isVLSTBuiltinType() ||
+ RHS.get()->getType()->isVLSTBuiltinType())
+ return CheckSizelessVectorCompareOperands(LHS, RHS, Loc, Opc);
+
diagnoseLogicalNotOnLHSofCheck(*this, LHS, RHS, Loc, Opc);
diagnoseTautologicalComparison(*this, Loc, LHS.get(), RHS.get(), Opc);
@@ -12295,6 +12827,8 @@ QualType Sema::GetSignedVectorType(QualType V) {
unsigned TypeSize = Context.getTypeSize(VTy->getElementType());
if (isa<ExtVectorType>(VTy)) {
+ if (VTy->isExtVectorBoolType())
+ return Context.getExtVectorType(Context.BoolTy, VTy->getNumElements());
if (TypeSize == Context.getTypeSize(Context.CharTy))
return Context.getExtVectorType(Context.CharTy, VTy->getNumElements());
if (TypeSize == Context.getTypeSize(Context.ShortTy))
@@ -12331,6 +12865,18 @@ QualType Sema::GetSignedVectorType(QualType V) {
VectorType::GenericVector);
}
+QualType Sema::GetSignedSizelessVectorType(QualType V) {
+ const BuiltinType *VTy = V->castAs<BuiltinType>();
+ assert(VTy->isSizelessBuiltinType() && "expected sizeless type");
+
+ const QualType ETy = V->getSveEltType(Context);
+ const auto TypeSize = Context.getTypeSize(ETy);
+
+ const QualType IntTy = Context.getIntTypeForBitwidth(TypeSize, true);
+ const llvm::ElementCount VecSize = Context.getBuiltinVectorTypeInfo(VTy).EC;
+ return Context.getScalableVectorType(IntTy, VecSize.getKnownMinValue());
+}
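// [Editorial sketch, illustrative only; not part of the patch.] For
// V = svfloat64_t the element type is double (64 bits), so this returns
// svint64_t with the same element count.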
+
/// CheckVectorCompareOperands - vector comparisons are a clang extension that
/// operates on extended vector types. Instead of producing an IntTy result,
/// like a scalar comparison, a vector comparison produces a vector of integer
@@ -12345,9 +12891,12 @@ QualType Sema::CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
// Check to make sure we're operating on vectors of the same type and width,
// Allowing one side to be a scalar of element type.
- QualType vType = CheckVectorOperands(LHS, RHS, Loc, /*isCompAssign*/false,
- /*AllowBothBool*/true,
- /*AllowBoolConversions*/getLangOpts().ZVector);
+ QualType vType =
+ CheckVectorOperands(LHS, RHS, Loc, /*isCompAssign*/ false,
+ /*AllowBothBool*/ true,
+ /*AllowBoolConversions*/ getLangOpts().ZVector,
+ /*AllowBooleanOperation*/ true,
+ /*ReportInvalid*/ true);
if (vType.isNull())
return vType;
@@ -12387,13 +12936,55 @@ QualType Sema::CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
if (BinaryOperator::isEqualityOp(Opc) &&
LHSType->hasFloatingRepresentation()) {
assert(RHS.get()->getType()->hasFloatingRepresentation());
- CheckFloatComparison(Loc, LHS.get(), RHS.get());
+ CheckFloatComparison(Loc, LHS.get(), RHS.get(), Opc);
}
// Return a signed type for the vector.
return GetSignedVectorType(vType);
}
+QualType Sema::CheckSizelessVectorCompareOperands(ExprResult &LHS,
+ ExprResult &RHS,
+ SourceLocation Loc,
+ BinaryOperatorKind Opc) {
+ if (Opc == BO_Cmp) {
+ Diag(Loc, diag::err_three_way_vector_comparison);
+ return QualType();
+ }
+
+  // Check to make sure we're operating on vectors of the same type and width,
+  // allowing one side to be a scalar of the element type.
+ QualType vType = CheckSizelessVectorOperands(
+ LHS, RHS, Loc, /*isCompAssign*/ false, ACK_Comparison);
+
+ if (vType.isNull())
+ return vType;
+
+ QualType LHSType = LHS.get()->getType();
+
+ // For non-floating point types, check for self-comparisons of the form
+ // x == x, x != x, x < x, etc. These always evaluate to a constant, and
+ // often indicate logic errors in the program.
+ diagnoseTautologicalComparison(*this, Loc, LHS.get(), RHS.get(), Opc);
+
+ // Check for comparisons of floating point operands using != and ==.
+ if (BinaryOperator::isEqualityOp(Opc) &&
+ LHSType->hasFloatingRepresentation()) {
+ assert(RHS.get()->getType()->hasFloatingRepresentation());
+ CheckFloatComparison(Loc, LHS.get(), RHS.get(), Opc);
+ }
+
+ const BuiltinType *LHSBuiltinTy = LHSType->getAs<BuiltinType>();
+ const BuiltinType *RHSBuiltinTy = RHS.get()->getType()->getAs<BuiltinType>();
+
+ if (LHSBuiltinTy && RHSBuiltinTy && LHSBuiltinTy->isSVEBool() &&
+ RHSBuiltinTy->isSVEBool())
+ return LHSType;
+
+ // Return a signed type for the vector.
+ return GetSignedSizelessVectorType(vType);
+}
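// [Editorial sketch, illustrative only; not part of the patch.]
//   svfloat32_t a, b;  a == b;   // yields svint32_t (signed, same count)
//   svbool_t p, q;     p == q;   // stays svbool_t via the early return above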
+
static void diagnoseXorMisusedAsPow(Sema &S, const ExprResult &XorLHS,
const ExprResult &XorRHS,
const SourceLocation Loc) {
@@ -12521,8 +13112,10 @@ QualType Sema::CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
// Ensure that either both operands are of the same vector type, or
// one operand is of a vector type and the other is of its element type.
QualType vType = CheckVectorOperands(LHS, RHS, Loc, false,
- /*AllowBothBool*/true,
- /*AllowBoolConversions*/false);
+ /*AllowBothBool*/ true,
+ /*AllowBoolConversions*/ false,
+ /*AllowBooleanOperation*/ false,
+ /*ReportInvalid*/ false);
if (vType.isNull())
return InvalidOperands(Loc, LHS, RHS);
if (getLangOpts().OpenCL &&
@@ -12616,6 +13209,20 @@ QualType Sema::CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS,
return CheckMatrixElementwiseOperands(LHS, RHS, Loc, IsCompAssign);
}
+static bool isLegalBoolVectorBinaryOp(BinaryOperatorKind Opc) {
+ switch (Opc) {
+ default:
+ return false;
+ case BO_And:
+ case BO_AndAssign:
+ case BO_Or:
+ case BO_OrAssign:
+ case BO_Xor:
+ case BO_XorAssign:
+ return true;
+ }
+}
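// [Editorial sketch, illustrative only; not part of the patch.] Given
//   typedef bool bool8 __attribute__((ext_vector_type(8)));
// "a & b" and "a ^= b" stay legal for bool8 a, b, while arithmetic like
// "a + b" reaches CheckVectorOperands with AllowBoolOperation=false and is
// rejected.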
+
inline QualType Sema::CheckBitwiseOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc) {
@@ -12624,13 +13231,26 @@ inline QualType Sema::CheckBitwiseOperands(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign =
Opc == BO_AndAssign || Opc == BO_OrAssign || Opc == BO_XorAssign;
+ bool LegalBoolVecOperator = isLegalBoolVectorBinaryOp(Opc);
+
if (LHS.get()->getType()->isVectorType() ||
RHS.get()->getType()->isVectorType()) {
if (LHS.get()->getType()->hasIntegerRepresentation() &&
RHS.get()->getType()->hasIntegerRepresentation())
return CheckVectorOperands(LHS, RHS, Loc, IsCompAssign,
- /*AllowBothBool*/true,
- /*AllowBoolConversions*/getLangOpts().ZVector);
+ /*AllowBothBool*/ true,
+ /*AllowBoolConversions*/ getLangOpts().ZVector,
+ /*AllowBooleanOperation*/ LegalBoolVecOperator,
+ /*ReportInvalid*/ true);
+ return InvalidOperands(Loc, LHS, RHS);
+ }
+
+ if (LHS.get()->getType()->isVLSTBuiltinType() ||
+ RHS.get()->getType()->isVLSTBuiltinType()) {
+ if (LHS.get()->getType()->hasIntegerRepresentation() &&
+ RHS.get()->getType()->hasIntegerRepresentation())
+ return CheckSizelessVectorOperands(LHS, RHS, Loc, IsCompAssign,
+ ACK_BitwiseOp);
return InvalidOperands(Loc, LHS, RHS);
}
@@ -12662,7 +13291,8 @@ inline QualType Sema::CheckLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc) {
// Check vector operands differently.
- if (LHS.get()->getType()->isVectorType() || RHS.get()->getType()->isVectorType())
+ if (LHS.get()->getType()->isVectorType() ||
+ RHS.get()->getType()->isVectorType())
return CheckVectorLogicalOperands(LHS, RHS, Loc);
bool EnumConstantInBoolContext = false;
@@ -12696,14 +13326,13 @@ inline QualType Sema::CheckLogicalOperands(ExprResult &LHS, ExprResult &RHS,
!RHS.get()->getExprLoc().isMacroID()) ||
(Result != 0 && Result != 1)) {
Diag(Loc, diag::warn_logical_instead_of_bitwise)
- << RHS.get()->getSourceRange()
- << (Opc == BO_LAnd ? "&&" : "||");
+ << RHS.get()->getSourceRange() << (Opc == BO_LAnd ? "&&" : "||");
// Suggest replacing the logical operator with the bitwise version
Diag(Loc, diag::note_logical_instead_of_bitwise_change_operator)
<< (Opc == BO_LAnd ? "&" : "|")
- << FixItHint::CreateReplacement(SourceRange(
- Loc, getLocForEndOfToken(Loc)),
- Opc == BO_LAnd ? "&" : "|");
+ << FixItHint::CreateReplacement(
+ SourceRange(Loc, getLocForEndOfToken(Loc)),
+ Opc == BO_LAnd ? "&" : "|");
if (Opc == BO_LAnd)
// Suggest replacing "Foo() && kNonZero" with "Foo()"
Diag(Loc, diag::note_logical_instead_of_bitwise_remove_constant)
@@ -13317,15 +13946,15 @@ QualType Sema::CheckAssignmentOperands(Expr *LHSExpr, ExprResult &RHS,
}
}
- // C99 6.5.16p3: The type of an assignment expression is the type of the
- // left operand unless the left operand has qualified type, in which case
- // it is the unqualified version of the type of the left operand.
- // C99 6.5.16.1p2: In simple assignment, the value of the right operand
- // is converted to the type of the assignment expression (above).
+ // C11 6.5.16p3: The type of an assignment expression is the type of the
+ // left operand would have after lvalue conversion.
+ // C11 6.3.2.1p2: ...this is called lvalue conversion. If the lvalue has
+ // qualified type, the value has the unqualified version of the type of the
+ // lvalue; additionally, if the lvalue has atomic type, the value has the
+ // non-atomic version of the type of the lvalue.
// C++ 5.17p1: the type of the assignment expression is that of its left
// operand.
- return (getLangOpts().CPlusPlus
- ? LHSType : LHSType.getUnqualifiedType());
+ return getLangOpts().CPlusPlus ? LHSType : LHSType.getAtomicUnqualifiedType();
}
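// [Editorial sketch, illustrative only; not part of the patch.] The C11
// wording above strips _Atomic from the assignment's result type:
//   _Atomic int a;
//   __typeof__(a = 1) b;   // b is a plain int, not _Atomic int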
// Only ignore explicit casts to void.
@@ -13805,8 +14434,8 @@ QualType Sema::CheckAddressOfOperand(ExprResult &OrigOp, SourceLocation OpLoc) {
return MPTy;
}
}
- } else if (!isa<FunctionDecl>(dcl) && !isa<NonTypeTemplateParmDecl>(dcl) &&
- !isa<BindingDecl>(dcl) && !isa<MSGuidDecl>(dcl))
+ } else if (!isa<FunctionDecl, NonTypeTemplateParmDecl, BindingDecl,
+ MSGuidDecl, UnnamedGlobalConstantDecl>(dcl))
llvm_unreachable("Unknown/unexpected decl type");
}
@@ -13845,8 +14474,7 @@ static void RecordModifiableNonNullParam(Sema &S, const Expr *Exp) {
if (!FD->hasAttr<NonNullAttr>() && !Param->hasAttr<NonNullAttr>())
return;
if (FunctionScopeInfo *FD = S.getCurFunction())
- if (!FD->ModifiedNonNullParams.count(Param))
- FD->ModifiedNonNullParams.insert(Param);
+ FD->ModifiedNonNullParams.insert(Param);
}
/// CheckIndirectionOperand - Type check unary indirection (prefix '*').
@@ -14865,6 +15493,13 @@ ExprResult Sema::CreateBuiltinUnaryOp(SourceLocation OpLoc,
}
}
+ if (getLangOpts().HLSL) {
+ if (Opc == UO_AddrOf)
+ return ExprError(Diag(OpLoc, diag::err_hlsl_operator_unsupported) << 0);
+ if (Opc == UO_Deref)
+ return ExprError(Diag(OpLoc, diag::err_hlsl_operator_unsupported) << 1);
+ }
+
switch (Opc) {
case UO_PreInc:
case UO_PreDec:
@@ -15990,18 +16625,111 @@ ExprResult Sema::ActOnGNUNullExpr(SourceLocation TokenLoc) {
return new (Context) GNUNullExpr(Ty, TokenLoc);
}
+static CXXRecordDecl *LookupStdSourceLocationImpl(Sema &S, SourceLocation Loc) {
+ CXXRecordDecl *ImplDecl = nullptr;
+
+ // Fetch the std::source_location::__impl decl.
+ if (NamespaceDecl *Std = S.getStdNamespace()) {
+ LookupResult ResultSL(S, &S.PP.getIdentifierTable().get("source_location"),
+ Loc, Sema::LookupOrdinaryName);
+ if (S.LookupQualifiedName(ResultSL, Std)) {
+ if (auto *SLDecl = ResultSL.getAsSingle<RecordDecl>()) {
+ LookupResult ResultImpl(S, &S.PP.getIdentifierTable().get("__impl"),
+ Loc, Sema::LookupOrdinaryName);
+ if ((SLDecl->isCompleteDefinition() || SLDecl->isBeingDefined()) &&
+ S.LookupQualifiedName(ResultImpl, SLDecl)) {
+ ImplDecl = ResultImpl.getAsSingle<CXXRecordDecl>();
+ }
+ }
+ }
+ }
+
+ if (!ImplDecl || !ImplDecl->isCompleteDefinition()) {
+ S.Diag(Loc, diag::err_std_source_location_impl_not_found);
+ return nullptr;
+ }
+
+ // Verify that __impl is a trivial struct type, with no base classes, and with
+ // only the four expected fields.
+ if (ImplDecl->isUnion() || !ImplDecl->isStandardLayout() ||
+ ImplDecl->getNumBases() != 0) {
+ S.Diag(Loc, diag::err_std_source_location_impl_malformed);
+ return nullptr;
+ }
+
+ unsigned Count = 0;
+ for (FieldDecl *F : ImplDecl->fields()) {
+ StringRef Name = F->getName();
+
+ if (Name == "_M_file_name") {
+ if (F->getType() !=
+ S.Context.getPointerType(S.Context.CharTy.withConst()))
+ break;
+ Count++;
+ } else if (Name == "_M_function_name") {
+ if (F->getType() !=
+ S.Context.getPointerType(S.Context.CharTy.withConst()))
+ break;
+ Count++;
+ } else if (Name == "_M_line") {
+ if (!F->getType()->isIntegerType())
+ break;
+ Count++;
+ } else if (Name == "_M_column") {
+ if (!F->getType()->isIntegerType())
+ break;
+ Count++;
+ } else {
+ Count = 100; // invalid
+ break;
+ }
+ }
+ if (Count != 4) {
+ S.Diag(Loc, diag::err_std_source_location_impl_malformed);
+ return nullptr;
+ }
+
+ return ImplDecl;
+}
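// [Editorial sketch, illustrative only; not part of the patch.] The library
// shape these checks accept looks like:
//   namespace std {
//     struct source_location {
//       struct __impl {
//         const char *_M_file_name;
//         const char *_M_function_name;
//         unsigned _M_line;
//         unsigned _M_column;
//       };
//     };
//   }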
+
ExprResult Sema::ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc) {
- return BuildSourceLocExpr(Kind, BuiltinLoc, RPLoc, CurContext);
+ QualType ResultTy;
+ switch (Kind) {
+ case SourceLocExpr::File:
+ case SourceLocExpr::Function: {
+ QualType ArrTy = Context.getStringLiteralArrayType(Context.CharTy, 0);
+ ResultTy =
+ Context.getPointerType(ArrTy->getAsArrayTypeUnsafe()->getElementType());
+ break;
+ }
+ case SourceLocExpr::Line:
+ case SourceLocExpr::Column:
+ ResultTy = Context.UnsignedIntTy;
+ break;
+ case SourceLocExpr::SourceLocStruct:
+ if (!StdSourceLocationImplDecl) {
+ StdSourceLocationImplDecl =
+ LookupStdSourceLocationImpl(*this, BuiltinLoc);
+ if (!StdSourceLocationImplDecl)
+ return ExprError();
+ }
+ ResultTy = Context.getPointerType(
+ Context.getRecordType(StdSourceLocationImplDecl).withConst());
+ break;
+ }
+
+ return BuildSourceLocExpr(Kind, ResultTy, BuiltinLoc, RPLoc, CurContext);
}
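// [Editorial sketch; result types per the switch above. The pointer constness
// for File/Function follows the string-literal character type, so it may be
// plain char * in C.]
//   __builtin_FILE(), __builtin_FUNCTION()  -> const char *
//   __builtin_LINE(), __builtin_COLUMN()    -> unsigned int
//   __builtin_source_location()             -> const std::source_location::__impl *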
ExprResult Sema::BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
+ QualType ResultTy,
SourceLocation BuiltinLoc,
SourceLocation RPLoc,
DeclContext *ParentContext) {
return new (Context)
- SourceLocExpr(Context, Kind, BuiltinLoc, RPLoc, ParentContext);
+ SourceLocExpr(Context, Kind, ResultTy, BuiltinLoc, RPLoc, ParentContext);
}
bool Sema::CheckConversionToObjCLiteral(QualType DstType, Expr *&Exp,
@@ -16026,7 +16754,7 @@ bool Sema::CheckConversionToObjCLiteral(QualType DstType, Expr *&Exp,
if (!PT->isObjCIdType() &&
!(ID && ID->getIdentifier()->isStr("NSString")))
return false;
- if (!SL->isAscii())
+ if (!SL->isOrdinary())
return false;
if (Diagnose) {
@@ -16307,10 +17035,12 @@ bool Sema::DiagnoseAssignmentResult(AssignConvertType ConvTy,
}
PartialDiagnostic FDiag = PDiag(DiagKind);
+ AssignmentAction ActionForDiag = Action;
if (Action == AA_Passing_CFAudited)
- FDiag << FirstType << SecondType << AA_Passing << SrcExpr->getSourceRange();
- else
- FDiag << FirstType << SecondType << Action << SrcExpr->getSourceRange();
+ ActionForDiag = AA_Passing;
+
+ FDiag << FirstType << SecondType << ActionForDiag
+ << SrcExpr->getSourceRange();
if (DiagKind == diag::ext_typecheck_convert_incompatible_pointer_sign ||
DiagKind == diag::err_typecheck_convert_incompatible_pointer_sign) {
@@ -16728,7 +17458,10 @@ ExprResult Sema::CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl) {
ConstantExpr::getStorageKind(Decl->getReturnType().getTypePtr(),
getASTContext()),
/*IsImmediateInvocation*/ true);
- ExprEvalContexts.back().ImmediateInvocationCandidates.emplace_back(Res, 0);
+  // Value-dependent constant expressions should not be immediately
+  // evaluated until they are instantiated.
+ if (!Res->isValueDependent())
+ ExprEvalContexts.back().ImmediateInvocationCandidates.emplace_back(Res, 0);
return Res;
}
@@ -17211,7 +17944,7 @@ void Sema::MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
if (NeedDefinition &&
(Func->getTemplateSpecializationKind() != TSK_Undeclared ||
Func->getMemberSpecializationInfo()))
- checkSpecializationVisibility(Loc, Func);
+ checkSpecializationReachability(Loc, Func);
if (getLangOpts().CUDA)
CheckCUDACall(Loc, Func);
@@ -17408,7 +18141,7 @@ MarkVarDeclODRUsed(VarDecl *Var, SourceLocation Loc, Sema &SemaRef,
CaptureType, DeclRefType,
FunctionScopeIndexToStopAt);
- if (SemaRef.LangOpts.CUDA && Var && Var->hasGlobalStorage()) {
+ if (SemaRef.LangOpts.CUDA && Var->hasGlobalStorage()) {
auto *FD = dyn_cast_or_null<FunctionDecl>(SemaRef.CurContext);
auto VarTarget = SemaRef.IdentifyCUDATarget(Var);
auto UserTarget = SemaRef.IdentifyCUDATarget(FD);
@@ -17428,8 +18161,7 @@ MarkVarDeclODRUsed(VarDecl *Var, SourceLocation Loc, Sema &SemaRef,
}
} else if (VarTarget == Sema::CVT_Device &&
(UserTarget == Sema::CFT_Host ||
- UserTarget == Sema::CFT_HostDevice) &&
- !Var->hasExternalStorage()) {
+ UserTarget == Sema::CFT_HostDevice)) {
// Record a CUDA/HIP device side variable if it is ODR-used
// by host code. This is done conservatively, when the variable is
// referenced in any of the following contexts:
@@ -17440,7 +18172,10 @@ MarkVarDeclODRUsed(VarDecl *Var, SourceLocation Loc, Sema &SemaRef,
// be visible in the device compilation for the compiler to be able to
// emit template variables instantiated by host code only and to
// externalize the static device side variable ODR-used by host code.
- SemaRef.getASTContext().CUDADeviceVarODRUsedByHost.insert(Var);
+ if (!Var->hasExternalStorage())
+ SemaRef.getASTContext().CUDADeviceVarODRUsedByHost.insert(Var);
+ else if (SemaRef.LangOpts.GPURelocatableDeviceCode)
+ SemaRef.getASTContext().CUDAExternalDeviceDeclODRUsedByHost.insert(Var);
}
}
@@ -18940,11 +19675,17 @@ public:
}
void Visit(Expr *E) {
- if (std::find(StopAt.begin(), StopAt.end(), E) != StopAt.end())
+ if (llvm::is_contained(StopAt, E))
return;
Inherited::Visit(E);
}
+ void VisitConstantExpr(ConstantExpr *E) {
+ // Don't mark declarations within a ConstantExpression, as this expression
+ // will be evaluated and folded to a value.
+ return;
+ }
+
void VisitDeclRefExpr(DeclRefExpr *E) {
// If we were asked not to visit local variables, don't.
if (SkipLocalVariables) {
@@ -19848,7 +20589,8 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
auto *DRE = dyn_cast<DeclRefExpr>(E->IgnoreParenImpCasts());
if (DRE) {
auto *FD = cast<FunctionDecl>(DRE->getDecl());
- if (FD->getBuiltinID() == Builtin::BI__noop) {
+ unsigned BuiltinID = FD->getBuiltinID();
+ if (BuiltinID == Builtin::BI__noop) {
E = ImpCastExprToType(E, Context.getPointerType(FD->getType()),
CK_BuiltinFnToFnPtr)
.get();
@@ -19856,6 +20598,36 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
VK_PRValue, SourceLocation(),
FPOptionsOverride());
}
+
+ if (Context.BuiltinInfo.isInStdNamespace(BuiltinID)) {
+ // Any use of these other than a direct call is ill-formed as of C++20,
+ // because they are not addressable functions. In earlier language
+ // modes, warn and force an instantiation of the real body.
+ Diag(E->getBeginLoc(),
+ getLangOpts().CPlusPlus20
+ ? diag::err_use_of_unaddressable_function
+ : diag::warn_cxx20_compat_use_of_unaddressable_function);
+ if (FD->isImplicitlyInstantiable()) {
+ // Require a definition here because a normal attempt at
+ // instantiation for a builtin will be ignored, and we won't try
+ // again later. We assume that the definition of the template
+ // precedes this use.
+ InstantiateFunctionDefinition(E->getBeginLoc(), FD,
+ /*Recursive=*/false,
+ /*DefinitionRequired=*/true,
+ /*AtEndOfTU=*/false);
+ }
+ // Produce a properly-typed reference to the function.
+ CXXScopeSpec SS;
+ SS.Adopt(DRE->getQualifierLoc());
+ TemplateArgumentListInfo TemplateArgs;
+ DRE->copyTemplateArgumentsInto(TemplateArgs);
+ return BuildDeclRefExpr(
+ FD, FD->getType(), VK_LValue, DRE->getNameInfo(),
+ DRE->hasQualifier() ? &SS : nullptr, DRE->getFoundDecl(),
+ DRE->getTemplateKeywordLoc(),
+ DRE->hasExplicitTemplateArgs() ? &TemplateArgs : nullptr);
+ }
}
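// [Editorial sketch, illustrative only; not part of the patch.]
//   auto *p = &std::move<int &>;  // error in C++20, compat warning earlier;
//                                 // a direct call std::move(x) is unaffected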
Diag(E->getBeginLoc(), diag::err_builtin_fn_use);
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp
index 7ce125f5ef82..0d73fcf8bf4e 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExprCXX.cpp
@@ -11,8 +11,6 @@
///
//===----------------------------------------------------------------------===//
-#include "clang/Sema/Template.h"
-#include "clang/Sema/SemaInternal.h"
#include "TreeTransform.h"
#include "TypeLocBuilder.h"
#include "clang/AST/ASTContext.h"
@@ -25,8 +23,10 @@
#include "clang/AST/RecursiveASTVisitor.h"
#include "clang/AST/TypeLoc.h"
#include "clang/Basic/AlignedAllocation.h"
+#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PartialDiagnostic.h"
#include "clang/Basic/TargetInfo.h"
+#include "clang/Basic/TypeTraits.h"
#include "clang/Lex/Preprocessor.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/Initialization.h"
@@ -34,11 +34,14 @@
#include "clang/Sema/ParsedTemplate.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/ScopeInfo.h"
+#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/SemaLambda.h"
+#include "clang/Sema/Template.h"
#include "clang/Sema/TemplateDeduction.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/ErrorHandling.h"
+#include "llvm/Support/TypeSize.h"
using namespace clang;
using namespace sema;
@@ -840,10 +843,10 @@ Sema::ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *Ex) {
break;
}
+ // FIXME: Many of the scope checks here seem incorrect.
if (S->getFlags() &
(Scope::FnScope | Scope::ClassScope | Scope::BlockScope |
- Scope::FunctionPrototypeScope | Scope::ObjCMethodScope |
- Scope::TryScope))
+ Scope::ObjCMethodScope | Scope::TryScope))
break;
}
}
@@ -1467,6 +1470,9 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
// C++1z [expr.type.conv]p1:
// If the type is a placeholder for a deduced class type, [...perform class
// template argument deduction...]
+ // C++2b:
+ // Otherwise, if the type contains a placeholder type, it is replaced by the
+ // type determined by placeholder type deduction.
DeducedType *Deduced = Ty->getContainedDeducedType();
if (Deduced && isa<DeducedTemplateSpecializationType>(Deduced)) {
Ty = DeduceTemplateSpecializationFromInitializer(TInfo, Entity,
@@ -1474,6 +1480,41 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
if (Ty.isNull())
return ExprError();
Entity = InitializedEntity::InitializeTemporary(TInfo, Ty);
+ } else if (Deduced) {
+ MultiExprArg Inits = Exprs;
+ if (ListInitialization) {
+ auto *ILE = cast<InitListExpr>(Exprs[0]);
+ Inits = MultiExprArg(ILE->getInits(), ILE->getNumInits());
+ }
+
+ if (Inits.empty())
+ return ExprError(Diag(TyBeginLoc, diag::err_auto_expr_init_no_expression)
+ << Ty << FullRange);
+ if (Inits.size() > 1) {
+ Expr *FirstBad = Inits[1];
+ return ExprError(Diag(FirstBad->getBeginLoc(),
+ diag::err_auto_expr_init_multiple_expressions)
+ << Ty << FullRange);
+ }
+ if (getLangOpts().CPlusPlus2b) {
+ if (Ty->getAs<AutoType>())
+ Diag(TyBeginLoc, diag::warn_cxx20_compat_auto_expr) << FullRange;
+ }
+ Expr *Deduce = Inits[0];
+ if (isa<InitListExpr>(Deduce))
+ return ExprError(
+ Diag(Deduce->getBeginLoc(), diag::err_auto_expr_init_paren_braces)
+ << ListInitialization << Ty << FullRange);
+ QualType DeducedType;
+ if (DeduceAutoType(TInfo, Deduce, DeducedType) == DAR_Failed)
+ return ExprError(Diag(TyBeginLoc, diag::err_auto_expr_deduction_failure)
+ << Ty << Deduce->getType() << FullRange
+ << Deduce->getSourceRange());
+ if (DeducedType.isNull())
+ return ExprError();
+
+ Ty = DeducedType;
+ Entity = InitializedEntity::InitializeTemporary(TInfo, Ty);
}
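For illustration, a sketch of what this new placeholder-deduction branch accepts and rejects (assuming -std=c++2b; diagnostic names as used above):

    void f() {
      int i = 0;
      auto a = auto(i);        // OK: deduces int and decay-copies i
      auto b = auto{i};        // OK: braced form
      // auto c = auto();      // err_auto_expr_init_no_expression
      // auto d = auto(1, 2);  // err_auto_expr_init_multiple_expressions
      // auto e = auto({1});   // err_auto_expr_init_paren_braces
    }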
if (Ty->isDependentType() || CallExpr::hasAnyTypeDependentArguments(Exprs)) {
@@ -1559,7 +1600,7 @@ Sema::BuildCXXTypeConstructExpr(TypeSourceInfo *TInfo,
bool Sema::isUsualDeallocationFunction(const CXXMethodDecl *Method) {
// [CUDA] Ignore this function, if we can't call it.
- const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext);
+ const FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true);
if (getLangOpts().CUDA) {
auto CallPreference = IdentifyCUDAPreference(Caller, Method);
// If it's not callable at all, it's not the right function.
@@ -1653,7 +1694,7 @@ namespace {
// In CUDA, determine how much we'd like / dislike to call this.
if (S.getLangOpts().CUDA)
- if (auto *Caller = dyn_cast<FunctionDecl>(S.CurContext))
+ if (auto *Caller = S.getCurFunctionDecl(/*AllowLambda=*/true))
CUDAPref = S.IdentifyCUDAPreference(Caller, FD);
}
@@ -1885,7 +1926,7 @@ Sema::isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const {
return false;
Optional<unsigned> AlignmentParam;
if (FD.isReplaceableGlobalAllocationFunction(&AlignmentParam) &&
- AlignmentParam.hasValue())
+ AlignmentParam)
return true;
return false;
}
@@ -1936,12 +1977,10 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
initStyle = CXXNewExpr::NoInit;
}
- Expr **Inits = &Initializer;
- unsigned NumInits = Initializer ? 1 : 0;
+ MultiExprArg Exprs(&Initializer, Initializer ? 1 : 0);
if (ParenListExpr *List = dyn_cast_or_null<ParenListExpr>(Initializer)) {
assert(initStyle == CXXNewExpr::CallInit && "paren init for non-call init");
- Inits = List->getExprs();
- NumInits = List->getNumExprs();
+ Exprs = MultiExprArg(List->getExprs(), List->getNumExprs());
}
// C++11 [expr.new]p15:
@@ -1977,23 +2016,21 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
InitializedEntity Entity
= InitializedEntity::InitializeNew(StartLoc, AllocType);
AllocType = DeduceTemplateSpecializationFromInitializer(
- AllocTypeInfo, Entity, Kind, MultiExprArg(Inits, NumInits));
+ AllocTypeInfo, Entity, Kind, Exprs);
if (AllocType.isNull())
return ExprError();
} else if (Deduced) {
+ MultiExprArg Inits = Exprs;
bool Braced = (initStyle == CXXNewExpr::ListInit);
- if (NumInits == 1) {
- if (auto p = dyn_cast_or_null<InitListExpr>(Inits[0])) {
- Inits = p->getInits();
- NumInits = p->getNumInits();
- Braced = true;
- }
+ if (Braced) {
+ auto *ILE = cast<InitListExpr>(Exprs[0]);
+ Inits = MultiExprArg(ILE->getInits(), ILE->getNumInits());
}
- if (initStyle == CXXNewExpr::NoInit || NumInits == 0)
+ if (initStyle == CXXNewExpr::NoInit || Inits.empty())
return ExprError(Diag(StartLoc, diag::err_auto_new_requires_ctor_arg)
<< AllocType << TypeRange);
- if (NumInits > 1) {
+ if (Inits.size() > 1) {
Expr *FirstBad = Inits[1];
return ExprError(Diag(FirstBad->getBeginLoc(),
diag::err_auto_new_ctor_multiple_expressions)
@@ -2003,6 +2040,10 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
Diag(Initializer->getBeginLoc(), diag::ext_auto_new_list_init)
<< AllocType << TypeRange;
Expr *Deduce = Inits[0];
+ if (isa<InitListExpr>(Deduce))
+ return ExprError(
+ Diag(Deduce->getBeginLoc(), diag::err_auto_expr_init_paren_braces)
+ << Braced << AllocType << TypeRange);
QualType DeducedType;
if (DeduceAutoType(AllocTypeInfo, Deduce, DeducedType) == DAR_Failed)
return ExprError(Diag(StartLoc, diag::err_auto_new_deduction_failure)
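By analogy, a sketch of the 'new auto' cases this hunk covers (illustrative only):

    void g() {
      auto *p = new auto(42);       // OK: deduces int
      // auto *q = new auto({42});  // now rejected: err_auto_expr_init_paren_braces
      // auto *r = new auto;        // err_auto_new_requires_ctor_arg
      delete p;
    }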
@@ -2190,7 +2231,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
!Expr::hasAnyTypeDependentArguments(PlacementArgs) &&
FindAllocationFunctions(
StartLoc, SourceRange(PlacementLParen, PlacementRParen), Scope, Scope,
- AllocType, ArraySize.hasValue(), PassAlignment, PlacementArgs,
+ AllocType, ArraySize.has_value(), PassAlignment, PlacementArgs,
OperatorNew, OperatorDelete))
return ExprError();
@@ -2233,10 +2274,10 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// How many bytes do we want to allocate here?
llvm::Optional<llvm::APInt> AllocationSize;
- if (!ArraySize.hasValue() && !AllocType->isDependentType()) {
+ if (!ArraySize && !AllocType->isDependentType()) {
// For non-array operator new, we only want to allocate one element.
AllocationSize = SingleEltSize;
- } else if (KnownArraySize.hasValue() && !AllocType->isDependentType()) {
+ } else if (KnownArraySize && !AllocType->isDependentType()) {
// For array operator new, only deal with static array size case.
bool Overflow;
AllocationSize = llvm::APInt(SizeTyWidth, *KnownArraySize)
@@ -2248,7 +2289,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
}
IntegerLiteral AllocationSizeLiteral(
- Context, AllocationSize.getValueOr(llvm::APInt::getZero(SizeTyWidth)),
+ Context, AllocationSize.value_or(llvm::APInt::getZero(SizeTyWidth)),
SizeTy, SourceLocation());
// Otherwise, if we failed to constant-fold the allocation size, we'll
// just give up and pass-in something opaque, that isn't a null pointer.
@@ -2273,7 +2314,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// Adjust placement args by prepending conjured size and alignment exprs.
llvm::SmallVector<Expr *, 8> CallArgs;
CallArgs.reserve(NumImplicitArgs + PlacementArgs.size());
- CallArgs.emplace_back(AllocationSize.hasValue()
+ CallArgs.emplace_back(AllocationSize
? static_cast<Expr *>(&AllocationSizeLiteral)
: &OpaqueAllocationSize);
if (PassAlignment)
@@ -2303,8 +2344,8 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// Initializer lists are also allowed, in C++11. Rely on the parser for the
// dialect distinction.
if (ArraySize && !isLegalArrayNewInitializer(initStyle, Initializer)) {
- SourceRange InitRange(Inits[0]->getBeginLoc(),
- Inits[NumInits - 1]->getEndLoc());
+ SourceRange InitRange(Exprs.front()->getBeginLoc(),
+ Exprs.back()->getEndLoc());
Diag(StartLoc, diag::err_new_array_init_args) << InitRange;
return ExprError();
}
@@ -2312,8 +2353,7 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
// If we can perform the initialization, and we've not already done so,
// do it now.
if (!AllocType->isDependentType() &&
- !Expr::hasAnyTypeDependentArguments(
- llvm::makeArrayRef(Inits, NumInits))) {
+ !Expr::hasAnyTypeDependentArguments(Exprs)) {
// The type we initialize is the complete type, including the array bound.
QualType InitType;
if (KnownArraySize)
@@ -2330,10 +2370,8 @@ Sema::BuildCXXNew(SourceRange Range, bool UseGlobal,
InitializedEntity Entity
= InitializedEntity::InitializeNew(StartLoc, InitType);
- InitializationSequence InitSeq(*this, Entity, Kind,
- MultiExprArg(Inits, NumInits));
- ExprResult FullInit = InitSeq.Perform(*this, Entity, Kind,
- MultiExprArg(Inits, NumInits));
+ InitializationSequence InitSeq(*this, Entity, Kind, Exprs);
+ ExprResult FullInit = InitSeq.Perform(*this, Entity, Kind, Exprs);
if (FullInit.isInvalid())
return ExprError();
@@ -2795,7 +2833,8 @@ bool Sema::FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
}
if (getLangOpts().CUDA)
- EraseUnwantedCUDAMatches(dyn_cast<FunctionDecl>(CurContext), Matches);
+ EraseUnwantedCUDAMatches(getCurFunctionDecl(/*AllowLambda=*/true),
+ Matches);
} else {
// C++1y [expr.new]p22:
// For a non-placement allocation function, the normal deallocation
@@ -3706,7 +3745,7 @@ static bool resolveBuiltinNewDeleteOverload(Sema &S, CallExpr *TheCall,
// We do our own custom access checks below.
R.suppressDiagnostics();
- SmallVector<Expr *, 8> Args(TheCall->arg_begin(), TheCall->arg_end());
+ SmallVector<Expr *, 8> Args(TheCall->arguments());
OverloadCandidateSet Candidates(R.getNameLoc(),
OverloadCandidateSet::CSK_Normal);
for (LookupResult::iterator FnOvl = R.begin(), FnOvlEnd = R.end();
@@ -3970,7 +4009,7 @@ Sema::IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType) {
case StringLiteral::UTF32:
// We don't allow UTF literals to be implicitly converted
break;
- case StringLiteral::Ascii:
+ case StringLiteral::Ordinary:
return (ToPointeeType->getKind() == BuiltinType::Char_U ||
ToPointeeType->getKind() == BuiltinType::Char_S);
case StringLiteral::Wide:
@@ -4201,6 +4240,14 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
return ExprError();
From = FixOverloadedFunctionReference(From, Found, Fn);
+
+ // We might get back another placeholder expression if we resolved to a
+ // builtin.
+ ExprResult Checked = CheckPlaceholderExpr(From);
+ if (Checked.isInvalid())
+ return ExprError();
+
+ From = Checked.get();
FromType = From->getType();
}
@@ -4605,6 +4652,13 @@ Sema::PerformImplicitConversion(Expr *From, QualType ToType,
From->getType()->getPointeeType().getAddressSpace())
CK = CK_AddressSpaceConversion;
+ if (!isCast(CCK) &&
+ !ToType->getPointeeType().getQualifiers().hasUnaligned() &&
+ From->getType()->getPointeeType().getQualifiers().hasUnaligned()) {
+ Diag(From->getBeginLoc(), diag::warn_imp_cast_drops_unaligned)
+ << InitialFromType << ToType;
+ }
+
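A sketch of the conversion the new warning flags (assumes the Microsoft __unaligned extension, i.e. -fms-extensions):

    void f(int __unaligned *src) {
      int *dst = src;  // implicitly drops __unaligned: warn_imp_cast_drops_unaligned
    }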
From = ImpCastExprToType(From, ToType.getNonLValueExprType(Context), CK, VK,
/*BasePath=*/nullptr, CCK)
.get();
@@ -4746,6 +4800,8 @@ static bool CheckUnaryTypeTraitTypeCompleteness(Sema &S, TypeTrait UTT,
case UTT_IsStandardLayout:
case UTT_IsPOD:
case UTT_IsLiteral:
+ // By analogy, is_trivially_relocatable imposes the same constraints.
+ case UTT_IsTriviallyRelocatable:
// Per the GCC type traits documentation, T shall be a complete type, cv void,
// or an array of unknown bound. But GCC actually imposes the same constraints
// as above.
@@ -5210,6 +5266,8 @@ static bool EvaluateUnaryTypeTrait(Sema &Self, TypeTrait UTT,
return !T->isIncompleteType();
case UTT_HasUniqueObjectRepresentations:
return C.hasUniqueObjectRepresentations(T);
+ case UTT_IsTriviallyRelocatable:
+ return T.isTriviallyRelocatableType(C);
}
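A sketch of the new trait in use (assuming Clang's __is_trivially_relocatable spelling):

    static_assert(__is_trivially_relocatable(int), "");
    struct S { S(S &&); };                             // user-provided move ctor
    static_assert(!__is_trivially_relocatable(S), "");
    struct __attribute__((trivial_abi)) T { T(T &&); };
    static_assert(__is_trivially_relocatable(T), "");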
}
@@ -6048,8 +6106,17 @@ static bool isValidVectorForConditionalCondition(ASTContext &Ctx,
return false;
const QualType EltTy =
cast<VectorType>(CondTy.getCanonicalType())->getElementType();
- assert(!EltTy->isBooleanType() && !EltTy->isEnumeralType() &&
- "Vectors cant be boolean or enum types");
+ assert(!EltTy->isEnumeralType() && "Vectors can't be enum types");
+ return EltTy->isIntegralType(Ctx);
+}
+
+static bool isValidSizelessVectorForConditionalCondition(ASTContext &Ctx,
+ QualType CondTy) {
+ if (!CondTy->isVLSTBuiltinType())
+ return false;
+ const QualType EltTy =
+ cast<BuiltinType>(CondTy.getCanonicalType())->getSveEltType(Ctx);
+ assert(!EltTy->isEnumeralType() && "Vectors can't be enum types");
return EltTy->isIntegralType(Ctx);
}
@@ -6088,7 +6155,9 @@ QualType Sema::CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
} else if (LHSVT || RHSVT) {
ResultType = CheckVectorOperands(
LHS, RHS, QuestionLoc, /*isCompAssign*/ false, /*AllowBothBool*/ true,
- /*AllowBoolConversions*/ false);
+ /*AllowBoolConversions*/ false,
+ /*AllowBoolOperation*/ true,
+ /*ReportInvalid*/ true);
if (ResultType.isNull())
return {};
} else {
@@ -6142,6 +6211,89 @@ QualType Sema::CheckVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS,
return ResultType;
}
+QualType Sema::CheckSizelessVectorConditionalTypes(ExprResult &Cond,
+ ExprResult &LHS,
+ ExprResult &RHS,
+ SourceLocation QuestionLoc) {
+ LHS = DefaultFunctionArrayLvalueConversion(LHS.get());
+ RHS = DefaultFunctionArrayLvalueConversion(RHS.get());
+
+ QualType CondType = Cond.get()->getType();
+ const auto *CondBT = CondType->castAs<BuiltinType>();
+ QualType CondElementTy = CondBT->getSveEltType(Context);
+ llvm::ElementCount CondElementCount =
+ Context.getBuiltinVectorTypeInfo(CondBT).EC;
+
+ QualType LHSType = LHS.get()->getType();
+ const auto *LHSBT =
+ LHSType->isVLSTBuiltinType() ? LHSType->getAs<BuiltinType>() : nullptr;
+ QualType RHSType = RHS.get()->getType();
+ const auto *RHSBT =
+ RHSType->isVLSTBuiltinType() ? RHSType->getAs<BuiltinType>() : nullptr;
+
+ QualType ResultType;
+
+ if (LHSBT && RHSBT) {
+ // If both are sizeless vector types, they must be the same type.
+ if (!Context.hasSameType(LHSType, RHSType)) {
+ Diag(QuestionLoc, diag::err_conditional_vector_mismatched)
+ << LHSType << RHSType;
+ return QualType();
+ }
+ ResultType = LHSType;
+ } else if (LHSBT || RHSBT) {
+ ResultType = CheckSizelessVectorOperands(
+ LHS, RHS, QuestionLoc, /*IsCompAssign*/ false, ACK_Conditional);
+ if (ResultType.isNull())
+ return QualType();
+ } else {
+ // Both operands are scalars, so splat them to the result vector type.
+ QualType ResultElementTy;
+ LHSType = LHSType.getCanonicalType().getUnqualifiedType();
+ RHSType = RHSType.getCanonicalType().getUnqualifiedType();
+
+ if (Context.hasSameType(LHSType, RHSType))
+ ResultElementTy = LHSType;
+ else
+ ResultElementTy =
+ UsualArithmeticConversions(LHS, RHS, QuestionLoc, ACK_Conditional);
+
+ if (ResultElementTy->isEnumeralType()) {
+ Diag(QuestionLoc, diag::err_conditional_vector_operand_type)
+ << ResultElementTy;
+ return QualType();
+ }
+
+ ResultType = Context.getScalableVectorType(
+ ResultElementTy, CondElementCount.getKnownMinValue());
+
+ LHS = ImpCastExprToType(LHS.get(), ResultType, CK_VectorSplat);
+ RHS = ImpCastExprToType(RHS.get(), ResultType, CK_VectorSplat);
+ }
+
+ assert(!ResultType.isNull() && ResultType->isVLSTBuiltinType() &&
+ "Result should have been a vector type");
+ auto *ResultBuiltinTy = ResultType->castAs<BuiltinType>();
+ QualType ResultElementTy = ResultBuiltinTy->getSveEltType(Context);
+ llvm::ElementCount ResultElementCount =
+ Context.getBuiltinVectorTypeInfo(ResultBuiltinTy).EC;
+
+ if (ResultElementCount != CondElementCount) {
+ Diag(QuestionLoc, diag::err_conditional_vector_size)
+ << CondType << ResultType;
+ return QualType();
+ }
+
+ if (Context.getTypeSize(ResultElementTy) !=
+ Context.getTypeSize(CondElementTy)) {
+ Diag(QuestionLoc, diag::err_conditional_vector_element_size)
+ << CondType << ResultType;
+ return QualType();
+ }
+
+ return ResultType;
+}
+
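An illustrative sketch of the conditionals this enables (assumes an AArch64 SVE target and <arm_sve.h>):

    #include <arm_sve.h>
    svint32_t select(svint32_t Cond, svint32_t A, svint32_t B) {
      return Cond ? A : B;  // element-wise select; both arms must have the same type
    }
    svint32_t splat(svint32_t Cond, int S) {
      return Cond ? S : 2;  // scalar arms are splatted to the result type
    }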
/// Check the operands of ?: under C++ semantics.
///
/// See C++ [expr.cond]. Note that LHS is never null, even for the GNU x ?: y
@@ -6175,10 +6327,14 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
bool IsVectorConditional =
isValidVectorForConditionalCondition(Context, Cond.get()->getType());
+ bool IsSizelessVectorConditional =
+ isValidSizelessVectorForConditionalCondition(Context,
+ Cond.get()->getType());
+
// C++11 [expr.cond]p1
// The first expression is contextually converted to bool.
if (!Cond.get()->isTypeDependent()) {
- ExprResult CondRes = IsVectorConditional
+ ExprResult CondRes = IsVectorConditional || IsSizelessVectorConditional
? DefaultFunctionArrayLvalueConversion(Cond.get())
: CheckCXXBooleanCondition(Cond.get());
if (CondRes.isInvalid())
@@ -6247,6 +6403,9 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
if (IsVectorConditional)
return CheckVectorConditionalTypes(Cond, LHS, RHS, QuestionLoc);
+ if (IsSizelessVectorConditional)
+ return CheckSizelessVectorConditionalTypes(Cond, LHS, RHS, QuestionLoc);
+
// C++11 [expr.cond]p3
// Otherwise, if the second and third operand have different types, and
// either has (cv) class type [...] an attempt is made to convert each of
@@ -6415,9 +6574,11 @@ QualType Sema::CXXCheckConditionalOperands(ExprResult &Cond, ExprResult &LHS,
// Extension: conditional operator involving vector types.
if (LTy->isVectorType() || RTy->isVectorType())
- return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/false,
- /*AllowBothBool*/true,
- /*AllowBoolConversions*/false);
+ return CheckVectorOperands(LHS, RHS, QuestionLoc, /*isCompAssign*/ false,
+ /*AllowBothBool*/ true,
+ /*AllowBoolConversions*/ false,
+ /*AllowBoolOperation*/ false,
+ /*ReportInvalid*/ true);
// -- The second and third operands have arithmetic or enumeration type;
// the usual arithmetic conversions are performed to bring them to a
@@ -7134,8 +7295,9 @@ Stmt *Sema::MaybeCreateStmtWithCleanups(Stmt *SubStmt) {
// a StmtExpr; currently this is only used for asm statements.
// This is hacky, either create a new CXXStmtWithTemporaries statement or
// a new AsmStmtWithTemporaries.
- CompoundStmt *CompStmt = CompoundStmt::Create(
- Context, SubStmt, SourceLocation(), SourceLocation());
+ CompoundStmt *CompStmt =
+ CompoundStmt::Create(Context, SubStmt, FPOptionsOverride(),
+ SourceLocation(), SourceLocation());
Expr *E = new (Context)
StmtExpr(CompStmt, Context.VoidTy, SourceLocation(), SourceLocation(),
/*FIXME TemplateDepth=*/0);
@@ -7875,6 +8037,8 @@ ExprResult Sema::ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation,
static void MaybeDecrementCount(
Expr *E, llvm::DenseMap<const VarDecl *, int> &RefsMinusAssignments) {
DeclRefExpr *LHS = nullptr;
+ bool IsCompoundAssign = false;
+ bool IsIncrementDecrementUnaryOp = false;
if (BinaryOperator *BO = dyn_cast<BinaryOperator>(E)) {
if (BO->getLHS()->getType()->isDependentType() ||
BO->getRHS()->getType()->isDependentType()) {
@@ -7882,17 +8046,30 @@ static void MaybeDecrementCount(
return;
} else if (!BO->isAssignmentOp())
return;
+ else
+ IsCompoundAssign = BO->isCompoundAssignmentOp();
LHS = dyn_cast<DeclRefExpr>(BO->getLHS());
} else if (CXXOperatorCallExpr *COCE = dyn_cast<CXXOperatorCallExpr>(E)) {
if (COCE->getOperator() != OO_Equal)
return;
LHS = dyn_cast<DeclRefExpr>(COCE->getArg(0));
+ } else if (UnaryOperator *UO = dyn_cast<UnaryOperator>(E)) {
+ if (!UO->isIncrementDecrementOp())
+ return;
+ IsIncrementDecrementUnaryOp = true;
+ LHS = dyn_cast<DeclRefExpr>(UO->getSubExpr());
}
if (!LHS)
return;
VarDecl *VD = dyn_cast<VarDecl>(LHS->getDecl());
if (!VD)
return;
+ // Don't decrement RefsMinusAssignments if a volatile variable is modified
+ // with a compound assignment (+=, ...) or an increment/decrement unary
+ // operator, to avoid a spurious unused-but-set-variable warning.
+ if ((IsCompoundAssign || IsIncrementDecrementUnaryOp) &&
+ VD->getType().isVolatileQualified())
+ return;
auto iter = RefsMinusAssignments.find(VD);
if (iter == RefsMinusAssignments.end())
return;
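A sketch of the cases the new guard covers, keeping -Wunused-but-set-variable quiet for them:

    void f(void) {
      volatile int v = 0;
      v += 1;  // compound assignment to a volatile still counts as a use
      v++;     // increment/decrement likewise
    }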
@@ -8063,8 +8240,7 @@ static void CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(
if (const Optional<unsigned> Index =
getStackIndexOfNearestEnclosingCaptureCapableLambda(
S.FunctionScopes, Var, S))
- S.MarkCaptureUsedInEnclosingContext(Var, VarExpr->getExprLoc(),
- Index.getValue());
+ S.MarkCaptureUsedInEnclosingContext(Var, VarExpr->getExprLoc(), *Index);
const bool IsVarNeverAConstantExpression =
VariableCanNeverBeAConstantExpression(Var, S.Context);
if (!IsFullExprInstantiationDependent || IsVarNeverAConstantExpression) {
@@ -8097,7 +8273,7 @@ static void CheckIfAnyEnclosingLambdasMustCaptureAnyPotentialCaptures(
if (const Optional<unsigned> Index =
getStackIndexOfNearestEnclosingCaptureCapableLambda(
S.FunctionScopes, /*0 is 'this'*/ nullptr, S)) {
- const unsigned FunctionScopeIndexOfCapturableLambda = Index.getValue();
+ const unsigned FunctionScopeIndexOfCapturableLambda = *Index;
S.CheckCXXThisCapture(CurrentLSI->PotentialThisCaptureLocation,
/*Explicit*/ false, /*BuildAndDiagnose*/ true,
&FunctionScopeIndexOfCapturableLambda);
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExprMember.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExprMember.cpp
index dfd93aa4638d..c9d9ef31f3ee 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExprMember.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExprMember.cpp
@@ -940,7 +940,7 @@ static bool IsInFnTryBlockHandler(const Scope *S) {
// function scope. If a FnTryCatchScope is found, check whether the TryScope
// flag is set. If it is not, it's a function-try-block handler.
for (; S != S->getFnParent(); S = S->getParent()) {
- if (S->getFlags() & Scope::FnTryCatchScope)
+ if (S->isFnTryCatchScope())
return (S->getFlags() & Scope::TryScope) != Scope::TryScope;
}
return false;
@@ -1292,6 +1292,21 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
}
}
+ // If the base type is an atomic type, this access is undefined behavior per
+ // C11 6.5.2.3p5. Instead of giving a typecheck error, we'll warn the user
+ // about the UB and recover by converting the atomic lvalue into a non-atomic
+ // lvalue. Because this is inherently unsafe as an atomic operation, the
+ // warning defaults to an error.
+ if (const auto *ATy = BaseType->getAs<AtomicType>()) {
+ S.DiagRuntimeBehavior(OpLoc, nullptr,
+ S.PDiag(diag::warn_atomic_member_access));
+ BaseType = ATy->getValueType().getUnqualifiedType();
+ BaseExpr = ImplicitCastExpr::Create(
+ S.Context, IsArrow ? S.Context.getPointerType(BaseType) : BaseType,
+ CK_AtomicToNonAtomic, BaseExpr.get(), nullptr,
+ BaseExpr.get()->getValueKind(), FPOptionsOverride());
+ }
+
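A sketch of the C11 pattern this recovers from (the warning defaults to an error):

    struct Pair { int x, y; };
    _Atomic struct Pair p;
    int g(void) { return p.x; }  // warn_atomic_member_access; recovered via
                                 // CK_AtomicToNonAtomic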
// Handle field access to simple records.
if (const RecordType *RTy = BaseType->getAs<RecordType>()) {
TypoExpr *TE = nullptr;
@@ -1592,6 +1607,16 @@ static ExprResult LookupMemberExpr(Sema &S, LookupResult &R,
false);
}
+ if (BaseType->isExtVectorBoolType()) {
+ // We disallow element access for ext_vector_type bool. There is no way to
+ // materialize a reference to a vector element as a pointer (each element is
+ // one bit in the vector).
+ S.Diag(R.getNameLoc(), diag::err_ext_vector_component_name_illegal)
+ << MemberName
+ << (BaseExpr.get() ? BaseExpr.get()->getSourceRange() : SourceRange());
+ return ExprError();
+ }
+
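For illustration, the access the new check rejects (assumes the ext_vector_type extension):

    typedef bool bool4 __attribute__((ext_vector_type(4)));
    bool4 b;
    // bool x = b.x;  // err_ext_vector_component_name_illegal: elements are single bits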
// Handle 'field access' to vectors, such as 'V.xx'.
if (BaseType->isExtVectorType()) {
// FIXME: this expr should store IsArrow.
@@ -1711,6 +1736,9 @@ ExprResult Sema::ActOnMemberAccessExpr(Scope *S, Expr *Base,
DeclarationName Name = NameInfo.getName();
bool IsArrow = (OpKind == tok::arrow);
+ if (getLangOpts().HLSL && IsArrow)
+ return ExprError(Diag(OpLoc, diag::err_hlsl_operator_unsupported) << 2);
+
NamedDecl *FirstQualifierInScope
= (!SS.isSet() ? nullptr : FindFirstQualifierInScope(S, SS.getScopeRep()));
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp b/contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp
index 4702c405fb4e..a6c92d1a338d 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaExprObjC.cpp
@@ -50,7 +50,7 @@ ExprResult Sema::ParseObjCStringLiteral(SourceLocation *AtLocs,
S = cast<StringLiteral>(E);
// ObjC strings can't be wide or UTF.
- if (!S->isAscii()) {
+ if (!S->isOrdinary()) {
Diag(S->getBeginLoc(), diag::err_cfstring_literal_not_string_constant)
<< S->getSourceRange();
return true;
@@ -70,7 +70,7 @@ ExprResult Sema::ParseObjCStringLiteral(SourceLocation *AtLocs,
QualType StrTy = Context.getConstantArrayType(
CAT->getElementType(), llvm::APInt(32, StrBuf.size() + 1), nullptr,
CAT->getSizeModifier(), CAT->getIndexTypeCVRQualifiers());
- S = StringLiteral::Create(Context, StrBuf, StringLiteral::Ascii,
+ S = StringLiteral::Create(Context, StrBuf, StringLiteral::Ordinary,
/*Pascal=*/false, StrTy, &StrLocs[0],
StrLocs.size());
}
@@ -448,7 +448,7 @@ static ExprResult CheckObjCCollectionLiteralElement(Sema &S, Expr *Element,
}
// If this is potentially an Objective-C string literal, add the '@'.
else if (StringLiteral *String = dyn_cast<StringLiteral>(OrigElement)) {
- if (String->isAscii()) {
+ if (String->isOrdinary()) {
S.Diag(OrigElement->getBeginLoc(), diag::err_box_literal_collection)
<< 0 << OrigElement->getSourceRange()
<< FixItHint::CreateInsertion(OrigElement->getBeginLoc(), "@");
@@ -533,7 +533,7 @@ ExprResult Sema::BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr) {
if (CE->getCastKind() == CK_ArrayToPointerDecay)
if (auto *SL =
dyn_cast<StringLiteral>(CE->getSubExpr()->IgnoreParens())) {
- assert((SL->isAscii() || SL->isUTF8()) &&
+ assert((SL->isOrdinary() || SL->isUTF8()) &&
"unexpected character encoding");
StringRef Str = SL->getString();
const llvm::UTF8 *StrBegin = Str.bytes_begin();
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaInit.cpp b/contrib/llvm-project/clang/lib/Sema/SemaInit.cpp
index 119a90deb9c2..d3b454843234 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaInit.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaInit.cpp
@@ -87,7 +87,7 @@ static StringInitFailureKind IsStringInit(Expr *Init, const ArrayType *AT,
if (ElemTy->isChar8Type())
return SIF_None;
LLVM_FALLTHROUGH;
- case StringLiteral::Ascii:
+ case StringLiteral::Ordinary:
// char array can be initialized with a narrow string.
// Only allow char x[] = "foo"; not char x[] = L"foo";
if (ElemTy->isCharType())
@@ -1696,7 +1696,7 @@ void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
return;
}
- if (!SemaRef.getLangOpts().OpenCL) {
+ if (!SemaRef.getLangOpts().OpenCL && !SemaRef.getLangOpts().HLSL) {
// If the initializing element is a vector, try to copy-initialize
// instead of breaking it apart (which is doomed to failure anyway).
Expr *Init = IList->getInit(Index);
@@ -1790,7 +1790,7 @@ void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
InitializedEntity ElementEntity =
InitializedEntity::InitializeElement(SemaRef.Context, 0, Entity);
- // OpenCL initializers allows vectors to be constructed from vectors.
+ // OpenCL and HLSL initializers allow vectors to be constructed from vectors.
for (unsigned i = 0; i < maxElements; ++i) {
// Don't attempt to go past the end of the init list
if (Index >= IList->getNumInits())
@@ -1819,7 +1819,7 @@ void InitListChecker::CheckVectorType(const InitializedEntity &Entity,
}
}
- // OpenCL requires all elements to be initialized.
+ // OpenCL and HLSL require all elements to be initialized.
if (numEltsInit != maxElements) {
if (!VerifyOnly)
SemaRef.Diag(IList->getBeginLoc(),
@@ -2004,10 +2004,6 @@ bool InitListChecker::CheckFlexibleArrayInit(const InitializedEntity &Entity,
cast<InitListExpr>(InitExpr)->getNumInits() == 0) {
// Empty flexible array init always allowed as an extension
FlexArrayDiag = diag::ext_flexible_array_init;
- } else if (SemaRef.getLangOpts().CPlusPlus) {
- // Disallow flexible array init in C++; it is not required for gcc
- // compatibility, and it needs work to IRGen correctly in general.
- FlexArrayDiag = diag::err_flexible_array_init;
} else if (!TopLevelObject) {
// Disallow flexible array init on non-top-level object
FlexArrayDiag = diag::err_flexible_array_init;
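With the C++-specific rejection removed, a sketch of a top-level flexible array member initialization that is now an extension rather than an error in C++:

    struct S { int n; char tail[]; };
    struct S s = { 1, { 'a', 'b' } };  // ext_flexible_array_init, as in C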
@@ -2127,6 +2123,9 @@ void InitListChecker::CheckStructUnionTypes(
// worthwhile to skip over the rest of the initializer, though.
RecordDecl *RD = DeclType->castAs<RecordType>()->getDecl();
RecordDecl::field_iterator FieldEnd = RD->field_end();
+ size_t NumRecordDecls = llvm::count_if(RD->decls(), [&](const Decl *D) {
+ return isa<FieldDecl>(D) || isa<RecordDecl>(D);
+ });
bool CheckForMissingFields =
!IList->isIdiomaticZeroInitializer(SemaRef.getLangOpts());
bool HasDesignatedInit = false;
@@ -2171,6 +2170,35 @@ void InitListChecker::CheckStructUnionTypes(
continue;
}
+ // Check if this is an initializer of one of the forms:
+ //
+ // struct foo f = {};
+ // struct foo g = {0};
+ //
+ // These are okay for randomized structures. [C99 6.7.8p19]
+ //
+ // Also, if there is only one element in the structure, we allow something
+ // like this, because it's really not randomized in the traditional sense.
+ //
+ // struct foo h = {bar};
+ auto IsZeroInitializer = [&](const Expr *I) {
+ if (IList->getNumInits() == 1) {
+ if (NumRecordDecls == 1)
+ return true;
+ if (const auto *IL = dyn_cast<IntegerLiteral>(I))
+ return IL->getValue().isZero();
+ }
+ return false;
+ };
+
+ // Don't allow non-designated initializers on randomized structures.
+ if (RD->isRandomized() && !IsZeroInitializer(Init)) {
+ if (!VerifyOnly)
+ SemaRef.Diag(InitLoc, diag::err_non_designated_init_used);
+ hadError = true;
+ break;
+ }
+
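A sketch of the initializers involved (assumes the randomize_layout attribute of the randstruct feature):

    struct foo { int a; int b; } __attribute__((randomize_layout));
    struct foo f = {};                // OK: empty initializer
    struct foo g = {0};               // OK: zero initializer
    // struct foo h = {1, 2};         // err_non_designated_init_used
    struct foo i = {.a = 1, .b = 2};  // OK: designated initializers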
if (Field == FieldEnd) {
// We've run out of fields. We're done.
break;
@@ -3441,7 +3469,7 @@ unsigned InitializedEntity::dumpImpl(raw_ostream &OS) const {
D->printQualifiedName(OS);
}
- OS << " '" << getType().getAsString() << "'\n";
+ OS << " '" << getType() << "'\n";
return Depth + 1;
}
@@ -4475,13 +4503,13 @@ static void TryListInitialization(Sema &S,
Kind.getKind() == InitializationKind::IK_DirectList &&
ET && ET->getDecl()->isFixed() &&
!S.Context.hasSameUnqualifiedType(E->getType(), DestType) &&
- (E->getType()->isIntegralOrEnumerationType() ||
+ (E->getType()->isIntegralOrUnscopedEnumerationType() ||
E->getType()->isFloatingType())) {
// There are two ways that T(v) can work when T is an enumeration type.
// If there is either an implicit conversion sequence from v to T or
// a conversion function that can convert from v to T, then we use that.
- // Otherwise, if v is of integral, enumeration, or floating-point type,
- // it is converted to the enumeration type via its underlying type.
+ // Otherwise, if v is of integral, unscoped enumeration, or floating-point
+ // type, it is converted to the enumeration type via its underlying type.
// There is no overlap possible between these two cases (except when the
// source value is already of the destination type), and the first
// case is handled by the general case for single-element lists below.
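A sketch of the distinction the tightened check captures:

    enum class E : int {};
    enum class F : int {};
    E e{42};      // OK since C++17: integral source converts via the underlying type
    // E x{F{}};  // scoped enumeration source: no longer accepted by this path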
@@ -5941,6 +5969,37 @@ void InitializationSequence::InitializeFrom(Sema &S,
assert(Args.size() >= 1 && "Zero-argument case handled above");
+ // For HLSL ext vector types we allow list initialization behavior for C++
+ // constructor syntax. This is accomplished by converting the initialization
+ // arguments to an InitListExpr late.
+ if (S.getLangOpts().HLSL && DestType->isExtVectorType() &&
+ (SourceType.isNull() ||
+ !Context.hasSameUnqualifiedType(SourceType, DestType))) {
+
+ llvm::SmallVector<Expr *> InitArgs;
+ for (auto Arg : Args) {
+ if (Arg->getType()->isExtVectorType()) {
+ const auto *VTy = Arg->getType()->castAs<ExtVectorType>();
+ unsigned Elm = VTy->getNumElements();
+ for (unsigned Idx = 0; Idx < Elm; ++Idx) {
+ InitArgs.emplace_back(new (Context) ArraySubscriptExpr(
+ Arg,
+ IntegerLiteral::Create(
+ Context, llvm::APInt(Context.getIntWidth(Context.IntTy), Idx),
+ Context.IntTy, SourceLocation()),
+ VTy->getElementType(), Arg->getValueKind(), Arg->getObjectKind(),
+ SourceLocation()));
+ }
+ } else
+ InitArgs.emplace_back(Arg);
+ }
+ InitListExpr *ILE = new (Context) InitListExpr(
+ S.getASTContext(), SourceLocation(), InitArgs, SourceLocation());
+ Args[0] = ILE;
+ AddListInitializationStep(DestType);
+ return;
+ }
+
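An HLSL-flavored sketch of the initialization this enables (vector arguments are flattened element by element):

    float2 xy = float2(1.0, 2.0);
    float4 v  = float4(xy, 3.0, 4.0);  // treated as {xy[0], xy[1], 3.0, 4.0}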
// The remaining cases all need a source type.
if (Args.size() > 1) {
SetFailed(FK_TooManyInitsForScalar);
@@ -8101,6 +8160,12 @@ ExprResult InitializationSequence::Perform(Sema &S,
ExprResult CurInit((Expr *)nullptr);
SmallVector<Expr*, 4> ArrayLoopCommonExprs;
+ // HLSL allows vector initialization to function like list initialization, but
+ // uses the syntax of a C++-like constructor.
+ bool IsHLSLVectorInit = S.getLangOpts().HLSL && DestType->isExtVectorType() &&
+ isa<InitListExpr>(Args[0]);
+ (void)IsHLSLVectorInit;
+
// For initialization steps that start with a single initializer,
// grab the only argument out the Args and place it into the "current"
// initializer.
@@ -8138,7 +8203,7 @@ ExprResult InitializationSequence::Perform(Sema &S,
case SK_StdInitializerList:
case SK_OCLSamplerInit:
case SK_OCLZeroOpaqueType: {
- assert(Args.size() == 1);
+ assert(Args.size() == 1 || IsHLSLVectorInit);
CurInit = Args[0];
if (!CurInit.get()) return ExprError();
break;
@@ -8189,6 +8254,10 @@ ExprResult InitializationSequence::Perform(Sema &S,
CurInit = S.FixOverloadedFunctionReference(CurInit,
Step->Function.FoundDecl,
Step->Function.Function);
+ // We might get back another placeholder expression if we resolved to a
+ // builtin.
+ if (!CurInit.isInvalid())
+ CurInit = S.CheckPlaceholderExpr(CurInit.get());
break;
case SK_CastDerivedToBasePRValue:
@@ -9770,7 +9839,7 @@ void InitializationSequence::dump(raw_ostream &OS) const {
break;
}
- OS << " [" << S->Type.getAsString() << ']';
+ OS << " [" << S->Type << ']';
}
OS << '\n';
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp b/contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp
index b05e0b5cc0f1..afc2f3ef4d76 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaLambda.cpp
@@ -182,7 +182,7 @@ Optional<unsigned> clang::getStackIndexOfNearestEnclosingCaptureCapableLambda(
if (!OptionalStackIndex)
return NoLambdaIsCaptureCapable;
- const unsigned IndexOfCaptureReadyLambda = OptionalStackIndex.getValue();
+ const unsigned IndexOfCaptureReadyLambda = *OptionalStackIndex;
assert(((IndexOfCaptureReadyLambda != (FunctionScopes.size() - 1)) ||
S.getCurGenericLambda()) &&
"The capture ready lambda for a potential capture can only be the "
@@ -238,21 +238,19 @@ getGenericLambdaTemplateParameterList(LambdaScopeInfo *LSI, Sema &SemaRef) {
return LSI->GLTemplateParameterList;
}
-CXXRecordDecl *Sema::createLambdaClosureType(SourceRange IntroducerRange,
- TypeSourceInfo *Info,
- bool KnownDependent,
- LambdaCaptureDefault CaptureDefault) {
+CXXRecordDecl *
+Sema::createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info,
+ unsigned LambdaDependencyKind,
+ LambdaCaptureDefault CaptureDefault) {
DeclContext *DC = CurContext;
while (!(DC->isFunctionOrMethod() || DC->isRecord() || DC->isFileContext()))
DC = DC->getParent();
bool IsGenericLambda = getGenericLambdaTemplateParameterList(getCurLambda(),
*this);
// Start constructing the lambda class.
- CXXRecordDecl *Class = CXXRecordDecl::CreateLambda(Context, DC, Info,
- IntroducerRange.getBegin(),
- KnownDependent,
- IsGenericLambda,
- CaptureDefault);
+ CXXRecordDecl *Class = CXXRecordDecl::CreateLambda(
+ Context, DC, Info, IntroducerRange.getBegin(), LambdaDependencyKind,
+ IsGenericLambda, CaptureDefault);
DC->addDecl(Class);
return Class;
@@ -435,7 +433,7 @@ void Sema::handleLambdaNumbering(
unsigned ManglingNumber, DeviceManglingNumber;
Decl *ManglingContextDecl;
std::tie(HasKnownInternalLinkage, ManglingNumber, DeviceManglingNumber,
- ManglingContextDecl) = Mangling.getValue();
+ ManglingContextDecl) = *Mangling;
Class->setLambdaMangling(ManglingNumber, ManglingContextDecl,
HasKnownInternalLinkage);
Class->setDeviceLambdaManglingNumber(DeviceManglingNumber);
@@ -898,17 +896,18 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
// Determine if we're within a context where we know that the lambda will
// be dependent, because there are template parameters in scope.
- bool KnownDependent;
+ CXXRecordDecl::LambdaDependencyKind LambdaDependencyKind =
+ CXXRecordDecl::LDK_Unknown;
if (LSI->NumExplicitTemplateParams > 0) {
auto *TemplateParamScope = CurScope->getTemplateParamParent();
assert(TemplateParamScope &&
"Lambda with explicit template param list should establish a "
"template param scope");
assert(TemplateParamScope->getParent());
- KnownDependent = TemplateParamScope->getParent()
- ->getTemplateParamParent() != nullptr;
- } else {
- KnownDependent = CurScope->getTemplateParamParent() != nullptr;
+ if (TemplateParamScope->getParent()->getTemplateParamParent() != nullptr)
+ LambdaDependencyKind = CXXRecordDecl::LDK_AlwaysDependent;
+ } else if (CurScope->getTemplateParamParent() != nullptr) {
+ LambdaDependencyKind = CXXRecordDecl::LDK_AlwaysDependent;
}
// Determine the signature of the call operator.
@@ -977,8 +976,8 @@ void Sema::ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
UPPC_DeclarationType);
}
- CXXRecordDecl *Class = createLambdaClosureType(Intro.Range, MethodTyInfo,
- KnownDependent, Intro.Default);
+ CXXRecordDecl *Class = createLambdaClosureType(
+ Intro.Range, MethodTyInfo, LambdaDependencyKind, Intro.Default);
CXXMethodDecl *Method =
startLambdaDefinition(Class, Intro.Range, MethodTyInfo, EndLoc, Params,
ParamInfo.getDeclSpec().getConstexprSpecifier(),
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp b/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp
index af6ee24240ce..47c7a61f8072 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaLookup.cpp
@@ -930,9 +930,11 @@ bool Sema::LookupBuiltin(LookupResult &R) {
// If this is a builtin on this (or all) targets, create the decl.
if (unsigned BuiltinID = II->getBuiltinID()) {
- // In C++ and OpenCL (spec v1.2 s6.9.f), we don't have any predefined
- // library functions like 'malloc'. Instead, we'll just error.
- if ((getLangOpts().CPlusPlus || getLangOpts().OpenCL) &&
+ // In C++, C2x, and OpenCL (spec v1.2 s6.9.f), we don't have any
+ // predefined library functions like 'malloc'. Instead, we'll just
+ // error.
+ if ((getLangOpts().CPlusPlus || getLangOpts().OpenCL ||
+ getLangOpts().C2x) &&
Context.BuiltinInfo.isPredefinedLibFunction(BuiltinID))
return false;
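A sketch of the effect in C2x mode (illustration only):

    // -std=c2x, no #include <stdlib.h>
    void *f(void) { return malloc(8); }  // error: 'malloc' is no longer predeclared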
@@ -1556,13 +1558,39 @@ llvm::DenseSet<Module*> &Sema::getLookupModules() {
return LookupModulesCache;
}
-/// Determine whether the module M is part of the current module from the
-/// perspective of a module-private visibility check.
-static bool isInCurrentModule(const Module *M, const LangOptions &LangOpts) {
- // If M is the global module fragment of a module that we've not yet finished
- // parsing, then it must be part of the current module.
- return M->getTopLevelModuleName() == LangOpts.CurrentModule ||
- (M->Kind == Module::GlobalModuleFragment && !M->Parent);
+/// Determine if we could use all the declarations in the module.
+bool Sema::isUsableModule(const Module *M) {
+ assert(M && "We shouldn't check nullness for module here");
+ // Return quickly if we cached the result.
+ if (UsableModuleUnitsCache.count(M))
+ return true;
+
+ // If M is the global module fragment of the current translation unit, it
+ // should be usable.
+ // [module.global.frag]p1:
+ // The global module fragment can be used to provide declarations that are
+ // attached to the global module and usable within the module unit.
+ if (M == GlobalModuleFragment ||
+ // If M is the module we're parsing, it should be usable. This covers the
+ // private module fragment: by the grammar, a private module fragment can
+ // only appear in the module unit currently being parsed, so it is usable
+ // exactly when it belongs to the current module unit. NOTE: This is also
+ // covered by the following condition; checking it here first is intended
+ // to avoid string comparison as much as possible.
+ M == getCurrentModule() ||
+ // A module unit that is in the same module as the current module unit is
+ // usable.
+ //
+ // FIXME: Here we judge whether they are in the same module by comparing
+ // strings. Is there a better solution?
+ M->getPrimaryModuleInterfaceName() ==
+ llvm::StringRef(getLangOpts().CurrentModule).split(':').first) {
+ UsableModuleUnitsCache.insert(M);
+ return true;
+ }
+
+ return false;
}
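For illustration, a sketch of the units isUsableModule treats as usable while compiling module 'm' (C++20 module syntax):

    // m.cppm:      export module m;   // the current module unit itself
    // m_part.cppm: module m:part;     // same primary module interface name 'm'
    //              module :private;   // private fragment: usable only in its own unit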
bool Sema::hasVisibleMergedDefinition(NamedDecl *Def) {
@@ -1574,21 +1602,22 @@ bool Sema::hasVisibleMergedDefinition(NamedDecl *Def) {
bool Sema::hasMergedDefinitionInCurrentModule(NamedDecl *Def) {
for (const Module *Merged : Context.getModulesWithMergedDefinition(Def))
- if (isInCurrentModule(Merged, getLangOpts()))
+ if (isUsableModule(Merged))
return true;
return false;
}
-template<typename ParmDecl>
+template <typename ParmDecl>
static bool
-hasVisibleDefaultArgument(Sema &S, const ParmDecl *D,
- llvm::SmallVectorImpl<Module *> *Modules) {
+hasAcceptableDefaultArgument(Sema &S, const ParmDecl *D,
+ llvm::SmallVectorImpl<Module *> *Modules,
+ Sema::AcceptableKind Kind) {
if (!D->hasDefaultArgument())
return false;
while (D) {
auto &DefaultArg = D->getDefaultArgStorage();
- if (!DefaultArg.isInherited() && S.isVisible(D))
+ if (!DefaultArg.isInherited() && S.isAcceptable(D, Kind))
return true;
if (!DefaultArg.isInherited() && Modules) {
@@ -1602,20 +1631,36 @@ hasVisibleDefaultArgument(Sema &S, const ParmDecl *D,
return false;
}
-bool Sema::hasVisibleDefaultArgument(const NamedDecl *D,
- llvm::SmallVectorImpl<Module *> *Modules) {
+bool Sema::hasAcceptableDefaultArgument(
+ const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules,
+ Sema::AcceptableKind Kind) {
if (auto *P = dyn_cast<TemplateTypeParmDecl>(D))
- return ::hasVisibleDefaultArgument(*this, P, Modules);
+ return ::hasAcceptableDefaultArgument(*this, P, Modules, Kind);
+
if (auto *P = dyn_cast<NonTypeTemplateParmDecl>(D))
- return ::hasVisibleDefaultArgument(*this, P, Modules);
- return ::hasVisibleDefaultArgument(*this, cast<TemplateTemplateParmDecl>(D),
- Modules);
+ return ::hasAcceptableDefaultArgument(*this, P, Modules, Kind);
+
+ return ::hasAcceptableDefaultArgument(
+ *this, cast<TemplateTemplateParmDecl>(D), Modules, Kind);
+}
+
+bool Sema::hasVisibleDefaultArgument(const NamedDecl *D,
+ llvm::SmallVectorImpl<Module *> *Modules) {
+ return hasAcceptableDefaultArgument(D, Modules,
+ Sema::AcceptableKind::Visible);
+}
+
+bool Sema::hasReachableDefaultArgument(
+ const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules) {
+ return hasAcceptableDefaultArgument(D, Modules,
+ Sema::AcceptableKind::Reachable);
}
-template<typename Filter>
-static bool hasVisibleDeclarationImpl(Sema &S, const NamedDecl *D,
- llvm::SmallVectorImpl<Module *> *Modules,
- Filter F) {
+template <typename Filter>
+static bool
+hasAcceptableDeclarationImpl(Sema &S, const NamedDecl *D,
+ llvm::SmallVectorImpl<Module *> *Modules, Filter F,
+ Sema::AcceptableKind Kind) {
bool HasFilteredRedecls = false;
for (auto *Redecl : D->redecls()) {
@@ -1623,7 +1668,7 @@ static bool hasVisibleDeclarationImpl(Sema &S, const NamedDecl *D,
if (!F(R))
continue;
- if (S.isVisible(R))
+ if (S.isAcceptable(R, Kind))
return true;
HasFilteredRedecls = true;
@@ -1639,74 +1684,115 @@ static bool hasVisibleDeclarationImpl(Sema &S, const NamedDecl *D,
return true;
}
+static bool
+hasAcceptableExplicitSpecialization(Sema &S, const NamedDecl *D,
+ llvm::SmallVectorImpl<Module *> *Modules,
+ Sema::AcceptableKind Kind) {
+ return hasAcceptableDeclarationImpl(
+ S, D, Modules,
+ [](const NamedDecl *D) {
+ if (auto *RD = dyn_cast<CXXRecordDecl>(D))
+ return RD->getTemplateSpecializationKind() ==
+ TSK_ExplicitSpecialization;
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
+ return FD->getTemplateSpecializationKind() ==
+ TSK_ExplicitSpecialization;
+ if (auto *VD = dyn_cast<VarDecl>(D))
+ return VD->getTemplateSpecializationKind() ==
+ TSK_ExplicitSpecialization;
+ llvm_unreachable("unknown explicit specialization kind");
+ },
+ Kind);
+}
+
bool Sema::hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules) {
- return hasVisibleDeclarationImpl(*this, D, Modules, [](const NamedDecl *D) {
- if (auto *RD = dyn_cast<CXXRecordDecl>(D))
- return RD->getTemplateSpecializationKind() == TSK_ExplicitSpecialization;
- if (auto *FD = dyn_cast<FunctionDecl>(D))
- return FD->getTemplateSpecializationKind() == TSK_ExplicitSpecialization;
- if (auto *VD = dyn_cast<VarDecl>(D))
- return VD->getTemplateSpecializationKind() == TSK_ExplicitSpecialization;
- llvm_unreachable("unknown explicit specialization kind");
- });
+ return ::hasAcceptableExplicitSpecialization(*this, D, Modules,
+ Sema::AcceptableKind::Visible);
}
-bool Sema::hasVisibleMemberSpecialization(
+bool Sema::hasReachableExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules) {
+ return ::hasAcceptableExplicitSpecialization(*this, D, Modules,
+ Sema::AcceptableKind::Reachable);
+}
+
+static bool
+hasAcceptableMemberSpecialization(Sema &S, const NamedDecl *D,
+ llvm::SmallVectorImpl<Module *> *Modules,
+ Sema::AcceptableKind Kind) {
assert(isa<CXXRecordDecl>(D->getDeclContext()) &&
"not a member specialization");
- return hasVisibleDeclarationImpl(*this, D, Modules, [](const NamedDecl *D) {
- // If the specialization is declared at namespace scope, then it's a member
- // specialization declaration. If it's lexically inside the class
- // definition then it was instantiated.
- //
- // FIXME: This is a hack. There should be a better way to determine this.
- // FIXME: What about MS-style explicit specializations declared within a
- // class definition?
- return D->getLexicalDeclContext()->isFileContext();
- });
+ return hasAcceptableDeclarationImpl(
+ S, D, Modules,
+ [](const NamedDecl *D) {
+ // If the specialization is declared at namespace scope, then it's a
+ // member specialization declaration. If it's lexically inside the class
+ // definition then it was instantiated.
+ //
+ // FIXME: This is a hack. There should be a better way to determine
+ // this.
+ // FIXME: What about MS-style explicit specializations declared within a
+ // class definition?
+ return D->getLexicalDeclContext()->isFileContext();
+ },
+ Kind);
}
-/// Determine whether a declaration is visible to name lookup.
+bool Sema::hasVisibleMemberSpecialization(
+ const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules) {
+ return hasAcceptableMemberSpecialization(*this, D, Modules,
+ Sema::AcceptableKind::Visible);
+}
+
+bool Sema::hasReachableMemberSpecialization(
+ const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules) {
+ return hasAcceptableMemberSpecialization(*this, D, Modules,
+ Sema::AcceptableKind::Reachable);
+}
+
+/// Determine whether a declaration is acceptable to name lookup.
///
-/// This routine determines whether the declaration D is visible in the current
-/// lookup context, taking into account the current template instantiation
-/// stack. During template instantiation, a declaration is visible if it is
-/// visible from a module containing any entity on the template instantiation
-/// path (by instantiating a template, you allow it to see the declarations that
-/// your module can see, including those later on in your module).
-bool LookupResult::isVisibleSlow(Sema &SemaRef, NamedDecl *D) {
+/// This routine determines whether the declaration D is acceptable in the
+/// current lookup context, taking into account the current template
+/// instantiation stack. During template instantiation, a declaration is
+/// acceptable if it is acceptable from a module containing any entity on the
+/// template instantiation path (by instantiating a template, you allow it to
+/// see the declarations that your module can see, including those later on in
+/// your module).
+bool LookupResult::isAcceptableSlow(Sema &SemaRef, NamedDecl *D,
+ Sema::AcceptableKind Kind) {
assert(!D->isUnconditionallyVisible() &&
"should not call this: not in slow case");
Module *DeclModule = SemaRef.getOwningModule(D);
assert(DeclModule && "hidden decl has no owning module");
- // If the owning module is visible, the decl is visible.
- if (SemaRef.isModuleVisible(DeclModule, D->isModulePrivate()))
+ // If the owning module is visible, the decl is acceptable.
+ if (SemaRef.isModuleVisible(DeclModule,
+ D->isInvisibleOutsideTheOwningModule()))
return true;
// Determine whether a decl context is a file context for the purpose of
- // visibility. This looks through some (export and linkage spec) transparent
- // contexts, but not others (enums).
+ // visibility/reachability. This looks through some (export and linkage spec)
+ // transparent contexts, but not others (enums).
auto IsEffectivelyFileContext = [](const DeclContext *DC) {
return DC->isFileContext() || isa<LinkageSpecDecl>(DC) ||
isa<ExportDecl>(DC);
};
// If this declaration is not at namespace scope
- // then it is visible if its lexical parent has a visible definition.
+ // then it is acceptable if its lexical parent has an acceptable definition.
DeclContext *DC = D->getLexicalDeclContext();
if (DC && !IsEffectivelyFileContext(DC)) {
// For a parameter, check whether our current template declaration's
- // lexical context is visible, not whether there's some other visible
+ // lexical context is acceptable, not whether there's some other acceptable
// definition of it, because parameters aren't "within" the definition.
//
- // In C++ we need to check for a visible definition due to ODR merging,
+ // In C++ we need to check for an acceptable definition due to ODR merging,
// and in C we must not because each declaration of a function gets its own
// set of declarations for tags in prototype scope.
- bool VisibleWithinParent;
+ bool AcceptableWithinParent;
if (D->isTemplateParameter()) {
bool SearchDefinitions = true;
if (const auto *DCD = dyn_cast<Decl>(DC)) {
@@ -1717,51 +1803,72 @@ bool LookupResult::isVisibleSlow(Sema &SemaRef, NamedDecl *D) {
}
}
if (SearchDefinitions)
- VisibleWithinParent = SemaRef.hasVisibleDefinition(cast<NamedDecl>(DC));
+ AcceptableWithinParent =
+ SemaRef.hasAcceptableDefinition(cast<NamedDecl>(DC), Kind);
else
- VisibleWithinParent = isVisible(SemaRef, cast<NamedDecl>(DC));
+ AcceptableWithinParent =
+ isAcceptable(SemaRef, cast<NamedDecl>(DC), Kind);
} else if (isa<ParmVarDecl>(D) ||
(isa<FunctionDecl>(DC) && !SemaRef.getLangOpts().CPlusPlus))
- VisibleWithinParent = isVisible(SemaRef, cast<NamedDecl>(DC));
+ AcceptableWithinParent = isAcceptable(SemaRef, cast<NamedDecl>(DC), Kind);
else if (D->isModulePrivate()) {
- // A module-private declaration is only visible if an enclosing lexical
+ // A module-private declaration is only acceptable if an enclosing lexical
// parent was merged with another definition in the current module.
- VisibleWithinParent = false;
+ AcceptableWithinParent = false;
do {
if (SemaRef.hasMergedDefinitionInCurrentModule(cast<NamedDecl>(DC))) {
- VisibleWithinParent = true;
+ AcceptableWithinParent = true;
break;
}
DC = DC->getLexicalParent();
} while (!IsEffectivelyFileContext(DC));
} else {
- VisibleWithinParent = SemaRef.hasVisibleDefinition(cast<NamedDecl>(DC));
+ AcceptableWithinParent =
+ SemaRef.hasAcceptableDefinition(cast<NamedDecl>(DC), Kind);
}
- if (VisibleWithinParent && SemaRef.CodeSynthesisContexts.empty() &&
+ if (AcceptableWithinParent && SemaRef.CodeSynthesisContexts.empty() &&
+ Kind == Sema::AcceptableKind::Visible &&
// FIXME: Do something better in this case.
!SemaRef.getLangOpts().ModulesLocalVisibility) {
// Cache the fact that this declaration is implicitly visible because
// its parent has a visible definition.
D->setVisibleDespiteOwningModule();
}
- return VisibleWithinParent;
+ return AcceptableWithinParent;
}
- return false;
+ if (Kind == Sema::AcceptableKind::Visible)
+ return false;
+
+ assert(Kind == Sema::AcceptableKind::Reachable &&
+ "Additional Sema::AcceptableKind?");
+ return isReachableSlow(SemaRef, D);
}
bool Sema::isModuleVisible(const Module *M, bool ModulePrivate) {
+ // [module.global.frag]p2:
+ // A global-module-fragment specifies the contents of the global module
+ // fragment for a module unit. The global module fragment can be used to
+ // provide declarations that are attached to the global module and usable
+ // within the module unit.
+ //
+ // The global module fragment is special: it is only usable within the
+ // module unit in which it is defined ([module.global.frag]p2). So here we
+ // check whether the Module is the global module fragment of the current
+ // translation unit.
+ if (M->isGlobalModule() && M != this->GlobalModuleFragment)
+ return false;
+
// The module might be ordinarily visible. For a module-private query, that
- // means it is part of the current module. For any other query, that means it
- // is in our visible module set.
- if (ModulePrivate) {
- if (isInCurrentModule(M, getLangOpts()))
- return true;
- } else {
- if (VisibleModules.isVisible(M))
- return true;
- }
+ // means it is part of the current module.
+ if (ModulePrivate && isUsableModule(M))
+ return true;
+
+ // For a query which is not module-private, that means it is in our visible
+ // module set.
+ if (!ModulePrivate && VisibleModules.isVisible(M))
+ return true;
// Otherwise, it might be visible by virtue of the query being within a
// template instantiation or similar that is permitted to look inside M.
@@ -1785,8 +1892,74 @@ bool Sema::isModuleVisible(const Module *M, bool ModulePrivate) {
});
}
-bool Sema::isVisibleSlow(const NamedDecl *D) {
- return LookupResult::isVisible(*this, const_cast<NamedDecl*>(D));
+// FIXME: Return false directly if we don't have an interface dependency on the
+// translation unit containing D.
+bool LookupResult::isReachableSlow(Sema &SemaRef, NamedDecl *D) {
+ assert(!isVisible(SemaRef, D) && "Shouldn't call the slow case.\n");
+
+ Module *DeclModule = SemaRef.getOwningModule(D);
+ assert(DeclModule && "hidden decl has no owning module");
+
+ // Entities in module map modules are reachable only if they're visible.
+ if (DeclModule->isModuleMapModule())
+ return false;
+
+ // If D comes from a module and SemaRef doesn't own a module, D must come
+ // from another TU. If SemaRef does own a module, we can judge whether D
+ // comes from another TU by comparing the top-level module units.
+ //
+ // FIXME: It would be cleaner to have a direct way to judge whether D is
+ // in another TU.
+ if (SemaRef.getCurrentModule() &&
+ SemaRef.getCurrentModule()->getTopLevelModule() ==
+ DeclModule->getTopLevelModule())
+ return true;
+
+ // [module.reach]/p3:
+ // A declaration D is reachable from a point P if:
+ // ...
+ // - D is not discarded ([module.global.frag]), appears in a translation unit
+ // that is reachable from P, and does not appear within a private module
+ // fragment.
+ //
+ // A declaration that's discarded in the GMF should be module-private.
+ if (D->isModulePrivate())
+ return false;
+
+ // [module.reach]/p1
+ // A translation unit U is necessarily reachable from a point P if U is a
+ // module interface unit on which the translation unit containing P has an
+ // interface dependency, or the translation unit containing P imports U, in
+ // either case prior to P ([module.import]).
+ //
+ // [module.import]/p10
+ // A translation unit has an interface dependency on a translation unit U if
+ // it contains a declaration (possibly a module-declaration) that imports U
+ // or if it has an interface dependency on a translation unit that has an
+ // interface dependency on U.
+ //
+ // So we could conclude the module unit U is necessarily reachable if:
+ // (1) The module unit U is module interface unit.
+ // (2) The current unit has an interface dependency on the module unit U.
+ //
+ // Here we only check the first condition, since we couldn't see DeclModule
+ // at all if it weren't (transitively) imported.
+ if (DeclModule->getTopLevelModule()->isModuleInterfaceUnit())
+ return true;
+
+ // [module.reach]/p2
+ // Additional translation units on
+ // which the point within the program has an interface dependency may be
+ // considered reachable, but it is unspecified which are and under what
+ // circumstances.
+ //
+ // The decision here is to treat all additional translation units as
+ // unreachable.
+ return false;
+}
+
+bool Sema::isAcceptableSlow(const NamedDecl *D, Sema::AcceptableKind Kind) {
+ return LookupResult::isAcceptable(*this, const_cast<NamedDecl *>(D), Kind);
}
bool Sema::shouldLinkPossiblyHiddenDecl(LookupResult &R, const NamedDecl *New) {
@@ -1835,7 +2008,7 @@ bool Sema::shouldLinkPossiblyHiddenDecl(LookupResult &R, const NamedDecl *New) {
/// and visible. If no declaration of D is visible, returns null.
static NamedDecl *findAcceptableDecl(Sema &SemaRef, NamedDecl *D,
unsigned IDNS) {
- assert(!LookupResult::isVisible(SemaRef, D) && "not in slow case");
+ assert(!LookupResult::isAvailableForLookup(SemaRef, D) && "not in slow case");
for (auto RD : D->redecls()) {
// Don't bother with extra checks if we already know this one isn't visible.
@@ -1847,7 +2020,7 @@ static NamedDecl *findAcceptableDecl(Sema &SemaRef, NamedDecl *D,
// visible in the same scope as D. This needs to be done much more
// carefully.
if (ND->isInIdentifierNamespace(IDNS) &&
- LookupResult::isVisible(SemaRef, ND))
+ LookupResult::isAvailableForLookup(SemaRef, ND))
return ND;
}
@@ -1857,8 +2030,17 @@ static NamedDecl *findAcceptableDecl(Sema &SemaRef, NamedDecl *D,
bool Sema::hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules) {
assert(!isVisible(D) && "not in slow case");
- return hasVisibleDeclarationImpl(*this, D, Modules,
- [](const NamedDecl *) { return true; });
+ return hasAcceptableDeclarationImpl(
+ *this, D, Modules, [](const NamedDecl *) { return true; },
+ Sema::AcceptableKind::Visible);
+}
+
+bool Sema::hasReachableDeclarationSlow(
+ const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules) {
+ assert(!isReachable(D) && "not in slow case");
+ return hasAcceptableDeclarationImpl(
+ *this, D, Modules, [](const NamedDecl *) { return true; },
+ Sema::AcceptableKind::Reachable);
}
NamedDecl *LookupResult::getAcceptableDeclSlow(NamedDecl *D) const {
@@ -1883,6 +2065,60 @@ NamedDecl *LookupResult::getAcceptableDeclSlow(NamedDecl *D) const {
return findAcceptableDecl(getSema(), D, IDNS);
}
+bool LookupResult::isVisible(Sema &SemaRef, NamedDecl *D) {
+ // If this declaration is already visible, return it directly.
+ if (D->isUnconditionallyVisible())
+ return true;
+
+ // During template instantiation, we can refer to hidden declarations, if
+ // they were visible in any module along the path of instantiation.
+ return isAcceptableSlow(SemaRef, D, Sema::AcceptableKind::Visible);
+}
+
+bool LookupResult::isReachable(Sema &SemaRef, NamedDecl *D) {
+ if (D->isUnconditionallyVisible())
+ return true;
+
+ return isAcceptableSlow(SemaRef, D, Sema::AcceptableKind::Reachable);
+}
+
+bool LookupResult::isAvailableForLookup(Sema &SemaRef, NamedDecl *ND) {
+ // We should have checked the visibility at the call site already.
+ if (isVisible(SemaRef, ND))
+ return true;
+
+ auto *DC = ND->getDeclContext();
+ // If ND is not visible and it is at namespace scope, it shouldn't be found
+ // by name lookup.
+ if (DC->isFileContext())
+ return false;
+
+ // [module.interface]p7
+ // Class and enumeration member names can be found by name lookup in any
+ // context in which a definition of the type is reachable.
+ //
+ // FIXME: The current implementation doesn't take scope into account. For example:
+ // ```
+ // // m.cppm
+ // export module m;
+ // enum E1 { e1 };
+ // // Use.cpp
+ // import m;
+ // void test() {
+ // auto a = E1::e1; // Error as expected.
+ // auto b = e1; // Should be an error: the namespace-scope name e1 is not visible
+ // }
+ // ```
+ // For the above example, the current implementation correctly emits an error
+ // for `a`, but it fails to diagnose `b`, since we only check the reachability
+ // of the parent context.
+ // See clang/test/CXX/module/module.interface/p7.cpp for example.
+ if (auto *TD = dyn_cast<TagDecl>(DC))
+ return SemaRef.hasReachableDefinition(TD);
+
+ return false;
+}
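A complementary sketch of the behavior the TagDecl branch implements, per [module.interface]p7 (names hypothetical): a member name can be found through a reachable class definition even when the class name itself is not visible.

```
// m.cppm
export module m;
struct S { enum { val }; };             // S is not exported
export S get();

// use.cpp
import m;
auto x = get();                         // OK: S is reachable here
auto y = decltype(get())::val;          // OK: member found via the reachable definition
// auto z = S::val;                     // error: the name S is not visible
```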
+
/// Perform unqualified name lookup starting from a given
/// scope.
///
@@ -1911,13 +2147,14 @@ NamedDecl *LookupResult::getAcceptableDeclSlow(NamedDecl *D) const {
/// used to diagnose ambiguities.
///
/// @returns \c true if lookup succeeded and false otherwise.
-bool Sema::LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation) {
+bool Sema::LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation,
+ bool ForceNoCPlusPlus) {
DeclarationName Name = R.getLookupName();
if (!Name) return false;
LookupNameKind NameKind = R.getLookupKind();
- if (!getLangOpts().CPlusPlus) {
+ if (!getLangOpts().CPlusPlus || ForceNoCPlusPlus) {
// Unqualified name lookup in C/Objective-C is purely lexical, so
// search in the declarations attached to the name.
if (NameKind == Sema::LookupRedeclarationWithLinkage) {
@@ -3613,7 +3850,16 @@ void Sema::ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
}
} else if (D->getFriendObjectKind()) {
auto *RD = cast<CXXRecordDecl>(D->getLexicalDeclContext());
- if (AssociatedClasses.count(RD) && isVisible(D)) {
+ // [basic.lookup.argdep]p4:
+ // Argument-dependent lookup finds all declarations of functions and
+ // function templates that
+ // - ...
+ // - are declared as a friend ([class.friend]) of any class with a
+ // reachable definition in the set of associated entities,
+ //
+ // FIXME: If there's a merged definition of D that is reachable, then
+ // the friend declaration should be considered.
+ if (AssociatedClasses.count(RD) && isReachable(D)) {
Visible = true;
break;
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp b/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp
index 747734f2d0ff..3aa124d457b0 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaModule.cpp
@@ -54,6 +54,23 @@ static void checkModuleImportContext(Sema &S, Module *M,
}
}
+// We represent the primary and partition names as 'Paths', which are sections
+// of the hierarchical access path for a clang module. However, for C++20 the
+// periods in a name are just another character, so we need to flatten them
+// into a string.
+static std::string stringFromPath(ModuleIdPath Path) {
+ std::string Name;
+ if (Path.empty())
+ return Name;
+
+ for (auto &Piece : Path) {
+ if (!Name.empty())
+ Name += ".";
+ Name += Piece.first->getName();
+ }
+ return Name;
+}
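For illustration, two hypothetical module-declarations and the flat names this scheme yields (the partition suffix is appended in ActOnModuleDecl below):

```
export module a.b;       // Path = {a, b}                     -> "a.b"
export module a.b:part;  // Path = {a, b}, Partition = {part} -> "a.b:part"
```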
+
Sema::DeclGroupPtrTy
Sema::ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc) {
if (!ModuleScopes.empty() &&
@@ -73,22 +90,91 @@ Sema::ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc) {
// All declarations created from now on are owned by the global module.
auto *TU = Context.getTranslationUnitDecl();
- TU->setModuleOwnershipKind(Decl::ModuleOwnershipKind::Visible);
+ // [module.global.frag]p2
+ // A global-module-fragment specifies the contents of the global module
+ // fragment for a module unit. The global module fragment can be used to
+ // provide declarations that are attached to the global module and usable
+ // within the module unit.
+ //
+ // So the declarations in the global module shouldn't be visible by default.
+ TU->setModuleOwnershipKind(Decl::ModuleOwnershipKind::ReachableWhenImported);
TU->setLocalOwningModule(GlobalModule);
// FIXME: Consider creating an explicit representation of this declaration.
return nullptr;
}
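A minimal sketch of a global module fragment; with the ownership kind set above, the included declarations are reachable to importers of m but not visible by default.

```
module;                  // begin the global module fragment
#include <cstddef>       // declarations here attach to the global module
export module m;         // end of the GMF; begin the module purview
export std::size_t zero() { return 0; }
```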
+void Sema::HandleStartOfHeaderUnit() {
+ assert(getLangOpts().CPlusPlusModules &&
+ "Header units are only valid for C++20 modules");
+ SourceLocation StartOfTU =
+ SourceMgr.getLocForStartOfFile(SourceMgr.getMainFileID());
+
+ StringRef HUName = getLangOpts().CurrentModule;
+ if (HUName.empty()) {
+ HUName = SourceMgr.getFileEntryForID(SourceMgr.getMainFileID())->getName();
+ const_cast<LangOptions &>(getLangOpts()).CurrentModule = HUName.str();
+ }
+
+ // TODO: Make the C++20 header lookup independent.
+ // When the input is pre-processed source, we need a file ref to the original
+ // file for the header map.
+ auto F = SourceMgr.getFileManager().getFile(HUName);
+ // For the sake of error recovery (if someone has moved the original header
+ // after creating the pre-processed output) fall back to obtaining the file
+ // ref for the input file, which must be present.
+ if (!F)
+ F = SourceMgr.getFileEntryForID(SourceMgr.getMainFileID());
+ assert(F && "failed to find the header unit source?");
+ Module::Header H{HUName.str(), HUName.str(), *F};
+ auto &Map = PP.getHeaderSearchInfo().getModuleMap();
+ Module *Mod = Map.createHeaderUnit(StartOfTU, HUName, H);
+ assert(Mod && "module creation should not fail");
+ ModuleScopes.push_back({}); // No GMF
+ ModuleScopes.back().BeginLoc = StartOfTU;
+ ModuleScopes.back().Module = Mod;
+ ModuleScopes.back().ModuleInterface = true;
+ ModuleScopes.back().IsPartition = false;
+ VisibleModules.setVisible(Mod, StartOfTU);
+
+ // From now on, we have an owning module for all declarations we see.
+ // All of these are implicitly exported.
+ auto *TU = Context.getTranslationUnitDecl();
+ TU->setModuleOwnershipKind(Decl::ModuleOwnershipKind::Visible);
+ TU->setLocalOwningModule(Mod);
+}
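A hedged sketch of consuming a header unit (the header name is hypothetical; the build system must first compile the header as a unit, and the driver options for that vary between releases):

```
// version.h -- compiled separately as a header unit
inline int version() { return 1; }

// use.cpp
import "version.h";      // header-unit import: declarations (and macros) become visible
int v = version();
```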
+
Sema::DeclGroupPtrTy
Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
- ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl) {
+ ModuleDeclKind MDK, ModuleIdPath Path,
+ ModuleIdPath Partition, ModuleImportState &ImportState) {
assert((getLangOpts().ModulesTS || getLangOpts().CPlusPlusModules) &&
"should only have module decl in Modules TS or C++20");
- // A module implementation unit requires that we are not compiling a module
- // of any kind. A module interface unit requires that we are not compiling a
- // module map.
+ bool IsFirstDecl = ImportState == ModuleImportState::FirstDecl;
+ bool SeenGMF = ImportState == ModuleImportState::GlobalFragment;
+ // If any of the steps here fail, we count that as invalidating C++20
+ // module state.
+ ImportState = ModuleImportState::NotACXX20Module;
+
+ bool IsPartition = !Partition.empty();
+ if (IsPartition)
+ switch (MDK) {
+ case ModuleDeclKind::Implementation:
+ MDK = ModuleDeclKind::PartitionImplementation;
+ break;
+ case ModuleDeclKind::Interface:
+ MDK = ModuleDeclKind::PartitionInterface;
+ break;
+ default:
+ llvm_unreachable("how did we get a partition type set?");
+ }
+
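For reference, the four module-declaration shapes this switch distinguishes:

```
module m;                // ModuleDeclKind::Implementation
export module m;         // ModuleDeclKind::Interface
module m:impl;           // rewritten to PartitionImplementation
export module m:part;    // rewritten to PartitionInterface
```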
+ // A (non-partition) module implementation unit requires that we are not
+ // compiling a module of any kind. A partition implementation emits an
+ // interface (and the AST for the implementation), which will subsequently
+ // be consumed to emit a binary.
+ // A module interface unit requires that we are not compiling a module map.
switch (getLangOpts().getCompilingModule()) {
case LangOptions::CMK_None:
// It's OK to compile a module interface as a normal translation unit.
@@ -99,7 +185,7 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
break;
// We were asked to compile a module interface unit but this is a module
- // implementation unit. That indicates the 'export' is missing.
+ // implementation unit.
Diag(ModuleLoc, diag::err_module_interface_implementation_mismatch)
<< FixItHint::CreateInsertion(ModuleLoc, "export ");
MDK = ModuleDeclKind::Interface;
@@ -110,6 +196,7 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
return nullptr;
case LangOptions::CMK_HeaderModule:
+ case LangOptions::CMK_HeaderUnit:
Diag(ModuleLoc, diag::err_module_decl_in_header_module);
return nullptr;
}
@@ -134,9 +221,13 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
ModuleScopes.back().Module->Kind == Module::GlobalModuleFragment)
GlobalModuleFragment = ModuleScopes.back().Module;
+ assert((!getLangOpts().CPlusPlusModules || getLangOpts().ModulesTS ||
+ SeenGMF == (bool)GlobalModuleFragment) &&
+ "mismatched global module state");
+
// In C++20, the module-declaration must be the first declaration if there
// is no global module fragment.
- if (getLangOpts().CPlusPlusModules && !IsFirstDecl && !GlobalModuleFragment) {
+ if (getLangOpts().CPlusPlusModules && !IsFirstDecl && !SeenGMF) {
Diag(ModuleLoc, diag::err_module_decl_not_at_start);
SourceLocation BeginLoc =
ModuleScopes.empty()
@@ -151,19 +242,19 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
// Flatten the dots in a module name. Unlike Clang's hierarchical module map
// modules, the dots here are just another character that can appear in a
// module name.
- std::string ModuleName;
- for (auto &Piece : Path) {
- if (!ModuleName.empty())
- ModuleName += ".";
- ModuleName += Piece.first->getName();
+ std::string ModuleName = stringFromPath(Path);
+ if (IsPartition) {
+ ModuleName += ":";
+ ModuleName += stringFromPath(Partition);
}
-
// If a module name was explicitly specified on the command line, it must be
// correct.
if (!getLangOpts().CurrentModule.empty() &&
getLangOpts().CurrentModule != ModuleName) {
Diag(Path.front().second, diag::err_current_module_name_mismatch)
- << SourceRange(Path.front().second, Path.back().second)
+ << SourceRange(Path.front().second, IsPartition
+ ? Partition.back().second
+ : Path.back().second)
<< getLangOpts().CurrentModule;
return nullptr;
}
@@ -173,7 +264,8 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
Module *Mod;
switch (MDK) {
- case ModuleDeclKind::Interface: {
+ case ModuleDeclKind::Interface:
+ case ModuleDeclKind::PartitionInterface: {
// We can't have parsed or imported a definition of this module or parsed a
// module map defining it already.
if (auto *M = Map.findModule(ModuleName)) {
@@ -190,13 +282,19 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
// Create a Module for the module that we're defining.
Mod = Map.createModuleForInterfaceUnit(ModuleLoc, ModuleName,
GlobalModuleFragment);
+ if (MDK == ModuleDeclKind::PartitionInterface)
+ Mod->Kind = Module::ModulePartitionInterface;
assert(Mod && "module creation should not fail");
break;
}
- case ModuleDeclKind::Implementation:
+ case ModuleDeclKind::Implementation: {
std::pair<IdentifierInfo *, SourceLocation> ModuleNameLoc(
PP.getIdentifierInfo(ModuleName), Path[0].second);
+ // C++20 [module.unit]p8: A module-declaration that contains neither an
+ // export-keyword nor a module-partition implicitly imports the primary
+ // module interface unit of the module as if by a module-import-declaration.
Mod = getModuleLoader().loadModule(ModuleLoc, {ModuleNameLoc},
Module::AllVisible,
/*IsInclusionDirective=*/false);
@@ -206,6 +304,14 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
Mod = Map.createModuleForInterfaceUnit(ModuleLoc, ModuleName,
GlobalModuleFragment);
}
+ } break;
+
+ case ModuleDeclKind::PartitionImplementation:
+ // Create an interface, but note that it is an implementation
+ // unit.
+ Mod = Map.createModuleForInterfaceUnit(ModuleLoc, ModuleName,
+ GlobalModuleFragment);
+ Mod->Kind = Module::ModulePartitionImplementation;
break;
}
@@ -222,15 +328,22 @@ Sema::ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc,
ModuleScopes.back().BeginLoc = StartLoc;
ModuleScopes.back().Module = Mod;
ModuleScopes.back().ModuleInterface = MDK != ModuleDeclKind::Implementation;
+ ModuleScopes.back().IsPartition = IsPartition;
VisibleModules.setVisible(Mod, ModuleLoc);
// From now on, we have an owning module for all declarations we see.
- // However, those declarations are module-private unless explicitly
+ // In C++20 modules, those declarations are reachable when imported
+ // unless explicitly exported.
+ // Otherwise, those declarations are module-private unless explicitly
// exported.
auto *TU = Context.getTranslationUnitDecl();
- TU->setModuleOwnershipKind(Decl::ModuleOwnershipKind::ModulePrivate);
+ TU->setModuleOwnershipKind(Decl::ModuleOwnershipKind::ReachableWhenImported);
TU->setLocalOwningModule(Mod);
+ // We are in the module purview, but before any other (non-import)
+ // declarations, so imports are allowed.
+ ImportState = ModuleImportState::ImportAllowed;
+
// FIXME: Create a ModuleDecl.
return nullptr;
}
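The ImportState transition above enforces the C++20 rule that, in a module unit, imports must precede all other declarations. A hypothetical ill-formed example:

```
export module m;
import other;            // OK: still in the import-allowed prologue
int x;                   // first non-import declaration ends the prologue
import later;            // error: imports must precede other declarations
```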
@@ -245,6 +358,9 @@ Sema::ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
: ModuleScopes.back().Module->Kind) {
case Module::ModuleMapModule:
case Module::GlobalModuleFragment:
+ case Module::ModulePartitionImplementation:
+ case Module::ModulePartitionInterface:
+ case Module::ModuleHeaderUnit:
Diag(PrivateLoc, diag::err_private_module_fragment_not_module);
return nullptr;
@@ -299,24 +415,50 @@ Sema::ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
- SourceLocation ImportLoc,
- ModuleIdPath Path) {
- // Flatten the module path for a Modules TS module name.
+ SourceLocation ImportLoc, ModuleIdPath Path,
+ bool IsPartition) {
+
+ bool Cxx20Mode = getLangOpts().CPlusPlusModules || getLangOpts().ModulesTS;
+ assert((!IsPartition || Cxx20Mode) && "partition seen in non-C++20 code?");
+
+ // For a C++20 module name, flatten into a single identifier with the source
+ // location of the first component.
std::pair<IdentifierInfo *, SourceLocation> ModuleNameLoc;
- if (getLangOpts().ModulesTS) {
- std::string ModuleName;
- for (auto &Piece : Path) {
- if (!ModuleName.empty())
- ModuleName += ".";
- ModuleName += Piece.first->getName();
- }
+
+ std::string ModuleName;
+ if (IsPartition) {
+ // We already checked that we are in a module purview in the parser.
+ assert(!ModuleScopes.empty() && "in a module purview, but no module?");
+ Module *NamedMod = ModuleScopes.back().Module;
+ // A partition-import is resolved relative to the named module of which the
+ // current unit is a part: prefix the partition name with the name of that
+ // module's primary interface.
+ ModuleName = NamedMod->getPrimaryModuleInterfaceName().str();
+ ModuleName += ":";
+ ModuleName += stringFromPath(Path);
+ ModuleNameLoc = {PP.getIdentifierInfo(ModuleName), Path[0].second};
+ Path = ModuleIdPath(ModuleNameLoc);
+ } else if (Cxx20Mode) {
+ ModuleName = stringFromPath(Path);
ModuleNameLoc = {PP.getIdentifierInfo(ModuleName), Path[0].second};
Path = ModuleIdPath(ModuleNameLoc);
}
- Module *Mod =
- getModuleLoader().loadModule(ImportLoc, Path, Module::AllVisible,
- /*IsInclusionDirective=*/false);
+ // Diagnose self-import before attempting a load.
+ // [module.import]/9
+ // A module implementation unit of a module M that is not a module partition
+ // shall not contain a module-import-declaration nominating M.
+ // (for an implementation, the module interface is imported implicitly,
+ // but that's handled in the module decl code).
+
+ if (getLangOpts().CPlusPlusModules && isCurrentModulePurview() &&
+ getCurrentModule()->Name == ModuleName) {
+ Diag(ImportLoc, diag::err_module_self_import_cxx20)
+ << ModuleName << !ModuleScopes.back().ModuleInterface;
+ return true;
+ }
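A minimal example of the self-import diagnosed here (module name hypothetical):

```
// m.cpp -- implementation unit of m
module m;                // the interface of m is imported implicitly
import m;                // error: a module unit of m cannot import m itself
```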
+
+ Module *Mod = getModuleLoader().loadModule(
+ ImportLoc, Path, Module::AllVisible, /*IsInclusionDirective=*/false);
if (!Mod)
return true;
@@ -333,8 +475,8 @@ static const ExportDecl *getEnclosingExportDecl(const Decl *D) {
DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
- SourceLocation ImportLoc,
- Module *Mod, ModuleIdPath Path) {
+ SourceLocation ImportLoc, Module *Mod,
+ ModuleIdPath Path) {
VisibleModules.setVisible(Mod, ImportLoc);
checkModuleImportContext(*this, Mod, ImportLoc, CurContext);
@@ -342,10 +484,7 @@ DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
// FIXME: we should support importing a submodule within a different submodule
// of the same top-level module. Until we do, make it an error rather than
// silently ignoring the import.
- // Import-from-implementation is valid in the Modules TS. FIXME: Should we
- // warn on a redundant import of the current module?
- // FIXME: Import of a module from an implementation partition of the same
- // module is permitted.
+ // FIXME: Should we warn on a redundant import of the current module?
if (Mod->getTopLevelModuleName() == getLangOpts().CurrentModule &&
(getLangOpts().isCompilingModule() || !getLangOpts().ModulesTS)) {
Diag(ImportLoc, getLangOpts().isCompilingModule()
@@ -355,22 +494,26 @@ DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
}
SmallVector<SourceLocation, 2> IdentifierLocs;
- Module *ModCheck = Mod;
- for (unsigned I = 0, N = Path.size(); I != N; ++I) {
- // If we've run out of module parents, just drop the remaining identifiers.
- // We need the length to be consistent.
- if (!ModCheck)
- break;
- ModCheck = ModCheck->Parent;
- IdentifierLocs.push_back(Path[I].second);
- }
-
- // If this was a header import, pad out with dummy locations.
- // FIXME: Pass in and use the location of the header-name token in this case.
if (Path.empty()) {
- for (; ModCheck; ModCheck = ModCheck->Parent) {
+ // If this was a header import, pad out with dummy locations.
+ // FIXME: Pass in and use the location of the header-name token in this
+ // case.
+ for (Module *ModCheck = Mod; ModCheck; ModCheck = ModCheck->Parent)
IdentifierLocs.push_back(SourceLocation());
+ } else if (getLangOpts().CPlusPlusModules && !Mod->Parent) {
+ // A single identifier for the whole name.
+ IdentifierLocs.push_back(Path[0].second);
+ } else {
+ Module *ModCheck = Mod;
+ for (unsigned I = 0, N = Path.size(); I != N; ++I) {
+ // If we've run out of module parents, just drop the remaining
+ // identifiers. We need the length to be consistent.
+ if (!ModCheck)
+ break;
+ ModCheck = ModCheck->Parent;
+
+ IdentifierLocs.push_back(Path[I].second);
}
}
@@ -383,7 +526,18 @@ DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
if (!ModuleScopes.empty())
Context.addModuleInitializer(ModuleScopes.back().Module, Import);
- if (!ModuleScopes.empty() && ModuleScopes.back().ModuleInterface) {
+ // A module (partition) implementation unit shall not be exported.
+ if (getLangOpts().CPlusPlusModules && ExportLoc.isValid() &&
+ Mod->Kind == Module::ModuleKind::ModulePartitionImplementation) {
+ Diag(ExportLoc, diag::err_export_partition_impl)
+ << SourceRange(ExportLoc, Path.back().second);
+ } else if (!ModuleScopes.empty() &&
+ (ModuleScopes.back().ModuleInterface ||
+ (getLangOpts().CPlusPlusModules &&
+ ModuleScopes.back().Module->isGlobalModule()))) {
+ assert((!ModuleScopes.back().Module->isGlobalModule() ||
+ Mod->Kind == Module::ModuleKind::ModuleHeaderUnit) &&
+ "should only be importing a header unit into the GMF");
// Re-export the module if the imported module is exported.
// Note that we don't need to add re-exported module to Imports field
// since `Exports` implies the module is imported already.
@@ -395,9 +549,21 @@ DeclResult Sema::ActOnModuleImport(SourceLocation StartLoc,
// [module.interface]p1:
// An export-declaration shall inhabit a namespace scope and appear in the
// purview of a module interface unit.
- Diag(ExportLoc, diag::err_export_not_in_module_interface) << 0;
+ Diag(ExportLoc, diag::err_export_not_in_module_interface)
+ << (!ModuleScopes.empty() &&
+ !ModuleScopes.back().ImplicitGlobalModuleFragment);
+ } else if (getLangOpts().isCompilingModule()) {
+ Module *ThisModule = PP.getHeaderSearchInfo().lookupModule(
+ getLangOpts().CurrentModule, ExportLoc, false, false);
+ (void)ThisModule;
+ assert(ThisModule && "was expecting a module if building one");
}
+ // In some cases we need to know if an entity was present in a directly-
+ // imported module (as opposed to a transitive import). This avoids
+ // searching both Imports and Exports.
+ DirectModuleImports.insert(Mod);
+
return Import;
}
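For reference, the re-export path above corresponds to interface-unit source like the following sketch (names hypothetical):

```
export module m;
export import :part;     // re-export a partition of m
export import other;     // re-export another module to importers of m
```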
@@ -433,6 +599,13 @@ void Sema::BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod) {
getModuleLoader().makeModuleVisible(Mod, Module::AllVisible, DirectiveLoc);
VisibleModules.setVisible(Mod, DirectiveLoc);
+
+ if (getLangOpts().isCompilingModule()) {
+ Module *ThisModule = PP.getHeaderSearchInfo().lookupModule(
+ getLangOpts().CurrentModule, DirectiveLoc, false, false);
+ (void)ThisModule;
+ assert(ThisModule && "was expecting a module if building one");
+ }
}
void Sema::ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod) {
@@ -598,6 +771,7 @@ enum class UnnamedDeclKind {
StaticAssert,
Asm,
UsingDirective,
+ Namespace,
Context
};
}
@@ -627,6 +801,10 @@ unsigned getUnnamedDeclDiag(UnnamedDeclKind UDK, bool InBlock) {
// Allow exporting using-directives as an extension.
return diag::ext_export_using_directive;
+ case UnnamedDeclKind::Namespace:
+ // Anonymous namespace with no content.
+ return diag::introduces_no_names;
+
case UnnamedDeclKind::Context:
// Allow exporting DeclContexts that transitively contain no declarations
// as an extension.
@@ -654,10 +832,12 @@ static bool checkExportedDecl(Sema &S, Decl *D, SourceLocation BlockStart) {
diagExportedUnnamedDecl(S, *UDK, D, BlockStart);
// [...] shall not declare a name with internal linkage.
+ bool HasName = false;
if (auto *ND = dyn_cast<NamedDecl>(D)) {
// Don't diagnose anonymous union objects; we'll diagnose their members
// instead.
- if (ND->getDeclName() && ND->getFormalLinkage() == InternalLinkage) {
+ HasName = (bool)ND->getDeclName();
+ if (HasName && ND->getFormalLinkage() == InternalLinkage) {
S.Diag(ND->getLocation(), diag::err_export_internal) << ND;
if (BlockStart.isValid())
S.Diag(BlockStart, diag::note_export);
@@ -669,8 +849,10 @@ static bool checkExportedDecl(Sema &S, Decl *D, SourceLocation BlockStart) {
// shall have been introduced with a name having external linkage
if (auto *USD = dyn_cast<UsingShadowDecl>(D)) {
NamedDecl *Target = USD->getUnderlyingDecl();
- if (Target->getFormalLinkage() == InternalLinkage) {
- S.Diag(USD->getLocation(), diag::err_export_using_internal) << Target;
+ Linkage Lk = Target->getFormalLinkage();
+ if (Lk == InternalLinkage || Lk == ModuleLinkage) {
+ S.Diag(USD->getLocation(), diag::err_export_using_internal)
+ << (Lk == InternalLinkage ? 0 : 1) << Target;
S.Diag(Target->getLocation(), diag::note_using_decl_target);
if (BlockStart.isValid())
S.Diag(BlockStart, diag::note_export);
@@ -678,10 +860,17 @@ static bool checkExportedDecl(Sema &S, Decl *D, SourceLocation BlockStart) {
}
// Recurse into namespace-scope DeclContexts. (Only namespace-scope
- // declarations are exported.)
- if (auto *DC = dyn_cast<DeclContext>(D))
- if (DC->getRedeclContext()->isFileContext() && !isa<EnumDecl>(D))
+ // declarations are exported.)
+ if (auto *DC = dyn_cast<DeclContext>(D)) {
+ if (isa<NamespaceDecl>(D) && DC->decls().empty()) {
+ if (!HasName)
+ // We don't allow an empty anonymous namespace (we don't allow decls
+ // in them either, but that's handled in the recursion).
+ diagExportedUnnamedDecl(S, UnnamedDeclKind::Namespace, D, BlockStart);
+ // We allow an empty named namespace decl.
+ } else if (DC->getRedeclContext()->isFileContext() && !isa<EnumDecl>(D))
return checkExportedDeclContext(S, DC, BlockStart);
+ }
return false;
}
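A sketch of the cases this function now diagnoses (all names hypothetical):

```
export module m;
export static int counter;   // error: exported name has internal linkage
int modlocal;                // module linkage, not exported
export using ::modlocal;     // error: using-declaration target has module linkage
export namespace {}          // error: an anonymous namespace introduces no names
export namespace ok {}       // OK: an empty named namespace may be exported
```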
@@ -720,19 +909,25 @@ Decl *Sema::ActOnFinishExportDecl(Scope *S, Decl *D, SourceLocation RBraceLoc) {
Module *Sema::PushGlobalModuleFragment(SourceLocation BeginLoc,
bool IsImplicit) {
- ModuleMap &Map = PP.getHeaderSearchInfo().getModuleMap();
- Module *GlobalModule =
- Map.createGlobalModuleFragmentForModuleUnit(BeginLoc, getCurrentModule());
- assert(GlobalModule && "module creation should not fail");
+ // We shouldn't create a new global module fragment if there is already
+ // one.
+ if (!GlobalModuleFragment) {
+ ModuleMap &Map = PP.getHeaderSearchInfo().getModuleMap();
+ GlobalModuleFragment = Map.createGlobalModuleFragmentForModuleUnit(
+ BeginLoc, getCurrentModule());
+ }
+
+ assert(GlobalModuleFragment && "module creation should not fail");
// Enter the scope of the global module.
- ModuleScopes.push_back({BeginLoc, GlobalModule,
+ ModuleScopes.push_back({BeginLoc, GlobalModuleFragment,
/*ModuleInterface=*/false,
+ /*IsPartition=*/false,
/*ImplicitGlobalModuleFragment=*/IsImplicit,
- /*VisibleModuleSet*/{}});
- VisibleModules.setVisible(GlobalModule, BeginLoc);
+ /*OuterVisibleModules=*/{}});
+ VisibleModules.setVisible(GlobalModuleFragment, BeginLoc);
- return GlobalModule;
+ return GlobalModuleFragment;
}
void Sema::PopGlobalModuleFragment() {
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp b/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp
index 32e90ced7b29..6f501965552e 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaOpenMP.cpp
@@ -35,6 +35,7 @@
#include "llvm/ADT/IndexedMap.h"
#include "llvm/ADT/PointerEmbeddedInt.h"
#include "llvm/ADT/STLExtras.h"
+#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Frontend/OpenMP/OMPAssume.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
@@ -58,7 +59,8 @@ enum DefaultDataSharingAttributes {
DSA_unspecified = 0, /// Data sharing attribute not specified.
DSA_none = 1 << 0, /// Default data sharing attribute 'none'.
DSA_shared = 1 << 1, /// Default data sharing attribute 'shared'.
- DSA_firstprivate = 1 << 2, /// Default data sharing attribute 'firstprivate'.
+ DSA_private = 1 << 2, /// Default data sharing attribute 'private'.
+ DSA_firstprivate = 1 << 3, /// Default data sharing attribute 'firstprivate'.
};
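The new DSA_private value corresponds to OpenMP 5.1's default(private) clause; a minimal sketch of its effect (assumes an OpenMP-5.1-capable compiler and -fopenmp):

```
#include <omp.h>

int main() {
  int x = 0;
#pragma omp parallel default(private)
  {
    x = omp_get_thread_num();  // x is implicitly private inside the region
  }
  return x;                    // the original x is unchanged (still 0)
}
```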
/// Stack for tracking declarations used in OpenMP directives and
@@ -175,6 +177,7 @@ private:
bool HasMutipleLoops = false;
const Decl *PossiblyLoopCounter = nullptr;
bool NowaitRegion = false;
+ bool UntiedRegion = false;
bool CancelRegion = false;
bool LoopStart = false;
bool BodyComplete = false;
@@ -193,6 +196,22 @@ private:
llvm::DenseSet<CanonicalDeclPtr<Decl>> UsedInScanDirective;
llvm::DenseMap<CanonicalDeclPtr<const Decl>, UsesAllocatorsDeclKind>
UsesAllocatorsDecls;
+ /// Data required when creating capture fields for an implicit
+ /// default firstprivate or private clause.
+ struct ImplicitDefaultFDInfoTy {
+ /// Field decl.
+ const FieldDecl *FD = nullptr;
+ /// Nesting stack level.
+ size_t StackLevel = 0;
+ /// Capture variable decl.
+ VarDecl *VD = nullptr;
+ ImplicitDefaultFDInfoTy(const FieldDecl *FD, size_t StackLevel,
+ VarDecl *VD)
+ : FD(FD), StackLevel(StackLevel), VD(VD) {}
+ };
+ /// List of captured fields.
+ llvm::SmallVector<ImplicitDefaultFDInfoTy, 8>
+ ImplicitDefaultFirstprivateFDs;
Expr *DeclareMapperVar = nullptr;
SharingMapTy(OpenMPDirectiveKind DKind, DeclarationNameInfo Name,
Scope *CurScope, SourceLocation Loc)
@@ -574,7 +593,9 @@ public:
/// predicate.
const DSAVarData
hasDSA(ValueDecl *D,
- const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
+ const llvm::function_ref<bool(OpenMPClauseKind, bool,
+ DefaultDataSharingAttributes)>
+ CPred,
const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
bool FromParent) const;
/// Checks if the specified variables has data-sharing attributes which
@@ -693,6 +714,11 @@ public:
getTopOfStack().DefaultAttr = DSA_shared;
getTopOfStack().DefaultAttrLoc = Loc;
}
+ /// Set default data sharing attribute to private.
+ void setDefaultDSAPrivate(SourceLocation Loc) {
+ getTopOfStack().DefaultAttr = DSA_private;
+ getTopOfStack().DefaultAttrLoc = Loc;
+ }
/// Set default data sharing attribute to firstprivate.
void setDefaultDSAFirstPrivate(SourceLocation Loc) {
getTopOfStack().DefaultAttr = DSA_firstprivate;
@@ -820,7 +846,7 @@ public:
/// Returns optional parameter for the ordered region.
std::pair<const Expr *, OMPOrderedClause *> getOrderedRegionParam() const {
if (const SharingMapTy *Top = getTopOfStackOrNull())
- if (Top->OrderedRegion.hasValue())
+ if (Top->OrderedRegion)
return Top->OrderedRegion.getValue();
return std::make_pair(nullptr, nullptr);
}
@@ -835,7 +861,7 @@ public:
std::pair<const Expr *, OMPOrderedClause *>
getParentOrderedRegionParam() const {
if (const SharingMapTy *Parent = getSecondOnStackOrNull())
- if (Parent->OrderedRegion.hasValue())
+ if (Parent->OrderedRegion)
return Parent->OrderedRegion.getValue();
return std::make_pair(nullptr, nullptr);
}
@@ -850,6 +876,15 @@ public:
return Parent->NowaitRegion;
return false;
}
+ /// Marks the current region as untied (it has an 'untied' clause).
+ void setUntiedRegion(bool IsUntied = true) {
+ getTopOfStack().UntiedRegion = IsUntied;
+ }
+ /// Return true if the current region is untied.
+ bool isUntiedRegion() const {
+ const SharingMapTy *Top = getTopOfStackOrNull();
+ return Top ? Top->UntiedRegion : false;
+ }
/// Marks parent region as cancel region.
void setParentCancelRegion(bool Cancel = true) {
if (SharingMapTy *Parent = getSecondOnStackOrNull())
@@ -1103,6 +1138,52 @@ public:
const SharingMapTy *Top = getTopOfStackOrNull();
return Top ? Top->DeclareMapperVar : nullptr;
}
+ /// Get the captured field from ImplicitDefaultFirstprivateFDs.
+ VarDecl *getImplicitFDCapExprDecl(const FieldDecl *FD) const {
+ const_iterator I = begin();
+ const_iterator EndI = end();
+ size_t StackLevel = getStackSize();
+ for (; I != EndI; ++I) {
+ if (I->DefaultAttr == DSA_firstprivate || I->DefaultAttr == DSA_private)
+ break;
+ StackLevel--;
+ }
+ assert((StackLevel > 0 && I != EndI) || (StackLevel == 0 && I == EndI));
+ if (I == EndI)
+ return nullptr;
+ for (const auto &IFD : I->ImplicitDefaultFirstprivateFDs)
+ if (IFD.FD == FD && IFD.StackLevel == StackLevel)
+ return IFD.VD;
+ return nullptr;
+ }
+ /// Check whether the capture decl is a field captured in ImplicitDefaultFirstprivateFDs.
+ bool isImplicitDefaultFirstprivateFD(VarDecl *VD) const {
+ const_iterator I = begin();
+ const_iterator EndI = end();
+ for (; I != EndI; ++I)
+ if (I->DefaultAttr == DSA_firstprivate || I->DefaultAttr == DSA_private)
+ break;
+ if (I == EndI)
+ return false;
+ for (const auto &IFD : I->ImplicitDefaultFirstprivateFDs)
+ if (IFD.VD == VD)
+ return true;
+ return false;
+ }
+ /// Store the capture FD info in ImplicitDefaultFirstprivateFDs.
+ void addImplicitDefaultFirstprivateFD(const FieldDecl *FD, VarDecl *VD) {
+ iterator I = begin();
+ const_iterator EndI = end();
+ size_t StackLevel = getStackSize();
+ for (; I != EndI; ++I) {
+ if (I->DefaultAttr == DSA_private || I->DefaultAttr == DSA_firstprivate) {
+ I->ImplicitDefaultFirstprivateFDs.emplace_back(FD, StackLevel, VD);
+ break;
+ }
+ StackLevel--;
+ }
+ assert((StackLevel > 0 && I != EndI) || (StackLevel == 0 && I == EndI));
+ }
};
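These helpers record fields of the enclosing class that are implicitly captured through 'this' when default(firstprivate) or default(private) is in effect; roughly the situation in this sketch (names hypothetical):

```
struct Counter {
  int n = 5;
  void run() {
#pragma omp parallel default(firstprivate)
    {
      int local = n + 1;  // 'n' is rebuilt as (*this).n and captured privately
      (void)local;
    }
  }
};
```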
bool isImplicitTaskingRegion(OpenMPDirectiveKind DKind) {
@@ -1222,7 +1303,7 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(const_iterator &Iter,
case DSA_none:
return DVar;
case DSA_firstprivate:
- if (VD->getStorageDuration() == SD_Static &&
+ if (VD && VD->getStorageDuration() == SD_Static &&
VD->getDeclContext()->isFileContext()) {
DVar.CKind = OMPC_unknown;
} else {
@@ -1230,6 +1311,18 @@ DSAStackTy::DSAVarData DSAStackTy::getDSA(const_iterator &Iter,
}
DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
return DVar;
+ case DSA_private:
+ // Variables with static storage duration that are declared in a
+ // namespace or global scope and referenced in the construct do not get
+ // an implicitly determined DSA here (mirroring the firstprivate case
+ // above).
+ if (VD && VD->getStorageDuration() == SD_Static &&
+ VD->getDeclContext()->isFileContext()) {
+ DVar.CKind = OMPC_unknown;
+ } else {
+ DVar.CKind = OMPC_private;
+ }
+ DVar.ImplicitDSALoc = Iter->DefaultAttrLoc;
+ return DVar;
case DSA_unspecified:
// OpenMP [2.9.1.1, Data-sharing Attribute Rules for Variables Referenced
// in a Construct, implicitly determined, p.2]
@@ -1786,7 +1879,9 @@ const DSAStackTy::DSAVarData DSAStackTy::getImplicitDSA(ValueDecl *D,
const DSAStackTy::DSAVarData
DSAStackTy::hasDSA(ValueDecl *D,
- const llvm::function_ref<bool(OpenMPClauseKind, bool)> CPred,
+ const llvm::function_ref<bool(OpenMPClauseKind, bool,
+ DefaultDataSharingAttributes)>
+ CPred,
const llvm::function_ref<bool(OpenMPDirectiveKind)> DPred,
bool FromParent) const {
if (isStackEmpty())
@@ -1802,7 +1897,7 @@ DSAStackTy::hasDSA(ValueDecl *D,
continue;
const_iterator NewI = I;
DSAVarData DVar = getDSA(NewI, D);
- if (I == NewI && CPred(DVar.CKind, DVar.AppliedToPointee))
+ if (I == NewI && CPred(DVar.CKind, DVar.AppliedToPointee, I->DefaultAttr))
return DVar;
}
return {};
@@ -2131,7 +2226,8 @@ bool Sema::isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
!cast<OMPCapturedExprDecl>(D)->getInit()->isGLValue()) &&
// If the variable is implicitly firstprivate and scalar - capture by
// copy
- !(DSAStack->getDefaultDSA() == DSA_firstprivate &&
+ !((DSAStack->getDefaultDSA() == DSA_firstprivate ||
+ DSAStack->getDefaultDSA() == DSA_private) &&
!DSAStack->hasExplicitDSA(
D, [](OpenMPClauseKind K, bool) { return K != OMPC_unknown; },
Level) &&
@@ -2157,6 +2253,11 @@ unsigned Sema::getOpenMPNestingLevel() const {
return DSAStack->getNestingLevel();
}
+bool Sema::isInOpenMPTaskUntiedContext() const {
+ return isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) &&
+ DSAStack->isUntiedRegion();
+}
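For reference, isInOpenMPTaskUntiedContext() reports regions such as this sketch:

```
void work();

void spawn() {
#pragma omp task untied
  work();  // inside this region, isInOpenMPTaskUntiedContext() is true
}
```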
+
bool Sema::isInOpenMPTargetExecutionDirective() const {
return (isOpenMPTargetExecutionDirective(DSAStack->getCurrentDirective()) &&
!DSAStack->isClauseParsingMode()) ||
@@ -2168,6 +2269,26 @@ bool Sema::isInOpenMPTargetExecutionDirective() const {
false);
}
+bool Sema::isOpenMPRebuildMemberExpr(ValueDecl *D) {
+ DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
+ D,
+ [](OpenMPClauseKind C, bool AppliedToPointee,
+ DefaultDataSharingAttributes DefaultAttr) {
+ return isOpenMPPrivate(C) && !AppliedToPointee &&
+ (DefaultAttr == DSA_firstprivate || DefaultAttr == DSA_private);
+ },
+ [](OpenMPDirectiveKind) { return true; },
+ DSAStack->isClauseParsingMode());
+ return DVarPrivate.CKind != OMPC_unknown;
+}
+
+static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
+ Expr *CaptureExpr, bool WithInit,
+ DeclContext *CurContext,
+ bool AsExpression);
+
VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
unsigned StopAt) {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
@@ -2266,7 +2387,7 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
// default(none) clause and not used in any clause.
DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
D,
- [](OpenMPClauseKind C, bool AppliedToPointee) {
+ [](OpenMPClauseKind C, bool AppliedToPointee, bool) {
return isOpenMPPrivate(C) && !AppliedToPointee;
},
[](OpenMPDirectiveKind) { return true; },
@@ -2274,11 +2395,52 @@ VarDecl *Sema::isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo,
// Global shared must not be captured.
if (VD && !VD->hasLocalStorage() && DVarPrivate.CKind == OMPC_unknown &&
((DSAStack->getDefaultDSA() != DSA_none &&
+ DSAStack->getDefaultDSA() != DSA_private &&
DSAStack->getDefaultDSA() != DSA_firstprivate) ||
DVarTop.CKind == OMPC_shared))
return nullptr;
+ auto *FD = dyn_cast<FieldDecl>(D);
+ if (DVarPrivate.CKind != OMPC_unknown && !VD && FD &&
+ !DVarPrivate.PrivateCopy) {
+ DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
+ D,
+ [](OpenMPClauseKind C, bool AppliedToPointee,
+ DefaultDataSharingAttributes DefaultAttr) {
+ return isOpenMPPrivate(C) && !AppliedToPointee &&
+ (DefaultAttr == DSA_firstprivate ||
+ DefaultAttr == DSA_private);
+ },
+ [](OpenMPDirectiveKind) { return true; },
+ DSAStack->isClauseParsingMode());
+ if (DVarPrivate.CKind == OMPC_unknown)
+ return nullptr;
+
+ VarDecl *VD = DSAStack->getImplicitFDCapExprDecl(FD);
+ if (VD)
+ return VD;
+ if (getCurrentThisType().isNull())
+ return nullptr;
+ Expr *ThisExpr = BuildCXXThisExpr(SourceLocation(), getCurrentThisType(),
+ /*IsImplicit=*/true);
+ const CXXScopeSpec CS = CXXScopeSpec();
+ Expr *ME = BuildMemberExpr(ThisExpr, /*IsArrow=*/true, SourceLocation(),
+ NestedNameSpecifierLoc(), SourceLocation(), FD,
+ DeclAccessPair::make(FD, FD->getAccess()),
+ /*HadMultipleCandidates=*/false,
+ DeclarationNameInfo(), FD->getType(),
+ VK_LValue, OK_Ordinary);
+ OMPCapturedExprDecl *CD = buildCaptureDecl(
+ *this, FD->getIdentifier(), ME, DVarPrivate.CKind != OMPC_private,
+ CurContext->getParent(), /*AsExpression=*/false);
+ DeclRefExpr *VDPrivateRefExpr = buildDeclRefExpr(
+ *this, CD, CD->getType().getNonReferenceType(), SourceLocation());
+ VD = cast<VarDecl>(VDPrivateRefExpr->getDecl());
+ DSAStack->addImplicitDefaultFirstprivateFD(FD, VD);
+ return VD;
+ }
if (DVarPrivate.CKind != OMPC_unknown ||
(VD && (DSAStack->getDefaultDSA() == DSA_none ||
+ DSAStack->getDefaultDSA() == DSA_private ||
DSAStack->getDefaultDSA() == DSA_firstprivate)))
return VD ? VD : cast<VarDecl>(DVarPrivate.PrivateCopy->getDecl());
}
@@ -2307,6 +2469,23 @@ void Sema::startOpenMPCXXRangeFor() {
OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
unsigned CapLevel) const {
assert(LangOpts.OpenMP && "OpenMP is not allowed");
+ if (DSAStack->getCurrentDirective() != OMPD_unknown &&
+ (!DSAStack->isClauseParsingMode() ||
+ DSAStack->getParentDirective() != OMPD_unknown)) {
+ DSAStackTy::DSAVarData DVarPrivate = DSAStack->hasDSA(
+ D,
+ [](OpenMPClauseKind C, bool AppliedToPointee,
+ DefaultDataSharingAttributes DefaultAttr) {
+ return isOpenMPPrivate(C) && !AppliedToPointee &&
+ DefaultAttr == DSA_private;
+ },
+ [](OpenMPDirectiveKind) { return true; },
+ DSAStack->isClauseParsingMode());
+ if (DVarPrivate.CKind == OMPC_private && isa<OMPCapturedExprDecl>(D) &&
+ DSAStack->isImplicitDefaultFirstprivateFD(cast<VarDecl>(D)) &&
+ !DSAStack->isLoopControlVariable(D).first)
+ return OMPC_private;
+ }
if (DSAStack->hasExplicitDirective(isOpenMPTaskingDirective, Level)) {
bool IsTriviallyCopyable =
D->getType().getNonReferenceType().isTriviallyCopyableType(Context) &&
@@ -2358,7 +2537,7 @@ OpenMPClauseKind Sema::isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
// User-defined allocators are private since they must be defined in the
// context of target region.
if (DSAStack->hasExplicitDirective(isOpenMPTargetExecutionDirective, Level) &&
- DSAStack->isUsesAllocatorsDecl(Level, D).getValueOr(
+ DSAStack->isUsesAllocatorsDecl(Level, D).value_or(
DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait) ==
DSAStackTy::UsesAllocatorsDeclKind::UserDefinedAllocator)
return OMPC_private;
@@ -2448,7 +2627,11 @@ bool Sema::isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
unsigned NumLevels =
getOpenMPCaptureLevels(DSAStack->getDirective(Level));
if (Level == 0)
- return (NumLevels == CaptureLevel + 1) && TopDVar.CKind != OMPC_shared;
+ // A non-file-scope static variable with default(firstprivate)
+ // should be globally captured.
+ return (NumLevels == CaptureLevel + 1 &&
+ (TopDVar.CKind != OMPC_shared ||
+ DSAStack->getDefaultDSA() == DSA_firstprivate));
do {
--Level;
DSAStackTy::DSAVarData DVar = DSAStack->getImplicitDSA(D, Level);
@@ -2501,7 +2684,7 @@ void Sema::finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
<< HostDevTy;
return;
}
- if (!LangOpts.OpenMPIsDevice && DevTy &&
+ if (!LangOpts.OpenMPIsDevice && !LangOpts.OpenMPOffloadMandatory && DevTy &&
*DevTy == OMPDeclareTargetDeclAttr::DT_NoHost) {
// Diagnose nohost function called during host codegen.
StringRef NoHostDevTy = getOpenMPSimpleClauseTypeName(
@@ -3263,8 +3446,7 @@ void Sema::ActOnOpenMPAssumesDirective(SourceLocation Loc,
continue;
if (auto *CTD = dyn_cast<ClassTemplateDecl>(SubDC)) {
DeclContexts.push_back(CTD->getTemplatedDecl());
- for (auto *S : CTD->specializations())
- DeclContexts.push_back(S);
+ llvm::append_range(DeclContexts, CTD->specializations());
continue;
}
if (auto *DC = dyn_cast<DeclContext>(SubDC))
@@ -3429,6 +3611,7 @@ class DSAAttrChecker final : public StmtVisitor<DSAAttrChecker, void> {
CapturedStmt *CS = nullptr;
const static unsigned DefaultmapKindNum = OMPC_DEFAULTMAP_pointer + 1;
llvm::SmallVector<Expr *, 4> ImplicitFirstprivate;
+ llvm::SmallVector<Expr *, 4> ImplicitPrivate;
llvm::SmallVector<Expr *, 4> ImplicitMap[DefaultmapKindNum][OMPC_MAP_delete];
llvm::SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
ImplicitMapModifier[DefaultmapKindNum];
@@ -3483,7 +3666,8 @@ public:
if (auto *VD = dyn_cast<VarDecl>(E->getDecl())) {
// Check the datasharing rules for the expressions in the clauses.
if (!CS || (isa<OMPCapturedExprDecl>(VD) && !CS->capturesVariable(VD) &&
- !Stack->getTopDSA(VD, /*FromParent=*/false).RefExpr)) {
+ !Stack->getTopDSA(VD, /*FromParent=*/false).RefExpr &&
+ !Stack->isImplicitDefaultFirstprivateFD(VD))) {
if (auto *CED = dyn_cast<OMPCapturedExprDecl>(VD))
if (!CED->hasAttr<OMPCaptureNoInitAttr>()) {
Visit(CED->getInit());
@@ -3492,14 +3676,16 @@ public:
} else if (VD->isImplicit() || isa<OMPCapturedExprDecl>(VD))
// Do not analyze internal variables and do not enclose them into
// implicit clauses.
- return;
+ if (!Stack->isImplicitDefaultFirstprivateFD(VD))
+ return;
VD = VD->getCanonicalDecl();
// Skip internally declared variables.
if (VD->hasLocalStorage() && CS && !CS->capturesVariable(VD) &&
+ !Stack->isImplicitDefaultFirstprivateFD(VD) &&
!Stack->isImplicitTaskFirstprivate(VD))
return;
// Skip allocators in uses_allocators clauses.
- if (Stack->isUsesAllocatorsDecl(VD).hasValue())
+ if (Stack->isUsesAllocatorsDecl(VD))
return;
DSAStackTy::DSAVarData DVar = Stack->getTopDSA(VD, /*FromParent=*/false);
@@ -3513,6 +3699,7 @@ public:
if (VD->hasGlobalStorage() && CS && !CS->capturesVariable(VD) &&
(Stack->hasRequiresDeclWithClause<OMPUnifiedSharedMemoryClause>() ||
!Res || *Res != OMPDeclareTargetDeclAttr::MT_Link) &&
+ !Stack->isImplicitDefaultFirstprivateFD(VD) &&
!Stack->isImplicitTaskFirstprivate(VD))
return;
@@ -3524,18 +3711,21 @@ public:
// by being listed in a data-sharing attribute clause.
if (DVar.CKind == OMPC_unknown &&
(Stack->getDefaultDSA() == DSA_none ||
+ Stack->getDefaultDSA() == DSA_private ||
Stack->getDefaultDSA() == DSA_firstprivate) &&
isImplicitOrExplicitTaskingRegion(DKind) &&
VarsWithInheritedDSA.count(VD) == 0) {
bool InheritedDSA = Stack->getDefaultDSA() == DSA_none;
- if (!InheritedDSA && Stack->getDefaultDSA() == DSA_firstprivate) {
+ if (!InheritedDSA && (Stack->getDefaultDSA() == DSA_firstprivate ||
+ Stack->getDefaultDSA() == DSA_private)) {
DSAStackTy::DSAVarData DVar =
Stack->getImplicitDSA(VD, /*FromParent=*/false);
InheritedDSA = DVar.CKind == OMPC_unknown;
}
if (InheritedDSA)
VarsWithInheritedDSA[VD] = E;
- return;
+ if (Stack->getDefaultDSA() == DSA_none)
+ return;
}
// OpenMP 5.0 [2.19.7.2, defaultmap clause, Description]
@@ -3543,7 +3733,7 @@ public:
// construct that does not have a predetermined data-sharing attribute
// and does not appear in a to or link clause on a declare target
// directive must be listed in a data-mapping attribute clause, a
- // data-haring attribute clause (including a data-sharing attribute
+ // data-sharing attribute clause (including a data-sharing attribute
// clause on a combined construct where target. is one of the
// constituent constructs), or an is_device_ptr clause.
OpenMPDefaultmapClauseKind ClauseKind =
@@ -3654,10 +3844,16 @@ public:
// Define implicit data-sharing attributes for task.
DVar = Stack->getImplicitDSA(VD, /*FromParent=*/false);
if (((isOpenMPTaskingDirective(DKind) && DVar.CKind != OMPC_shared) ||
- (Stack->getDefaultDSA() == DSA_firstprivate &&
- DVar.CKind == OMPC_firstprivate && !DVar.RefExpr)) &&
+ (((Stack->getDefaultDSA() == DSA_firstprivate &&
+ DVar.CKind == OMPC_firstprivate) ||
+ (Stack->getDefaultDSA() == DSA_private &&
+ DVar.CKind == OMPC_private)) &&
+ !DVar.RefExpr)) &&
!Stack->isLoopControlVariable(VD).first) {
- ImplicitFirstprivate.push_back(E);
+ if (Stack->getDefaultDSA() == DSA_private)
+ ImplicitPrivate.push_back(E);
+ else
+ ImplicitFirstprivate.push_back(E);
return;
}
@@ -3873,6 +4069,7 @@ public:
ArrayRef<Expr *> getImplicitFirstprivate() const {
return ImplicitFirstprivate;
}
+ ArrayRef<Expr *> getImplicitPrivate() const { return ImplicitPrivate; }
ArrayRef<Expr *> getImplicitMap(OpenMPDefaultmapClauseKind DK,
OpenMPMapClauseKind MK) const {
return ImplicitMap[DK][MK];
@@ -3920,6 +4117,8 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_parallel_for_simd:
case OMPD_parallel_sections:
case OMPD_parallel_master:
+ case OMPD_parallel_masked:
+ case OMPD_parallel_loop:
case OMPD_teams:
case OMPD_teams_distribute:
case OMPD_teams_distribute_simd: {
@@ -3939,6 +4138,8 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
+ case OMPD_target_teams_loop:
+ case OMPD_target_parallel_loop:
case OMPD_target_teams_distribute:
case OMPD_target_teams_distribute_simd: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
@@ -4078,6 +4279,8 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
+ case OMPD_masked_taskloop:
+ case OMPD_masked_taskloop_simd:
case OMPD_master_taskloop_simd: {
QualType KmpInt32Ty =
Context.getIntTypeForBitwidth(/*DestWidth=*/32, /*Signed=*/1)
@@ -4120,6 +4323,8 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
AlwaysInlineAttr::Keyword_forceinline));
break;
}
+ case OMPD_parallel_masked_taskloop:
+ case OMPD_parallel_masked_taskloop_simd:
case OMPD_parallel_master_taskloop:
case OMPD_parallel_master_taskloop_simd: {
QualType KmpInt32Ty =
@@ -4246,6 +4451,22 @@ void Sema::ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope) {
break;
}
+ case OMPD_teams_loop: {
+ QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
+ QualType KmpInt32PtrTy =
+ Context.getPointerType(KmpInt32Ty).withConst().withRestrict();
+
+ Sema::CapturedParamNameType ParamsTeams[] = {
+ std::make_pair(".global_tid.", KmpInt32PtrTy),
+ std::make_pair(".bound_tid.", KmpInt32PtrTy),
+ std::make_pair(StringRef(), QualType()) // __context with shared vars
+ };
+ // Start a captured region for 'teams'.
+ ActOnCapturedRegionStart(DSAStack->getConstructLoc(), CurScope, CR_OpenMP,
+ ParamsTeams, /*OpenMPCaptureLevel=*/0);
+ break;
+ }
+
case OMPD_teams_distribute_parallel_for:
case OMPD_teams_distribute_parallel_for_simd: {
QualType KmpInt32Ty = Context.getIntTypeForBitwidth(32, 1).withConst();
@@ -4346,6 +4567,7 @@ int Sema::getOpenMPCaptureLevels(OpenMPDirectiveKind DKind) {
static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
Expr *CaptureExpr, bool WithInit,
+ DeclContext *CurContext,
bool AsExpression) {
assert(CaptureExpr);
ASTContext &C = S.getASTContext();
@@ -4364,11 +4586,11 @@ static OMPCapturedExprDecl *buildCaptureDecl(Sema &S, IdentifierInfo *Id,
}
WithInit = true;
}
- auto *CED = OMPCapturedExprDecl::Create(C, S.CurContext, Id, Ty,
+ auto *CED = OMPCapturedExprDecl::Create(C, CurContext, Id, Ty,
CaptureExpr->getBeginLoc());
if (!WithInit)
CED->addAttr(OMPCaptureNoInitAttr::CreateImplicit(C));
- S.CurContext->addHiddenDecl(CED);
+ CurContext->addHiddenDecl(CED);
Sema::TentativeAnalysisScope Trap(S);
S.AddInitializerToDecl(CED, Init, /*DirectInit=*/false);
return CED;
@@ -4381,6 +4603,7 @@ static DeclRefExpr *buildCapture(Sema &S, ValueDecl *D, Expr *CaptureExpr,
CD = cast<OMPCapturedExprDecl>(VD);
else
CD = buildCaptureDecl(S, D->getIdentifier(), CaptureExpr, WithInit,
+ S.CurContext,
/*AsExpression=*/false);
return buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
CaptureExpr->getExprLoc());
@@ -4391,7 +4614,7 @@ static ExprResult buildCapture(Sema &S, Expr *CaptureExpr, DeclRefExpr *&Ref) {
if (!Ref) {
OMPCapturedExprDecl *CD = buildCaptureDecl(
S, &S.getASTContext().Idents.get(".capture_expr."), CaptureExpr,
- /*WithInit=*/true, /*AsExpression=*/true);
+ /*WithInit=*/true, S.CurContext, /*AsExpression=*/true);
Ref = buildDeclRefExpr(S, CD, CD->getType().getNonReferenceType(),
CaptureExpr->getExprLoc());
}
@@ -4533,7 +4756,8 @@ StmtResult Sema::ActOnOpenMPRegionEnd(StmtResult S,
// This is required for proper codegen.
for (OMPClause *Clause : Clauses) {
if (!LangOpts.OpenMPSimd &&
- isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) &&
+ (isOpenMPTaskingDirective(DSAStack->getCurrentDirective()) ||
+ DSAStack->getCurrentDirective() == OMPD_target) &&
Clause->getClauseKind() == OMPC_in_reduction) {
// Capture taskgroup task_reduction descriptors inside the tasking regions
// with the corresponding in_reduction items.
@@ -4794,6 +5018,8 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
(SemaRef.getLangOpts().OpenMP >= 50 &&
(ParentRegion == OMPD_taskloop ||
ParentRegion == OMPD_master_taskloop ||
+ ParentRegion == OMPD_masked_taskloop ||
+ ParentRegion == OMPD_parallel_masked_taskloop ||
ParentRegion == OMPD_parallel_master_taskloop)))) ||
(CancelRegion == OMPD_sections &&
(ParentRegion == OMPD_section || ParentRegion == OMPD_sections ||
@@ -4842,6 +5068,7 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
isOpenMPTaskingDirective(ParentRegion) ||
ParentRegion == OMPD_master || ParentRegion == OMPD_masked ||
ParentRegion == OMPD_parallel_master ||
+ ParentRegion == OMPD_parallel_masked ||
ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered;
} else if (isOpenMPWorksharingDirective(CurrentRegion) &&
!isOpenMPParallelDirective(CurrentRegion) &&
@@ -4856,6 +5083,7 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
isOpenMPTaskingDirective(ParentRegion) ||
ParentRegion == OMPD_master || ParentRegion == OMPD_masked ||
ParentRegion == OMPD_parallel_master ||
+ ParentRegion == OMPD_parallel_masked ||
ParentRegion == OMPD_critical || ParentRegion == OMPD_ordered;
Recommend = ShouldBeInParallelRegion;
} else if (CurrentRegion == OMPD_ordered) {
@@ -4905,9 +5133,13 @@ static bool checkNestingOfRegions(Sema &SemaRef, const DSAStackTy *Stack,
// omp_get_num_teams() regions, and omp_get_team_num() regions are the
// only OpenMP regions that may be strictly nested inside the teams
// region.
+ //
+ // As an extension, we permit atomic within teams as well.
NestingProhibited = !isOpenMPParallelDirective(CurrentRegion) &&
!isOpenMPDistributeDirective(CurrentRegion) &&
- CurrentRegion != OMPD_loop;
+ CurrentRegion != OMPD_loop &&
+ !(SemaRef.getLangOpts().OpenMPExtensions &&
+ CurrentRegion == OMPD_atomic);
Recommend = ShouldBeInParallelRegion;
}
if (!NestingProhibited && CurrentRegion == OMPD_loop) {
@@ -5112,8 +5344,7 @@ class AllocatorChecker final : public ConstStmtVisitor<AllocatorChecker, bool> {
public:
bool VisitDeclRefExpr(const DeclRefExpr *E) {
return S->isUsesAllocatorsDecl(E->getDecl())
- .getValueOr(
- DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait) ==
+ .value_or(DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait) ==
DSAStackTy::UsesAllocatorsDeclKind::AllocatorTrait;
}
bool VisitStmt(const Stmt *S) {
@@ -5391,7 +5622,7 @@ static CapturedStmt *buildDistanceFunc(Sema &Actions, QualType LogicalTy,
// the step size, rounding-up the effective upper bound ensures that the
// last iteration is included.
// Note that the rounding-up may cause an overflow in a temporary that
- // could be avoided, but would have occured in a C-style for-loop as well.
+ // could be avoided, but would have occurred in a C-style for-loop as well.
Expr *Divisor = BuildVarRef(NewStep);
if (Rel == BO_GE || Rel == BO_GT)
Divisor =
@@ -5848,6 +6079,9 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
SmallVector<Expr *, 4> ImplicitFirstprivates(
DSAChecker.getImplicitFirstprivate().begin(),
DSAChecker.getImplicitFirstprivate().end());
+ SmallVector<Expr *, 4> ImplicitPrivates(
+ DSAChecker.getImplicitPrivate().begin(),
+ DSAChecker.getImplicitPrivate().end());
const unsigned DefaultmapKindNum = OMPC_DEFAULTMAP_pointer + 1;
SmallVector<Expr *, 4> ImplicitMaps[DefaultmapKindNum][OMPC_MAP_delete];
SmallVector<OpenMPMapModifierKind, NumberOfOMPMapClauseModifiers>
@@ -5900,6 +6134,17 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
ErrorFound = true;
}
}
+ if (!ImplicitPrivates.empty()) {
+ if (OMPClause *Implicit =
+ ActOnOpenMPPrivateClause(ImplicitPrivates, SourceLocation(),
+ SourceLocation(), SourceLocation())) {
+ ClausesWithImplicit.push_back(Implicit);
+ ErrorFound = cast<OMPPrivateClause>(Implicit)->varlist_size() !=
+ ImplicitPrivates.size();
+ } else {
+ ErrorFound = true;
+ }
+ }
// OpenMP 5.0 [2.19.7]
// If a list item appears in a reduction, lastprivate or linear
// clause on a combined target construct then it is treated as
@@ -6028,6 +6273,11 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
StartLoc, EndLoc);
AllowedNameModifiers.push_back(OMPD_parallel);
break;
+ case OMPD_parallel_masked:
+ Res = ActOnOpenMPParallelMaskedDirective(ClausesWithImplicit, AStmt,
+ StartLoc, EndLoc);
+ AllowedNameModifiers.push_back(OMPD_parallel);
+ break;
case OMPD_parallel_sections:
Res = ActOnOpenMPParallelSectionsDirective(ClausesWithImplicit, AStmt,
StartLoc, EndLoc);
@@ -6151,6 +6401,11 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
AllowedNameModifiers.push_back(OMPD_taskloop);
break;
+ case OMPD_masked_taskloop:
+ Res = ActOnOpenMPMaskedTaskLoopDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ AllowedNameModifiers.push_back(OMPD_taskloop);
+ break;
case OMPD_master_taskloop_simd:
Res = ActOnOpenMPMasterTaskLoopSimdDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
@@ -6158,12 +6413,28 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
if (LangOpts.OpenMP >= 50)
AllowedNameModifiers.push_back(OMPD_simd);
break;
+ case OMPD_masked_taskloop_simd:
+ Res = ActOnOpenMPMaskedTaskLoopSimdDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ if (LangOpts.OpenMP >= 51) {
+ AllowedNameModifiers.push_back(OMPD_taskloop);
+ AllowedNameModifiers.push_back(OMPD_simd);
+ }
+ break;
case OMPD_parallel_master_taskloop:
Res = ActOnOpenMPParallelMasterTaskLoopDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
AllowedNameModifiers.push_back(OMPD_taskloop);
AllowedNameModifiers.push_back(OMPD_parallel);
break;
+ case OMPD_parallel_masked_taskloop:
+ Res = ActOnOpenMPParallelMaskedTaskLoopDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ if (LangOpts.OpenMP >= 51) {
+ AllowedNameModifiers.push_back(OMPD_taskloop);
+ AllowedNameModifiers.push_back(OMPD_parallel);
+ }
+ break;
case OMPD_parallel_master_taskloop_simd:
Res = ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
@@ -6172,6 +6443,15 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
if (LangOpts.OpenMP >= 50)
AllowedNameModifiers.push_back(OMPD_simd);
break;
+ case OMPD_parallel_masked_taskloop_simd:
+ Res = ActOnOpenMPParallelMaskedTaskLoopSimdDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ if (LangOpts.OpenMP >= 51) {
+ AllowedNameModifiers.push_back(OMPD_taskloop);
+ AllowedNameModifiers.push_back(OMPD_parallel);
+ AllowedNameModifiers.push_back(OMPD_simd);
+ }
+ break;
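The newly dispatched masked-taskloop combinations correspond to OpenMP 5.1 source such as this sketch:

```
void doubled(int *a, int n) {
#pragma omp parallel masked taskloop simd
  for (int i = 0; i < n; ++i)
    a[i] = 2 * a[i];
}
```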
case OMPD_distribute:
Res = ActOnOpenMPDistributeDirective(ClausesWithImplicit, AStmt, StartLoc,
EndLoc, VarsWithInheritedDSA);
@@ -6280,6 +6560,22 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
Res = ActOnOpenMPGenericLoopDirective(ClausesWithImplicit, AStmt, StartLoc,
EndLoc, VarsWithInheritedDSA);
break;
+ case OMPD_teams_loop:
+ Res = ActOnOpenMPTeamsGenericLoopDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ break;
+ case OMPD_target_teams_loop:
+ Res = ActOnOpenMPTargetTeamsGenericLoopDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ break;
+ case OMPD_parallel_loop:
+ Res = ActOnOpenMPParallelGenericLoopDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ break;
+ case OMPD_target_parallel_loop:
+ Res = ActOnOpenMPTargetParallelGenericLoopDirective(
+ ClausesWithImplicit, AStmt, StartLoc, EndLoc, VarsWithInheritedDSA);
+ break;
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_threadprivate:
@@ -6302,6 +6598,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
// Check variables in the clauses if default(none) or
// default(firstprivate) was specified.
if (DSAStack->getDefaultDSA() == DSA_none ||
+ DSAStack->getDefaultDSA() == DSA_private ||
DSAStack->getDefaultDSA() == DSA_firstprivate) {
DSAAttrChecker DSAChecker(DSAStack, *this, nullptr);
for (OMPClause *C : Clauses) {
@@ -6381,6 +6678,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPC_use_device_ptr:
case OMPC_use_device_addr:
case OMPC_is_device_ptr:
+ case OMPC_has_device_addr:
case OMPC_nontemporal:
case OMPC_order:
case OMPC_destroy:
@@ -6389,6 +6687,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
case OMPC_uses_allocators:
case OMPC_affinity:
case OMPC_bind:
+ case OMPC_filter:
continue;
case OMPC_allocator:
case OMPC_flush:
@@ -6420,6 +6719,7 @@ StmtResult Sema::ActOnOpenMPExecutableDirective(
continue;
ErrorFound = true;
if (DSAStack->getDefaultDSA() == DSA_none ||
+ DSAStack->getDefaultDSA() == DSA_private ||
DSAStack->getDefaultDSA() == DSA_firstprivate) {
Diag(P.second->getExprLoc(), diag::err_omp_no_dsa_for_variable)
<< P.first << P.second->getSourceRange();
@@ -6996,11 +7296,11 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
}
auto &&HasMultiVersionAttributes = [](const FunctionDecl *FD) {
- return FD->hasAttrs() &&
- (FD->hasAttr<CPUDispatchAttr>() || FD->hasAttr<CPUSpecificAttr>() ||
- FD->hasAttr<TargetAttr>());
+ // The 'target' attribute needs to be separately checked because it does
+ // not always signify a multiversion function declaration.
+ return FD->isMultiVersion() || FD->hasAttr<TargetAttr>();
};
- // OpenMP is not compatible with CPU-specific attributes.
+ // OpenMP is not compatible with multiversion function attributes.
if (HasMultiVersionAttributes(FD)) {
Diag(FD->getLocation(), diag::err_omp_declare_variant_incompat_attributes)
<< SR;
@@ -7171,6 +7471,13 @@ Sema::checkOpenMPDeclareVariantFunction(Sema::DeclGroupPtrTy DG,
return None;
}
+ if (FD->getCanonicalDecl() == NewFD->getCanonicalDecl()) {
+ Diag(VariantRef->getExprLoc(),
+ diag::err_omp_declare_variant_same_base_function)
+ << VariantRef->getSourceRange();
+ return None;
+ }
+
// Check if function types are compatible in C.
if (!LangOpts.CPlusPlus) {
QualType NewType =
@@ -7529,7 +7836,7 @@ public:
bool doesCondDependOnLC() const { return CondDependOnLC.hasValue(); }
/// Returns index of the loop we depend on (starting from 1), or 0 otherwise.
unsigned getLoopDependentIdx() const {
- return InitDependOnLC.getValueOr(CondDependOnLC.getValueOr(0));
+ return InitDependOnLC.value_or(CondDependOnLC.value_or(0));
}
private:
@@ -7632,7 +7939,7 @@ bool OpenMPIterationSpaceChecker::setStep(Expr *NewStep, bool Subtract) {
bool IsConstZero = Result && !Result->getBoolValue();
// != with increment is treated as <; != with decrement is treated as >
- if (!TestIsLessOp.hasValue())
+ if (!TestIsLessOp)
TestIsLessOp = IsConstPos || (IsUnsigned && !Subtract);
if (UB &&
(IsConstZero || (TestIsLessOp.getValue()
@@ -7928,7 +8235,7 @@ bool OpenMPIterationSpaceChecker::checkAndSetCond(Expr *S) {
CE->getArg(1), CE->getSourceRange(), CE->getOperatorLoc());
}
}
- if (Res.hasValue())
+ if (Res)
return *Res;
if (dependent() || SemaRef.CurContext->isDependentContext())
return false;
@@ -8306,7 +8613,7 @@ Expr *OpenMPIterationSpaceChecker::buildNumIterations(
tryBuildCapture(SemaRef, MinLessMaxRes.get(), Captures).get();
if (!MinLessMax)
return nullptr;
- if (TestIsLessOp.getValue()) {
+ if (*TestIsLessOp) {
// LB(MinVal) < LB(MaxVal) ? LB(MinVal) : LB(MaxVal) - min(LB(MinVal),
// LB(MaxVal))
ExprResult MinLB = SemaRef.ActOnConditionalOp(DefaultLoc, DefaultLoc,
@@ -8383,7 +8690,7 @@ Expr *OpenMPIterationSpaceChecker::buildNumIterations(
tryBuildCapture(SemaRef, MinGreaterMaxRes.get(), Captures).get();
if (!MinGreaterMax)
return nullptr;
- if (TestIsLessOp.getValue()) {
+ if (*TestIsLessOp) {
// UB(MinVal) > UB(MaxVal) ? UB(MinVal) : UB(MaxVal) - max(UB(MinVal),
// UB(MaxVal))
ExprResult MaxUB = SemaRef.ActOnConditionalOp(
@@ -8478,7 +8785,7 @@ std::pair<Expr *, Expr *> OpenMPIterationSpaceChecker::buildMinMaxValues(
if (!Upper || !Lower)
return std::make_pair(nullptr, nullptr);
- if (TestIsLessOp.getValue())
+ if (*TestIsLessOp)
MinExpr = Lower;
else
MaxExpr = Upper;
@@ -8522,7 +8829,7 @@ std::pair<Expr *, Expr *> OpenMPIterationSpaceChecker::buildMinMaxValues(
if (!Diff.isUsable())
return std::make_pair(nullptr, nullptr);
- if (TestIsLessOp.getValue()) {
+ if (*TestIsLessOp) {
// MinExpr = Lower;
// MaxExpr = Lower + (((Upper - Lower [- 1]) / Step) * Step)
Diff = SemaRef.BuildBinOp(
@@ -8555,7 +8862,7 @@ std::pair<Expr *, Expr *> OpenMPIterationSpaceChecker::buildMinMaxValues(
if (!Diff.isUsable())
return std::make_pair(nullptr, nullptr);
- if (TestIsLessOp.getValue())
+ if (*TestIsLessOp)
MaxExpr = Diff.get();
else
MinExpr = Diff.get();
@@ -8744,8 +9051,9 @@ void Sema::ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init) {
(LangOpts.OpenMP <= 45 || (DVar.CKind != OMPC_lastprivate &&
DVar.CKind != OMPC_private))) ||
((isOpenMPWorksharingDirective(DKind) || DKind == OMPD_taskloop ||
- DKind == OMPD_master_taskloop ||
+ DKind == OMPD_master_taskloop || DKind == OMPD_masked_taskloop ||
DKind == OMPD_parallel_master_taskloop ||
+ DKind == OMPD_parallel_masked_taskloop ||
isOpenMPDistributeDirective(DKind)) &&
!isOpenMPSimdDirective(DKind) && DVar.CKind != OMPC_unknown &&
DVar.CKind != OMPC_private && DVar.CKind != OMPC_lastprivate)) &&
@@ -10106,32 +10414,42 @@ StmtResult Sema::ActOnOpenMPDispatchDirective(ArrayRef<OMPClause *> Clauses,
TargetCallLoc);
}
-StmtResult Sema::ActOnOpenMPGenericLoopDirective(
- ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
- SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
- if (!AStmt)
- return StmtError();
-
- // OpenMP 5.1 [2.11.7, loop construct]
- // A list item may not appear in a lastprivate clause unless it is the
- // loop iteration variable of a loop that is associated with the construct.
+static bool checkGenericLoopLastprivate(Sema &S, ArrayRef<OMPClause *> Clauses,
+ OpenMPDirectiveKind K,
+ DSAStackTy *Stack) {
+ bool ErrorFound = false;
for (OMPClause *C : Clauses) {
if (auto *LPC = dyn_cast<OMPLastprivateClause>(C)) {
for (Expr *RefExpr : LPC->varlists()) {
SourceLocation ELoc;
SourceRange ERange;
Expr *SimpleRefExpr = RefExpr;
- auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange);
+ auto Res = getPrivateItem(S, SimpleRefExpr, ELoc, ERange);
if (ValueDecl *D = Res.first) {
- auto &&Info = DSAStack->isLoopControlVariable(D);
+ auto &&Info = Stack->isLoopControlVariable(D);
if (!Info.first) {
- Diag(ELoc, diag::err_omp_lastprivate_loop_var_non_loop_iteration);
- return StmtError();
+ S.Diag(ELoc, diag::err_omp_lastprivate_loop_var_non_loop_iteration)
+ << getOpenMPDirectiveName(K);
+ ErrorFound = true;
}
}
}
}
}
+ return ErrorFound;
+}
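+
+// Illustrative example (assumption, not from the original change): with 'j'
+// not an iteration variable of an associated loop, the helper above rejects:
+//   #pragma omp loop lastprivate(j)
+//   for (int i = 0; i < N; ++i) { j = i; }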
+
+StmtResult Sema::ActOnOpenMPGenericLoopDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ // OpenMP 5.1 [2.11.7, loop construct, Restrictions]
+ // A list item may not appear in a lastprivate clause unless it is the
+ // loop iteration variable of a loop that is associated with the construct.
+ if (checkGenericLoopLastprivate(*this, Clauses, OMPD_loop, DSAStack))
+ return StmtError();
auto *CS = cast<CapturedStmt>(AStmt);
// 1.2.2 OpenMP Language Terminology
@@ -10157,6 +10475,201 @@ StmtResult Sema::ActOnOpenMPGenericLoopDirective(
NestedLoopCount, Clauses, AStmt, B);
}
+StmtResult Sema::ActOnOpenMPTeamsGenericLoopDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ // OpenMP 5.1 [2.11.7, loop construct, Restrictions]
+ // A list item may not appear in a lastprivate clause unless it is the
+ // loop iteration variable of a loop that is associated with the construct.
+ if (checkGenericLoopLastprivate(*this, Clauses, OMPD_teams_loop, DSAStack))
+ return StmtError();
+
+ auto *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_teams_loop);
+ ThisCaptureLevel > 1; --ThisCaptureLevel) {
+ CS = cast<CapturedStmt>(CS->getCapturedStmt());
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ }
+
+ OMPLoopDirective::HelperExprs B;
+  // In the presence of a 'collapse' clause, it defines the number of nested
+  // loops.
+ unsigned NestedLoopCount =
+ checkOpenMPLoop(OMPD_teams_loop, getCollapseNumberExpr(Clauses),
+ /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp loop exprs were not built");
+
+ setFunctionHasBranchProtectedScope();
+ DSAStack->setParentTeamsRegionLoc(StartLoc);
+
+ return OMPTeamsGenericLoopDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
+
+StmtResult Sema::ActOnOpenMPTargetTeamsGenericLoopDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ // OpenMP 5.1 [2.11.7, loop construct, Restrictions]
+ // A list item may not appear in a lastprivate clause unless it is the
+ // loop iteration variable of a loop that is associated with the construct.
+ if (checkGenericLoopLastprivate(*this, Clauses, OMPD_target_teams_loop,
+ DSAStack))
+ return StmtError();
+
+ auto *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_target_teams_loop);
+ ThisCaptureLevel > 1; --ThisCaptureLevel) {
+ CS = cast<CapturedStmt>(CS->getCapturedStmt());
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ }
+
+ OMPLoopDirective::HelperExprs B;
+  // In the presence of a 'collapse' clause, it defines the number of nested
+  // loops.
+ unsigned NestedLoopCount =
+ checkOpenMPLoop(OMPD_target_teams_loop, getCollapseNumberExpr(Clauses),
+ /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp loop exprs were not built");
+
+ setFunctionHasBranchProtectedScope();
+
+ return OMPTargetTeamsGenericLoopDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
+
+StmtResult Sema::ActOnOpenMPParallelGenericLoopDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ // OpenMP 5.1 [2.11.7, loop construct, Restrictions]
+ // A list item may not appear in a lastprivate clause unless it is the
+ // loop iteration variable of a loop that is associated with the construct.
+ if (checkGenericLoopLastprivate(*this, Clauses, OMPD_parallel_loop, DSAStack))
+ return StmtError();
+
+ auto *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_parallel_loop);
+ ThisCaptureLevel > 1; --ThisCaptureLevel) {
+ CS = cast<CapturedStmt>(CS->getCapturedStmt());
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ }
+
+ OMPLoopDirective::HelperExprs B;
+  // In the presence of a 'collapse' clause, it defines the number of nested
+  // loops.
+ unsigned NestedLoopCount =
+ checkOpenMPLoop(OMPD_parallel_loop, getCollapseNumberExpr(Clauses),
+ /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp loop exprs were not built");
+
+ setFunctionHasBranchProtectedScope();
+
+ return OMPParallelGenericLoopDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
+
+StmtResult Sema::ActOnOpenMPTargetParallelGenericLoopDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ // OpenMP 5.1 [2.11.7, loop construct, Restrictions]
+ // A list item may not appear in a lastprivate clause unless it is the
+ // loop iteration variable of a loop that is associated with the construct.
+ if (checkGenericLoopLastprivate(*this, Clauses, OMPD_target_parallel_loop,
+ DSAStack))
+ return StmtError();
+
+ auto *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ for (int ThisCaptureLevel = getOpenMPCaptureLevels(OMPD_target_parallel_loop);
+ ThisCaptureLevel > 1; --ThisCaptureLevel) {
+ CS = cast<CapturedStmt>(CS->getCapturedStmt());
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ }
+
+ OMPLoopDirective::HelperExprs B;
+  // In the presence of a 'collapse' clause, it defines the number of nested
+  // loops.
+ unsigned NestedLoopCount =
+ checkOpenMPLoop(OMPD_target_parallel_loop, getCollapseNumberExpr(Clauses),
+ /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp loop exprs were not built");
+
+ setFunctionHasBranchProtectedScope();
+
+ return OMPTargetParallelGenericLoopDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
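+
+// Illustrative usage (assumption) of one of the combined constructs handled
+// above; 'collapse(2)' makes checkOpenMPLoop analyze both loop levels:
+//   #pragma omp target parallel loop collapse(2)
+//   for (int i = 0; i < N; ++i)
+//     for (int j = 0; j < M; ++j)
+//       a[i][j] = 0;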
+
StmtResult Sema::ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
@@ -10379,6 +10892,29 @@ Sema::ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
}
StmtResult
+Sema::ActOnOpenMPParallelMaskedDirective(ArrayRef<OMPClause *> Clauses,
+ Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ auto *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+
+ setFunctionHasBranchProtectedScope();
+
+ return OMPParallelMaskedDirective::Create(
+ Context, StartLoc, EndLoc, Clauses, AStmt,
+ DSAStack->getTaskgroupReductionRef());
+}
+
+StmtResult
Sema::ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc) {
@@ -10925,6 +11461,797 @@ bool OpenMPAtomicUpdateChecker::checkStatement(Stmt *S, unsigned DiagId,
}
return ErrorFound != NoError;
}
+
+/// Get the node id of the fixed point of an expression \a S.
+llvm::FoldingSetNodeID getNodeId(ASTContext &Context, const Expr *S) {
+ llvm::FoldingSetNodeID Id;
+ S->IgnoreParenImpCasts()->Profile(Id, Context, true);
+ return Id;
+}
+
+/// Check if two expressions are the same.
+bool checkIfTwoExprsAreSame(ASTContext &Context, const Expr *LHS,
+ const Expr *RHS) {
+ return getNodeId(Context, LHS) == getNodeId(Context, RHS);
+}
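+
+// Illustrative note (not part of the original change): since the profile is
+// taken on IgnoreParenImpCasts(), 'x', '(x)', and an implicitly converted 'x'
+// all compare equal here, e.g. 'if ((x) == e)' still matches a plain 'x'.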
+
+class OpenMPAtomicCompareChecker {
+public:
+  /// All kinds of errors that can occur in `atomic compare`.
+ enum ErrorTy {
+ /// Empty compound statement.
+ NoStmt = 0,
+ /// More than one statement in a compound statement.
+ MoreThanOneStmt,
+ /// Not an assignment binary operator.
+ NotAnAssignment,
+ /// Not a conditional operator.
+ NotCondOp,
+    /// Wrong false expr. According to the spec, 'x' should be the false
+    /// operand of the conditional expression.
+ WrongFalseExpr,
+ /// The condition of a conditional expression is not a binary operator.
+ NotABinaryOp,
+ /// Invalid binary operator (not <, >, or ==).
+ InvalidBinaryOp,
+ /// Invalid comparison (not x == e, e == x, x ordop expr, or expr ordop x).
+ InvalidComparison,
+    /// X is not an lvalue.
+ XNotLValue,
+ /// Not a scalar.
+ NotScalar,
+ /// Not an integer.
+ NotInteger,
+ /// 'else' statement is not expected.
+ UnexpectedElse,
+ /// Not an equality operator.
+ NotEQ,
+    /// Invalid assignment (not 'v = x').
+ InvalidAssignment,
+    /// Not an if statement.
+ NotIfStmt,
+    /// More than two statements in a compound statement.
+ MoreThanTwoStmts,
+ /// Not a compound statement.
+ NotCompoundStmt,
+ /// No else statement.
+ NoElse,
+ /// Not 'if (r)'.
+ InvalidCondition,
+ /// No error.
+ NoError,
+ };
+
+ struct ErrorInfoTy {
+ ErrorTy Error;
+ SourceLocation ErrorLoc;
+ SourceRange ErrorRange;
+ SourceLocation NoteLoc;
+ SourceRange NoteRange;
+ };
+
+ OpenMPAtomicCompareChecker(Sema &S) : ContextRef(S.getASTContext()) {}
+
+ /// Check if statement \a S is valid for <tt>atomic compare</tt>.
+ bool checkStmt(Stmt *S, ErrorInfoTy &ErrorInfo);
+
+ Expr *getX() const { return X; }
+ Expr *getE() const { return E; }
+ Expr *getD() const { return D; }
+ Expr *getCond() const { return C; }
+ bool isXBinopExpr() const { return IsXBinopExpr; }
+
+protected:
+ /// Reference to ASTContext
+ ASTContext &ContextRef;
+ /// 'x' lvalue part of the source atomic expression.
+ Expr *X = nullptr;
+ /// 'expr' or 'e' rvalue part of the source atomic expression.
+ Expr *E = nullptr;
+ /// 'd' rvalue part of the source atomic expression.
+ Expr *D = nullptr;
+ /// 'cond' part of the source atomic expression. It is in one of the following
+ /// forms:
+ /// expr ordop x
+ /// x ordop expr
+ /// x == e
+ /// e == x
+ Expr *C = nullptr;
+ /// True if the cond expr is in the form of 'x ordop expr'.
+ bool IsXBinopExpr = true;
+
+ /// Check if it is a valid conditional update statement (cond-update-stmt).
+ bool checkCondUpdateStmt(IfStmt *S, ErrorInfoTy &ErrorInfo);
+
+ /// Check if it is a valid conditional expression statement (cond-expr-stmt).
+ bool checkCondExprStmt(Stmt *S, ErrorInfoTy &ErrorInfo);
+
+  /// Check if all captured values have the right type.
+ bool checkType(ErrorInfoTy &ErrorInfo) const;
+
+ static bool CheckValue(const Expr *E, ErrorInfoTy &ErrorInfo,
+ bool ShouldBeLValue) {
+ if (ShouldBeLValue && !E->isLValue()) {
+ ErrorInfo.Error = ErrorTy::XNotLValue;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = E->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = E->getSourceRange();
+ return false;
+ }
+
+ if (!E->isInstantiationDependent()) {
+ QualType QTy = E->getType();
+ if (!QTy->isScalarType()) {
+ ErrorInfo.Error = ErrorTy::NotScalar;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = E->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = E->getSourceRange();
+ return false;
+ }
+
+ if (!QTy->isIntegerType()) {
+ ErrorInfo.Error = ErrorTy::NotInteger;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = E->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = E->getSourceRange();
+ return false;
+ }
+ }
+
+ return true;
+ }
+};
+
+bool OpenMPAtomicCompareChecker::checkCondUpdateStmt(IfStmt *S,
+ ErrorInfoTy &ErrorInfo) {
+ auto *Then = S->getThen();
+ if (auto *CS = dyn_cast<CompoundStmt>(Then)) {
+ if (CS->body_empty()) {
+ ErrorInfo.Error = ErrorTy::NoStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = CS->getSourceRange();
+ return false;
+ }
+ if (CS->size() > 1) {
+ ErrorInfo.Error = ErrorTy::MoreThanOneStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = S->getSourceRange();
+ return false;
+ }
+ Then = CS->body_front();
+ }
+
+ auto *BO = dyn_cast<BinaryOperator>(Then);
+ if (!BO) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Then->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = Then->getSourceRange();
+ return false;
+ }
+ if (BO->getOpcode() != BO_Assign) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = BO->getExprLoc();
+ ErrorInfo.NoteLoc = BO->getOperatorLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = BO->getSourceRange();
+ return false;
+ }
+
+ X = BO->getLHS();
+
+ auto *Cond = dyn_cast<BinaryOperator>(S->getCond());
+ if (!Cond) {
+ ErrorInfo.Error = ErrorTy::NotABinaryOp;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = S->getCond()->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = S->getCond()->getSourceRange();
+ return false;
+ }
+
+ switch (Cond->getOpcode()) {
+ case BO_EQ: {
+ C = Cond;
+ D = BO->getRHS();
+ if (checkIfTwoExprsAreSame(ContextRef, X, Cond->getLHS())) {
+ E = Cond->getRHS();
+ } else if (checkIfTwoExprsAreSame(ContextRef, X, Cond->getRHS())) {
+ E = Cond->getLHS();
+ } else {
+ ErrorInfo.Error = ErrorTy::InvalidComparison;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Cond->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = Cond->getSourceRange();
+ return false;
+ }
+ break;
+ }
+ case BO_LT:
+ case BO_GT: {
+ E = BO->getRHS();
+ if (checkIfTwoExprsAreSame(ContextRef, X, Cond->getLHS()) &&
+ checkIfTwoExprsAreSame(ContextRef, E, Cond->getRHS())) {
+ C = Cond;
+ } else if (checkIfTwoExprsAreSame(ContextRef, E, Cond->getLHS()) &&
+ checkIfTwoExprsAreSame(ContextRef, X, Cond->getRHS())) {
+ C = Cond;
+ IsXBinopExpr = false;
+ } else {
+ ErrorInfo.Error = ErrorTy::InvalidComparison;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Cond->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = Cond->getSourceRange();
+ return false;
+ }
+ break;
+ }
+ default:
+ ErrorInfo.Error = ErrorTy::InvalidBinaryOp;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Cond->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = Cond->getSourceRange();
+ return false;
+ }
+
+ if (S->getElse()) {
+ ErrorInfo.Error = ErrorTy::UnexpectedElse;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = S->getElse()->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = S->getElse()->getSourceRange();
+ return false;
+ }
+
+ return true;
+}
+
+bool OpenMPAtomicCompareChecker::checkCondExprStmt(Stmt *S,
+ ErrorInfoTy &ErrorInfo) {
+ auto *BO = dyn_cast<BinaryOperator>(S);
+ if (!BO) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = S->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = S->getSourceRange();
+ return false;
+ }
+ if (BO->getOpcode() != BO_Assign) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = BO->getExprLoc();
+ ErrorInfo.NoteLoc = BO->getOperatorLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = BO->getSourceRange();
+ return false;
+ }
+
+ X = BO->getLHS();
+
+ auto *CO = dyn_cast<ConditionalOperator>(BO->getRHS()->IgnoreParenImpCasts());
+ if (!CO) {
+ ErrorInfo.Error = ErrorTy::NotCondOp;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = BO->getRHS()->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = BO->getRHS()->getSourceRange();
+ return false;
+ }
+
+ if (!checkIfTwoExprsAreSame(ContextRef, X, CO->getFalseExpr())) {
+ ErrorInfo.Error = ErrorTy::WrongFalseExpr;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CO->getFalseExpr()->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange =
+ CO->getFalseExpr()->getSourceRange();
+ return false;
+ }
+
+ auto *Cond = dyn_cast<BinaryOperator>(CO->getCond());
+ if (!Cond) {
+ ErrorInfo.Error = ErrorTy::NotABinaryOp;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CO->getCond()->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange =
+ CO->getCond()->getSourceRange();
+ return false;
+ }
+
+ switch (Cond->getOpcode()) {
+ case BO_EQ: {
+ C = Cond;
+ D = CO->getTrueExpr();
+ if (checkIfTwoExprsAreSame(ContextRef, X, Cond->getLHS())) {
+ E = Cond->getRHS();
+ } else if (checkIfTwoExprsAreSame(ContextRef, X, Cond->getRHS())) {
+ E = Cond->getLHS();
+ } else {
+ ErrorInfo.Error = ErrorTy::InvalidComparison;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Cond->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = Cond->getSourceRange();
+ return false;
+ }
+ break;
+ }
+ case BO_LT:
+ case BO_GT: {
+ E = CO->getTrueExpr();
+ if (checkIfTwoExprsAreSame(ContextRef, X, Cond->getLHS()) &&
+ checkIfTwoExprsAreSame(ContextRef, E, Cond->getRHS())) {
+ C = Cond;
+ } else if (checkIfTwoExprsAreSame(ContextRef, E, Cond->getLHS()) &&
+ checkIfTwoExprsAreSame(ContextRef, X, Cond->getRHS())) {
+ C = Cond;
+ IsXBinopExpr = false;
+ } else {
+ ErrorInfo.Error = ErrorTy::InvalidComparison;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Cond->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = Cond->getSourceRange();
+ return false;
+ }
+ break;
+ }
+ default:
+ ErrorInfo.Error = ErrorTy::InvalidBinaryOp;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Cond->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = Cond->getSourceRange();
+ return false;
+ }
+
+ return true;
+}
+
+bool OpenMPAtomicCompareChecker::checkType(ErrorInfoTy &ErrorInfo) const {
+ // 'x' and 'e' cannot be nullptr
+ assert(X && E && "X and E cannot be nullptr");
+
+ if (!CheckValue(X, ErrorInfo, true))
+ return false;
+
+ if (!CheckValue(E, ErrorInfo, false))
+ return false;
+
+ if (D && !CheckValue(D, ErrorInfo, false))
+ return false;
+
+ return true;
+}
+
+bool OpenMPAtomicCompareChecker::checkStmt(
+ Stmt *S, OpenMPAtomicCompareChecker::ErrorInfoTy &ErrorInfo) {
+ auto *CS = dyn_cast<CompoundStmt>(S);
+ if (CS) {
+ if (CS->body_empty()) {
+ ErrorInfo.Error = ErrorTy::NoStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = CS->getSourceRange();
+ return false;
+ }
+
+ if (CS->size() != 1) {
+ ErrorInfo.Error = ErrorTy::MoreThanOneStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = CS->getSourceRange();
+ return false;
+ }
+ S = CS->body_front();
+ }
+
+ auto Res = false;
+
+ if (auto *IS = dyn_cast<IfStmt>(S)) {
+ // Check if the statement is in one of the following forms
+ // (cond-update-stmt):
+ // if (expr ordop x) { x = expr; }
+ // if (x ordop expr) { x = expr; }
+ // if (x == e) { x = d; }
+ Res = checkCondUpdateStmt(IS, ErrorInfo);
+ } else {
+ // Check if the statement is in one of the following forms (cond-expr-stmt):
+ // x = expr ordop x ? expr : x;
+ // x = x ordop expr ? expr : x;
+ // x = x == e ? d : x;
+ Res = checkCondExprStmt(S, ErrorInfo);
+ }
+
+ if (!Res)
+ return false;
+
+ return checkType(ErrorInfo);
+}
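+
+// Illustrative examples (assumptions, not from the original change) of
+// statements the checker above accepts, given integer 'x' and 'e':
+//   if (x < e) { x = e; }      // cond-update-stmt, MAX-style update
+//   x = x < e ? e : x;         // cond-expr-stmt with the same semantics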
+
+class OpenMPAtomicCompareCaptureChecker final
+ : public OpenMPAtomicCompareChecker {
+public:
+ OpenMPAtomicCompareCaptureChecker(Sema &S) : OpenMPAtomicCompareChecker(S) {}
+
+ Expr *getV() const { return V; }
+ Expr *getR() const { return R; }
+ bool isFailOnly() const { return IsFailOnly; }
+ bool isPostfixUpdate() const { return IsPostfixUpdate; }
+
+ /// Check if statement \a S is valid for <tt>atomic compare capture</tt>.
+ bool checkStmt(Stmt *S, ErrorInfoTy &ErrorInfo);
+
+private:
+ bool checkType(ErrorInfoTy &ErrorInfo);
+
+  // NOTE: Forms 3, 4, and 5 in the following comments refer to the 3rd, 4th,
+  // and 5th forms of the 'conditional-update-capture-atomic' structured block
+  // in the OpenMP 5.2 spec, p. 82:
+ // (1) { v = x; cond-update-stmt }
+ // (2) { cond-update-stmt v = x; }
+ // (3) if(x == e) { x = d; } else { v = x; }
+ // (4) { r = x == e; if(r) { x = d; } }
+ // (5) { r = x == e; if(r) { x = d; } else { v = x; } }
+
+  /// Check if it is valid 'if(x == e) { x = d; } else { v = x; }' (form 3).
+ bool checkForm3(IfStmt *S, ErrorInfoTy &ErrorInfo);
+
+  /// Check if it is valid '{ r = x == e; if(r) { x = d; } }',
+  /// or '{ r = x == e; if(r) { x = d; } else { v = x; } }' (forms 4 and 5).
+ bool checkForm45(Stmt *S, ErrorInfoTy &ErrorInfo);
+
+ /// 'v' lvalue part of the source atomic expression.
+ Expr *V = nullptr;
+ /// 'r' lvalue part of the source atomic expression.
+ Expr *R = nullptr;
+  /// True if 'v' is only updated when the comparison fails.
+ bool IsFailOnly = false;
+  /// True if the original value of 'x' must be stored in 'v', not the
+  /// updated one.
+ bool IsPostfixUpdate = false;
+};
+
+bool OpenMPAtomicCompareCaptureChecker::checkType(ErrorInfoTy &ErrorInfo) {
+ if (!OpenMPAtomicCompareChecker::checkType(ErrorInfo))
+ return false;
+
+ if (V && !CheckValue(V, ErrorInfo, true))
+ return false;
+
+ if (R && !CheckValue(R, ErrorInfo, true))
+ return false;
+
+ return true;
+}
+
+bool OpenMPAtomicCompareCaptureChecker::checkForm3(IfStmt *S,
+ ErrorInfoTy &ErrorInfo) {
+ IsFailOnly = true;
+
+ auto *Then = S->getThen();
+ if (auto *CS = dyn_cast<CompoundStmt>(Then)) {
+ if (CS->body_empty()) {
+ ErrorInfo.Error = ErrorTy::NoStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = CS->getSourceRange();
+ return false;
+ }
+ if (CS->size() > 1) {
+ ErrorInfo.Error = ErrorTy::MoreThanOneStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = CS->getSourceRange();
+ return false;
+ }
+ Then = CS->body_front();
+ }
+
+ auto *BO = dyn_cast<BinaryOperator>(Then);
+ if (!BO) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Then->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = Then->getSourceRange();
+ return false;
+ }
+ if (BO->getOpcode() != BO_Assign) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = BO->getExprLoc();
+ ErrorInfo.NoteLoc = BO->getOperatorLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = BO->getSourceRange();
+ return false;
+ }
+
+ X = BO->getLHS();
+ D = BO->getRHS();
+
+ auto *Cond = dyn_cast<BinaryOperator>(S->getCond());
+ if (!Cond) {
+ ErrorInfo.Error = ErrorTy::NotABinaryOp;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = S->getCond()->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = S->getCond()->getSourceRange();
+ return false;
+ }
+ if (Cond->getOpcode() != BO_EQ) {
+ ErrorInfo.Error = ErrorTy::NotEQ;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Cond->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = Cond->getSourceRange();
+ return false;
+ }
+
+ if (checkIfTwoExprsAreSame(ContextRef, X, Cond->getLHS())) {
+ E = Cond->getRHS();
+ } else if (checkIfTwoExprsAreSame(ContextRef, X, Cond->getRHS())) {
+ E = Cond->getLHS();
+ } else {
+ ErrorInfo.Error = ErrorTy::InvalidComparison;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Cond->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = Cond->getSourceRange();
+ return false;
+ }
+
+ C = Cond;
+
+ if (!S->getElse()) {
+ ErrorInfo.Error = ErrorTy::NoElse;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = S->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = S->getSourceRange();
+ return false;
+ }
+
+ auto *Else = S->getElse();
+ if (auto *CS = dyn_cast<CompoundStmt>(Else)) {
+ if (CS->body_empty()) {
+ ErrorInfo.Error = ErrorTy::NoStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = CS->getSourceRange();
+ return false;
+ }
+ if (CS->size() > 1) {
+ ErrorInfo.Error = ErrorTy::MoreThanOneStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = S->getSourceRange();
+ return false;
+ }
+ Else = CS->body_front();
+ }
+
+ auto *ElseBO = dyn_cast<BinaryOperator>(Else);
+ if (!ElseBO) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Else->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = Else->getSourceRange();
+ return false;
+ }
+ if (ElseBO->getOpcode() != BO_Assign) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = ElseBO->getExprLoc();
+ ErrorInfo.NoteLoc = ElseBO->getOperatorLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = ElseBO->getSourceRange();
+ return false;
+ }
+
+ if (!checkIfTwoExprsAreSame(ContextRef, X, ElseBO->getRHS())) {
+ ErrorInfo.Error = ErrorTy::InvalidAssignment;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = ElseBO->getRHS()->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange =
+ ElseBO->getRHS()->getSourceRange();
+ return false;
+ }
+
+ V = ElseBO->getLHS();
+
+ return checkType(ErrorInfo);
+}
+
+bool OpenMPAtomicCompareCaptureChecker::checkForm45(Stmt *S,
+ ErrorInfoTy &ErrorInfo) {
+  // We don't check the statement shape here, as those checks should already
+  // have been done before calling this function.
+ auto *CS = cast<CompoundStmt>(S);
+ assert(CS->size() == 2 && "CompoundStmt size is not expected");
+ auto *S1 = cast<BinaryOperator>(CS->body_front());
+ auto *S2 = cast<IfStmt>(CS->body_back());
+ assert(S1->getOpcode() == BO_Assign && "unexpected binary operator");
+
+ if (!checkIfTwoExprsAreSame(ContextRef, S1->getLHS(), S2->getCond())) {
+ ErrorInfo.Error = ErrorTy::InvalidCondition;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = S2->getCond()->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = S1->getLHS()->getSourceRange();
+ return false;
+ }
+
+ R = S1->getLHS();
+
+ auto *Then = S2->getThen();
+ if (auto *ThenCS = dyn_cast<CompoundStmt>(Then)) {
+ if (ThenCS->body_empty()) {
+ ErrorInfo.Error = ErrorTy::NoStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = ThenCS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = ThenCS->getSourceRange();
+ return false;
+ }
+ if (ThenCS->size() > 1) {
+ ErrorInfo.Error = ErrorTy::MoreThanOneStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = ThenCS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = ThenCS->getSourceRange();
+ return false;
+ }
+ Then = ThenCS->body_front();
+ }
+
+ auto *ThenBO = dyn_cast<BinaryOperator>(Then);
+ if (!ThenBO) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = S2->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = S2->getSourceRange();
+ return false;
+ }
+ if (ThenBO->getOpcode() != BO_Assign) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = ThenBO->getExprLoc();
+ ErrorInfo.NoteLoc = ThenBO->getOperatorLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = ThenBO->getSourceRange();
+ return false;
+ }
+
+ X = ThenBO->getLHS();
+ D = ThenBO->getRHS();
+
+ auto *BO = cast<BinaryOperator>(S1->getRHS()->IgnoreImpCasts());
+ if (BO->getOpcode() != BO_EQ) {
+ ErrorInfo.Error = ErrorTy::NotEQ;
+ ErrorInfo.ErrorLoc = BO->getExprLoc();
+ ErrorInfo.NoteLoc = BO->getOperatorLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = BO->getSourceRange();
+ return false;
+ }
+
+ C = BO;
+
+ if (checkIfTwoExprsAreSame(ContextRef, X, BO->getLHS())) {
+ E = BO->getRHS();
+ } else if (checkIfTwoExprsAreSame(ContextRef, X, BO->getRHS())) {
+ E = BO->getLHS();
+ } else {
+ ErrorInfo.Error = ErrorTy::InvalidComparison;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = BO->getExprLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = BO->getSourceRange();
+ return false;
+ }
+
+ if (S2->getElse()) {
+ IsFailOnly = true;
+
+ auto *Else = S2->getElse();
+ if (auto *ElseCS = dyn_cast<CompoundStmt>(Else)) {
+ if (ElseCS->body_empty()) {
+ ErrorInfo.Error = ErrorTy::NoStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = ElseCS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = ElseCS->getSourceRange();
+ return false;
+ }
+ if (ElseCS->size() > 1) {
+ ErrorInfo.Error = ErrorTy::MoreThanOneStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = ElseCS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = ElseCS->getSourceRange();
+ return false;
+ }
+ Else = ElseCS->body_front();
+ }
+
+ auto *ElseBO = dyn_cast<BinaryOperator>(Else);
+ if (!ElseBO) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = Else->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = Else->getSourceRange();
+ return false;
+ }
+ if (ElseBO->getOpcode() != BO_Assign) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = ElseBO->getExprLoc();
+ ErrorInfo.NoteLoc = ElseBO->getOperatorLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = ElseBO->getSourceRange();
+ return false;
+ }
+ if (!checkIfTwoExprsAreSame(ContextRef, X, ElseBO->getRHS())) {
+ ErrorInfo.Error = ErrorTy::InvalidAssignment;
+ ErrorInfo.ErrorLoc = ElseBO->getRHS()->getExprLoc();
+ ErrorInfo.NoteLoc = X->getExprLoc();
+ ErrorInfo.ErrorRange = ElseBO->getRHS()->getSourceRange();
+ ErrorInfo.NoteRange = X->getSourceRange();
+ return false;
+ }
+
+ V = ElseBO->getLHS();
+ }
+
+ return checkType(ErrorInfo);
+}
+
+bool OpenMPAtomicCompareCaptureChecker::checkStmt(Stmt *S,
+ ErrorInfoTy &ErrorInfo) {
+ // if(x == e) { x = d; } else { v = x; }
+ if (auto *IS = dyn_cast<IfStmt>(S))
+ return checkForm3(IS, ErrorInfo);
+
+ auto *CS = dyn_cast<CompoundStmt>(S);
+ if (!CS) {
+ ErrorInfo.Error = ErrorTy::NotCompoundStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = S->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = S->getSourceRange();
+ return false;
+ }
+ if (CS->body_empty()) {
+ ErrorInfo.Error = ErrorTy::NoStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = CS->getSourceRange();
+ return false;
+ }
+
+ // { if(x == e) { x = d; } else { v = x; } }
+ if (CS->size() == 1) {
+ auto *IS = dyn_cast<IfStmt>(CS->body_front());
+ if (!IS) {
+ ErrorInfo.Error = ErrorTy::NotIfStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CS->body_front()->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange =
+ CS->body_front()->getSourceRange();
+ return false;
+ }
+
+ return checkForm3(IS, ErrorInfo);
+ } else if (CS->size() == 2) {
+ auto *S1 = CS->body_front();
+ auto *S2 = CS->body_back();
+
+ Stmt *UpdateStmt = nullptr;
+ Stmt *CondUpdateStmt = nullptr;
+
+ if (auto *BO = dyn_cast<BinaryOperator>(S1)) {
+      // { v = x; cond-update-stmt } or forms 4/5.
+ UpdateStmt = S1;
+ CondUpdateStmt = S2;
+      // Check for form 4 or 5.
+ if (isa<BinaryOperator>(BO->getRHS()->IgnoreImpCasts()) &&
+ isa<IfStmt>(S2))
+ return checkForm45(CS, ErrorInfo);
+      // It cannot be set before the check for forms 4/5.
+ IsPostfixUpdate = true;
+ } else {
+ // { cond-update-stmt v = x; }
+ UpdateStmt = S2;
+ CondUpdateStmt = S1;
+ }
+
+ auto CheckCondUpdateStmt = [this, &ErrorInfo](Stmt *CUS) {
+ auto *IS = dyn_cast<IfStmt>(CUS);
+ if (!IS) {
+ ErrorInfo.Error = ErrorTy::NotIfStmt;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CUS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = CUS->getSourceRange();
+ return false;
+ }
+
+ if (!checkCondUpdateStmt(IS, ErrorInfo))
+ return false;
+
+ return true;
+ };
+
+ // CheckUpdateStmt has to be called *after* CheckCondUpdateStmt.
+ auto CheckUpdateStmt = [this, &ErrorInfo](Stmt *US) {
+ auto *BO = dyn_cast<BinaryOperator>(US);
+ if (!BO) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = US->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = US->getSourceRange();
+ return false;
+ }
+ if (BO->getOpcode() != BO_Assign) {
+ ErrorInfo.Error = ErrorTy::NotAnAssignment;
+ ErrorInfo.ErrorLoc = BO->getExprLoc();
+ ErrorInfo.NoteLoc = BO->getOperatorLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = BO->getSourceRange();
+ return false;
+ }
+ if (!checkIfTwoExprsAreSame(ContextRef, this->X, BO->getRHS())) {
+ ErrorInfo.Error = ErrorTy::InvalidAssignment;
+ ErrorInfo.ErrorLoc = BO->getRHS()->getExprLoc();
+ ErrorInfo.NoteLoc = this->X->getExprLoc();
+ ErrorInfo.ErrorRange = BO->getRHS()->getSourceRange();
+ ErrorInfo.NoteRange = this->X->getSourceRange();
+ return false;
+ }
+
+ this->V = BO->getLHS();
+
+ return true;
+ };
+
+ if (!CheckCondUpdateStmt(CondUpdateStmt))
+ return false;
+ if (!CheckUpdateStmt(UpdateStmt))
+ return false;
+ } else {
+ ErrorInfo.Error = ErrorTy::MoreThanTwoStmts;
+ ErrorInfo.ErrorLoc = ErrorInfo.NoteLoc = CS->getBeginLoc();
+ ErrorInfo.ErrorRange = ErrorInfo.NoteRange = CS->getSourceRange();
+ return false;
+ }
+
+ return checkType(ErrorInfo);
+}
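+
+// Illustrative (assumption): '{ v = x; if (x == e) { x = d; } }' takes the
+// postfix-update path above, while '{ r = x == e; if (r) { x = d; } }' is
+// dispatched to checkForm45.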
} // namespace
StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
@@ -10945,14 +12272,18 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation AtomicKindLoc;
OpenMPClauseKind MemOrderKind = OMPC_unknown;
SourceLocation MemOrderLoc;
+ bool MutexClauseEncountered = false;
+ llvm::SmallSet<OpenMPClauseKind, 2> EncounteredAtomicKinds;
for (const OMPClause *C : Clauses) {
switch (C->getClauseKind()) {
case OMPC_read:
case OMPC_write:
case OMPC_update:
+ MutexClauseEncountered = true;
+ LLVM_FALLTHROUGH;
case OMPC_capture:
case OMPC_compare: {
- if (AtomicKind != OMPC_unknown) {
+ if (AtomicKind != OMPC_unknown && MutexClauseEncountered) {
Diag(C->getBeginLoc(), diag::err_omp_atomic_several_clauses)
<< SourceRange(C->getBeginLoc(), C->getEndLoc());
Diag(AtomicKindLoc, diag::note_omp_previous_mem_order_clause)
@@ -10960,6 +12291,12 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
} else {
AtomicKind = C->getClauseKind();
AtomicKindLoc = C->getBeginLoc();
+ if (!EncounteredAtomicKinds.insert(C->getClauseKind()).second) {
+ Diag(C->getBeginLoc(), diag::err_omp_atomic_several_clauses)
+ << SourceRange(C->getBeginLoc(), C->getEndLoc());
+ Diag(AtomicKindLoc, diag::note_omp_previous_mem_order_clause)
+ << getOpenMPClauseName(AtomicKind);
+ }
}
break;
}
@@ -10987,6 +12324,12 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
llvm_unreachable("unknown clause is encountered");
}
}
+ bool IsCompareCapture = false;
+ if (EncounteredAtomicKinds.contains(OMPC_compare) &&
+ EncounteredAtomicKinds.contains(OMPC_capture)) {
+ IsCompareCapture = true;
+ AtomicKind = OMPC_compare;
+ }
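+  // e.g. (illustrative): '#pragma omp atomic compare capture' records both
+  // clause kinds, so the directive is treated below as a compare form with
+  // capture semantics.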
// OpenMP 5.0, 2.17.7 atomic Construct, Restrictions
// If atomic-clause is read then memory-order-clause must not be acq_rel or
// release.
@@ -11018,8 +12361,12 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Expr *V = nullptr;
Expr *E = nullptr;
Expr *UE = nullptr;
+ Expr *D = nullptr;
+ Expr *CE = nullptr;
+ Expr *R = nullptr;
bool IsXLHSInRHSPart = false;
bool IsPostfixUpdate = false;
+ bool IsFailOnly = false;
// OpenMP [2.12.6, atomic Construct]
// In the next expressions:
// * x and v (as applicable) are both l-value expressions with scalar type.
@@ -11405,18 +12752,50 @@ StmtResult Sema::ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
if (CurContext->isDependentContext())
UE = V = E = X = nullptr;
} else if (AtomicKind == OMPC_compare) {
- // TODO: For now we emit an error here and in emitOMPAtomicExpr we ignore
- // code gen.
- unsigned DiagID = Diags.getCustomDiagID(
- DiagnosticsEngine::Error, "atomic compare is not supported for now");
- Diag(AtomicKindLoc, DiagID);
+ if (IsCompareCapture) {
+ OpenMPAtomicCompareCaptureChecker::ErrorInfoTy ErrorInfo;
+ OpenMPAtomicCompareCaptureChecker Checker(*this);
+ if (!Checker.checkStmt(Body, ErrorInfo)) {
+ Diag(ErrorInfo.ErrorLoc, diag::err_omp_atomic_compare_capture)
+ << ErrorInfo.ErrorRange;
+ Diag(ErrorInfo.NoteLoc, diag::note_omp_atomic_compare)
+ << ErrorInfo.Error << ErrorInfo.NoteRange;
+ return StmtError();
+ }
+ X = Checker.getX();
+ E = Checker.getE();
+ D = Checker.getD();
+ CE = Checker.getCond();
+ V = Checker.getV();
+ R = Checker.getR();
+ // We reuse IsXLHSInRHSPart to tell if it is in the form 'x ordop expr'.
+ IsXLHSInRHSPart = Checker.isXBinopExpr();
+ IsFailOnly = Checker.isFailOnly();
+ IsPostfixUpdate = Checker.isPostfixUpdate();
+ } else {
+ OpenMPAtomicCompareChecker::ErrorInfoTy ErrorInfo;
+ OpenMPAtomicCompareChecker Checker(*this);
+ if (!Checker.checkStmt(Body, ErrorInfo)) {
+ Diag(ErrorInfo.ErrorLoc, diag::err_omp_atomic_compare)
+ << ErrorInfo.ErrorRange;
+ Diag(ErrorInfo.NoteLoc, diag::note_omp_atomic_compare)
+ << ErrorInfo.Error << ErrorInfo.NoteRange;
+ return StmtError();
+ }
+ X = Checker.getX();
+ E = Checker.getE();
+ D = Checker.getD();
+ CE = Checker.getCond();
+ // We reuse IsXLHSInRHSPart to tell if it is in the form 'x ordop expr'.
+ IsXLHSInRHSPart = Checker.isXBinopExpr();
+ }
}
setFunctionHasBranchProtectedScope();
- return OMPAtomicDirective::Create(Context, StartLoc, EndLoc, Clauses, AStmt,
- X, V, E, UE, IsXLHSInRHSPart,
- IsPostfixUpdate);
+ return OMPAtomicDirective::Create(
+ Context, StartLoc, EndLoc, Clauses, AStmt,
+ {X, V, R, E, UE, D, CE, IsXLHSInRHSPart, IsPostfixUpdate, IsFailOnly});
}
StmtResult Sema::ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
@@ -11583,6 +12962,26 @@ static bool hasClauses(ArrayRef<OMPClause *> Clauses, const OpenMPClauseKind K,
return hasClauses(Clauses, K) || hasClauses(Clauses, ClauseTypes...);
}
+/// Check that any 'declare target' variables in the 'to'/'from' motion
+/// clauses are externally visible.
+static bool isClauseMappable(ArrayRef<OMPClause *> Clauses) {
+ for (const OMPClause *C : Clauses) {
+ if (auto *TC = dyn_cast<OMPToClause>(C))
+ return llvm::all_of(TC->all_decls(), [](ValueDecl *VD) {
+ return !VD || !VD->hasAttr<OMPDeclareTargetDeclAttr>() ||
+ (VD->isExternallyVisible() &&
+ VD->getVisibility() != HiddenVisibility);
+ });
+ else if (auto *FC = dyn_cast<OMPFromClause>(C))
+ return llvm::all_of(FC->all_decls(), [](ValueDecl *VD) {
+ return !VD || !VD->hasAttr<OMPDeclareTargetDeclAttr>() ||
+ (VD->isExternallyVisible() &&
+ VD->getVisibility() != HiddenVisibility);
+ });
+ }
+
+ return true;
+}
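+
+// Illustrative example (assumption): given
+//   static int s;
+//   #pragma omp declare target to(s)
+// a later '#pragma omp target update from(s)' fails the check above, because
+// 's' has internal linkage and is not externally visible.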
+
StmtResult Sema::ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
@@ -11716,6 +13115,12 @@ StmtResult Sema::ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
Diag(StartLoc, diag::err_omp_at_least_one_motion_clause_required);
return StmtError();
}
+
+ if (!isClauseMappable(Clauses)) {
+ Diag(StartLoc, diag::err_omp_cannot_update_with_internal_linkage);
+ return StmtError();
+ }
+
return OMPTargetUpdateDirective::Create(Context, StartLoc, EndLoc, Clauses,
AStmt);
}
@@ -11927,6 +13332,44 @@ StmtResult Sema::ActOnOpenMPMasterTaskLoopDirective(
DSAStack->isCancelRegion());
}
+StmtResult Sema::ActOnOpenMPMaskedTaskLoopDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ OMPLoopBasedDirective::HelperExprs B;
+  // In the presence of a 'collapse' clause, or an 'ordered' clause with a
+  // loop count, it defines the number of nested loops.
+ unsigned NestedLoopCount =
+ checkOpenMPLoop(OMPD_masked_taskloop, getCollapseNumberExpr(Clauses),
+ /*OrderedLoopCountExpr=*/nullptr, AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp for loop exprs were not built");
+
+ // OpenMP, [2.9.2 taskloop Construct, Restrictions]
+ // The grainsize clause and num_tasks clause are mutually exclusive and may
+ // not appear on the same taskloop directive.
+ if (checkMutuallyExclusiveClauses(*this, Clauses,
+ {OMPC_grainsize, OMPC_num_tasks}))
+ return StmtError();
+ // OpenMP, [2.9.2 taskloop Construct, Restrictions]
+ // If a reduction clause is present on the taskloop directive, the nogroup
+ // clause must not be specified.
+ if (checkReductionClauseWithNogroup(*this, Clauses))
+ return StmtError();
+
+ setFunctionHasBranchProtectedScope();
+ return OMPMaskedTaskLoopDirective::Create(Context, StartLoc, EndLoc,
+ NestedLoopCount, Clauses, AStmt, B,
+ DSAStack->isCancelRegion());
+}
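+
+// Illustrative (assumption): the mutual-exclusion check above rejects, e.g.,
+//   #pragma omp masked taskloop grainsize(4) num_tasks(2)
+//   for (int i = 0; i < N; ++i) ...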
+
StmtResult Sema::ActOnOpenMPMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
@@ -11977,6 +13420,56 @@ StmtResult Sema::ActOnOpenMPMasterTaskLoopSimdDirective(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
+StmtResult Sema::ActOnOpenMPMaskedTaskLoopSimdDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ OMPLoopBasedDirective::HelperExprs B;
+  // In the presence of a 'collapse' clause, or an 'ordered' clause with a
+  // loop count, it defines the number of nested loops.
+ unsigned NestedLoopCount =
+ checkOpenMPLoop(OMPD_masked_taskloop_simd, getCollapseNumberExpr(Clauses),
+ /*OrderedLoopCountExpr=*/nullptr, AStmt, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp for loop exprs were not built");
+
+ if (!CurContext->isDependentContext()) {
+ // Finalize the clauses that need pre-built expressions for CodeGen.
+ for (OMPClause *C : Clauses) {
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
+ if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
+ B.NumIterations, *this, CurScope,
+ DSAStack))
+ return StmtError();
+ }
+ }
+
+ // OpenMP, [2.9.2 taskloop Construct, Restrictions]
+ // The grainsize clause and num_tasks clause are mutually exclusive and may
+ // not appear on the same taskloop directive.
+ if (checkMutuallyExclusiveClauses(*this, Clauses,
+ {OMPC_grainsize, OMPC_num_tasks}))
+ return StmtError();
+ // OpenMP, [2.9.2 taskloop Construct, Restrictions]
+ // If a reduction clause is present on the taskloop directive, the nogroup
+ // clause must not be specified.
+ if (checkReductionClauseWithNogroup(*this, Clauses))
+ return StmtError();
+ if (checkSimdlenSafelenSpecified(*this, Clauses))
+ return StmtError();
+
+ setFunctionHasBranchProtectedScope();
+ return OMPMaskedTaskLoopSimdDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
+
StmtResult Sema::ActOnOpenMPParallelMasterTaskLoopDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
@@ -12034,6 +13527,63 @@ StmtResult Sema::ActOnOpenMPParallelMasterTaskLoopDirective(
DSAStack->isCancelRegion());
}
+StmtResult Sema::ActOnOpenMPParallelMaskedTaskLoopDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ auto *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ for (int ThisCaptureLevel =
+ getOpenMPCaptureLevels(OMPD_parallel_masked_taskloop);
+ ThisCaptureLevel > 1; --ThisCaptureLevel) {
+ CS = cast<CapturedStmt>(CS->getCapturedStmt());
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ }
+
+ OMPLoopBasedDirective::HelperExprs B;
+  // In the presence of a 'collapse' clause, or an 'ordered' clause with a
+  // loop count, it defines the number of nested loops.
+ unsigned NestedLoopCount = checkOpenMPLoop(
+ OMPD_parallel_masked_taskloop, getCollapseNumberExpr(Clauses),
+ /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp for loop exprs were not built");
+
+ // OpenMP, [2.9.2 taskloop Construct, Restrictions]
+ // The grainsize clause and num_tasks clause are mutually exclusive and may
+ // not appear on the same taskloop directive.
+ if (checkMutuallyExclusiveClauses(*this, Clauses,
+ {OMPC_grainsize, OMPC_num_tasks}))
+ return StmtError();
+ // OpenMP, [2.9.2 taskloop Construct, Restrictions]
+ // If a reduction clause is present on the taskloop directive, the nogroup
+ // clause must not be specified.
+ if (checkReductionClauseWithNogroup(*this, Clauses))
+ return StmtError();
+
+ setFunctionHasBranchProtectedScope();
+ return OMPParallelMaskedTaskLoopDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B,
+ DSAStack->isCancelRegion());
+}
+
StmtResult Sema::ActOnOpenMPParallelMasterTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
@@ -12103,6 +13653,75 @@ StmtResult Sema::ActOnOpenMPParallelMasterTaskLoopSimdDirective(
Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
}
+StmtResult Sema::ActOnOpenMPParallelMaskedTaskLoopSimdDirective(
+ ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
+ SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
+ if (!AStmt)
+ return StmtError();
+
+ assert(isa<CapturedStmt>(AStmt) && "Captured statement expected");
+ auto *CS = cast<CapturedStmt>(AStmt);
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ for (int ThisCaptureLevel =
+ getOpenMPCaptureLevels(OMPD_parallel_masked_taskloop_simd);
+ ThisCaptureLevel > 1; --ThisCaptureLevel) {
+ CS = cast<CapturedStmt>(CS->getCapturedStmt());
+ // 1.2.2 OpenMP Language Terminology
+ // Structured block - An executable statement with a single entry at the
+ // top and a single exit at the bottom.
+ // The point of exit cannot be a branch out of the structured block.
+ // longjmp() and throw() must not violate the entry/exit criteria.
+ CS->getCapturedDecl()->setNothrow();
+ }
+
+ OMPLoopBasedDirective::HelperExprs B;
+  // In the presence of a 'collapse' clause, or an 'ordered' clause with a
+  // loop count, it defines the number of nested loops.
+ unsigned NestedLoopCount = checkOpenMPLoop(
+ OMPD_parallel_masked_taskloop_simd, getCollapseNumberExpr(Clauses),
+ /*OrderedLoopCountExpr=*/nullptr, CS, *this, *DSAStack,
+ VarsWithImplicitDSA, B);
+ if (NestedLoopCount == 0)
+ return StmtError();
+
+ assert((CurContext->isDependentContext() || B.builtAll()) &&
+ "omp for loop exprs were not built");
+
+ if (!CurContext->isDependentContext()) {
+ // Finalize the clauses that need pre-built expressions for CodeGen.
+ for (OMPClause *C : Clauses) {
+ if (auto *LC = dyn_cast<OMPLinearClause>(C))
+ if (FinishOpenMPLinearClause(*LC, cast<DeclRefExpr>(B.IterationVarRef),
+ B.NumIterations, *this, CurScope,
+ DSAStack))
+ return StmtError();
+ }
+ }
+
+ // OpenMP, [2.9.2 taskloop Construct, Restrictions]
+ // The grainsize clause and num_tasks clause are mutually exclusive and may
+ // not appear on the same taskloop directive.
+ if (checkMutuallyExclusiveClauses(*this, Clauses,
+ {OMPC_grainsize, OMPC_num_tasks}))
+ return StmtError();
+ // OpenMP, [2.9.2 taskloop Construct, Restrictions]
+ // If a reduction clause is present on the taskloop directive, the nogroup
+ // clause must not be specified.
+ if (checkReductionClauseWithNogroup(*this, Clauses))
+ return StmtError();
+ if (checkSimdlenSafelenSpecified(*this, Clauses))
+ return StmtError();
+
+ setFunctionHasBranchProtectedScope();
+ return OMPParallelMaskedTaskLoopSimdDirective::Create(
+ Context, StartLoc, EndLoc, NestedLoopCount, Clauses, AStmt, B);
+}
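+
+// A minimal usage sketch (illustrative, not part of this patch) of the
+// construct accepted above; 'safelen' exercises the simd checks and
+// 'grainsize' the taskloop checks:
+//   #pragma omp parallel masked taskloop simd safelen(8) grainsize(16)
+//   for (int I = 0; I < N; ++I)
+//     A[I] += B[I];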
+
StmtResult Sema::ActOnOpenMPDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA) {
@@ -12899,8 +14518,8 @@ bool Sema::checkTransformableLoopNest(
llvm_unreachable("Unhandled loop transformation");
if (!DependentPreInits)
return;
- for (Decl *C : cast<DeclStmt>(DependentPreInits)->getDeclGroup())
- OriginalInits.back().push_back(C);
+ llvm::append_range(OriginalInits.back(),
+ cast<DeclStmt>(DependentPreInits)->getDeclGroup());
});
assert(OriginalInits.back().empty() && "No preinit after innermost loop");
OriginalInits.pop_back();
@@ -13062,8 +14681,8 @@ StmtResult Sema::ActOnOpenMPTileDirective(ArrayRef<OMPClause *> Clauses,
SmallVector<Stmt *, 4> BodyParts;
BodyParts.append(LoopHelper.Updates.begin(), LoopHelper.Updates.end());
BodyParts.push_back(Inner);
- Inner = CompoundStmt::Create(Context, BodyParts, Inner->getBeginLoc(),
- Inner->getEndLoc());
+ Inner = CompoundStmt::Create(Context, BodyParts, FPOptionsOverride(),
+ Inner->getBeginLoc(), Inner->getEndLoc());
Inner = new (Context)
ForStmt(Context, InitStmt.get(), CondExpr.get(), nullptr,
IncrStmt.get(), Inner, LoopHelper.Init->getBeginLoc(),
@@ -13245,8 +14864,7 @@ StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
uint64_t Factor;
SourceLocation FactorLoc;
if (Expr *FactorVal = PartialClause->getFactor()) {
- Factor =
- FactorVal->getIntegerConstantExpr(Context).getValue().getZExtValue();
+ Factor = FactorVal->getIntegerConstantExpr(Context)->getZExtValue();
FactorLoc = FactorVal->getExprLoc();
} else {
// TODO: Use a better profitability model.
@@ -13338,8 +14956,9 @@ StmtResult Sema::ActOnOpenMPUnrollDirective(ArrayRef<OMPClause *> Clauses,
SmallVector<Stmt *> InnerBodyStmts;
InnerBodyStmts.append(LoopHelper.Updates.begin(), LoopHelper.Updates.end());
InnerBodyStmts.push_back(Body);
- CompoundStmt *InnerBody = CompoundStmt::Create(
- Context, InnerBodyStmts, Body->getBeginLoc(), Body->getEndLoc());
+ CompoundStmt *InnerBody =
+ CompoundStmt::Create(Context, InnerBodyStmts, FPOptionsOverride(),
+ Body->getBeginLoc(), Body->getEndLoc());
ForStmt *InnerFor = new (Context)
ForStmt(Context, InnerInit.get(), InnerCond.get(), nullptr,
InnerIncr.get(), InnerBody, LoopHelper.Init->getBeginLoc(),
@@ -13553,6 +15172,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
LLVM_FALLTHROUGH;
case OMPD_target_parallel:
case OMPD_target_parallel_for:
+ case OMPD_target_parallel_loop:
// If this clause applies to the nested 'parallel' region, capture within
// the 'target' region, otherwise do not capture.
if (NameModifier == OMPD_unknown || NameModifier == OMPD_parallel)
@@ -13586,10 +15206,25 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_target_exit_data:
CaptureRegion = OMPD_task;
break;
+ case OMPD_parallel_masked_taskloop:
+ if (NameModifier == OMPD_unknown || NameModifier == OMPD_taskloop)
+ CaptureRegion = OMPD_parallel;
+ break;
case OMPD_parallel_master_taskloop:
if (NameModifier == OMPD_unknown || NameModifier == OMPD_taskloop)
CaptureRegion = OMPD_parallel;
break;
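+ // For the combined 'parallel masked taskloop simd', 'taskloop'-modified
+ // clauses (and, before OpenMP 5.0, unmodified ones) are captured in the
+ // enclosing 'parallel' region; from OpenMP 5.0 on, unmodified and
+ // 'simd'-modified clauses are captured in the 'taskloop' region.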
+ case OMPD_parallel_masked_taskloop_simd:
+ if ((OpenMPVersion <= 45 && NameModifier == OMPD_unknown) ||
+ NameModifier == OMPD_taskloop) {
+ CaptureRegion = OMPD_parallel;
+ break;
+ }
+ if (OpenMPVersion <= 45)
+ break;
+ if (NameModifier == OMPD_unknown || NameModifier == OMPD_simd)
+ CaptureRegion = OMPD_taskloop;
+ break;
case OMPD_parallel_master_taskloop_simd:
if ((OpenMPVersion <= 45 && NameModifier == OMPD_unknown) ||
NameModifier == OMPD_taskloop) {
@@ -13609,6 +15244,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
break;
case OMPD_taskloop_simd:
case OMPD_master_taskloop_simd:
+ case OMPD_masked_taskloop_simd:
if (OpenMPVersion <= 45)
break;
if (NameModifier == OMPD_unknown || NameModifier == OMPD_simd)
@@ -13634,15 +15270,19 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_cancel:
case OMPD_parallel:
case OMPD_parallel_master:
+ case OMPD_parallel_masked:
case OMPD_parallel_sections:
case OMPD_parallel_for:
+ case OMPD_parallel_loop:
case OMPD_target:
case OMPD_target_teams:
case OMPD_target_teams_distribute:
+ case OMPD_target_teams_loop:
case OMPD_distribute_parallel_for:
case OMPD_task:
case OMPD_taskloop:
case OMPD_master_taskloop:
+ case OMPD_masked_taskloop:
case OMPD_target_data:
case OMPD_simd:
case OMPD_for_simd:
@@ -13667,6 +15307,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_loop:
+ case OMPD_teams_loop:
case OMPD_teams:
case OMPD_tile:
case OMPD_unroll:
@@ -13695,6 +15336,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
+ case OMPD_target_parallel_loop:
CaptureRegion = OMPD_target;
break;
case OMPD_teams_distribute_parallel_for:
@@ -13705,13 +15347,17 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
break;
case OMPD_parallel:
case OMPD_parallel_master:
+ case OMPD_parallel_masked:
case OMPD_parallel_sections:
case OMPD_parallel_for:
case OMPD_parallel_for_simd:
+ case OMPD_parallel_loop:
case OMPD_distribute_parallel_for:
case OMPD_distribute_parallel_for_simd:
case OMPD_parallel_master_taskloop:
+ case OMPD_parallel_masked_taskloop:
case OMPD_parallel_master_taskloop_simd:
+ case OMPD_parallel_masked_taskloop_simd:
// Do not capture num_threads-clause expressions.
break;
case OMPD_target_data:
@@ -13728,7 +15374,9 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
+ case OMPD_masked_taskloop:
case OMPD_master_taskloop_simd:
+ case OMPD_masked_taskloop_simd:
case OMPD_threadprivate:
case OMPD_allocate:
case OMPD_taskyield:
@@ -13747,6 +15395,8 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_loop:
+ case OMPD_teams_loop:
+ case OMPD_target_teams_loop:
case OMPD_teams:
case OMPD_simd:
case OMPD_tile:
@@ -13781,6 +15431,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_target_teams_distribute_simd:
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd:
+ case OMPD_target_teams_loop:
CaptureRegion = OMPD_target;
break;
case OMPD_teams_distribute_parallel_for:
@@ -13788,6 +15439,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_teams:
case OMPD_teams_distribute:
case OMPD_teams_distribute_simd:
+ case OMPD_teams_loop:
// Do not capture num_teams-clause expressions.
break;
case OMPD_distribute_parallel_for:
@@ -13796,9 +15448,13 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
+ case OMPD_masked_taskloop:
case OMPD_master_taskloop_simd:
+ case OMPD_masked_taskloop_simd:
case OMPD_parallel_master_taskloop:
+ case OMPD_parallel_masked_taskloop:
case OMPD_parallel_master_taskloop_simd:
+ case OMPD_parallel_masked_taskloop_simd:
case OMPD_target_data:
case OMPD_target_enter_data:
case OMPD_target_exit_data:
@@ -13806,14 +15462,17 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_cancel:
case OMPD_parallel:
case OMPD_parallel_master:
+ case OMPD_parallel_masked:
case OMPD_parallel_sections:
case OMPD_parallel_for:
case OMPD_parallel_for_simd:
+ case OMPD_parallel_loop:
case OMPD_target:
case OMPD_target_simd:
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
+ case OMPD_target_parallel_loop:
case OMPD_threadprivate:
case OMPD_allocate:
case OMPD_taskyield:
@@ -13863,6 +15522,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_target_teams_distribute_simd:
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd:
+ case OMPD_target_teams_loop:
CaptureRegion = OMPD_target;
break;
case OMPD_teams_distribute_parallel_for:
@@ -13870,6 +15530,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_teams:
case OMPD_teams_distribute:
case OMPD_teams_distribute_simd:
+ case OMPD_teams_loop:
// Do not capture thread_limit-clause expressions.
break;
case OMPD_distribute_parallel_for:
@@ -13878,9 +15539,13 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
+ case OMPD_masked_taskloop:
case OMPD_master_taskloop_simd:
+ case OMPD_masked_taskloop_simd:
case OMPD_parallel_master_taskloop:
+ case OMPD_parallel_masked_taskloop:
case OMPD_parallel_master_taskloop_simd:
+ case OMPD_parallel_masked_taskloop_simd:
case OMPD_target_data:
case OMPD_target_enter_data:
case OMPD_target_exit_data:
@@ -13888,14 +15553,17 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_cancel:
case OMPD_parallel:
case OMPD_parallel_master:
+ case OMPD_parallel_masked:
case OMPD_parallel_sections:
case OMPD_parallel_for:
case OMPD_parallel_for_simd:
+ case OMPD_parallel_loop:
case OMPD_target:
case OMPD_target_simd:
case OMPD_target_parallel:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
+ case OMPD_target_parallel_loop:
case OMPD_threadprivate:
case OMPD_allocate:
case OMPD_taskyield:
@@ -13960,9 +15628,13 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
+ case OMPD_masked_taskloop:
case OMPD_master_taskloop_simd:
+ case OMPD_masked_taskloop_simd:
case OMPD_parallel_master_taskloop:
+ case OMPD_parallel_masked_taskloop:
case OMPD_parallel_master_taskloop_simd:
+ case OMPD_parallel_masked_taskloop_simd:
case OMPD_target_data:
case OMPD_target_enter_data:
case OMPD_target_exit_data:
@@ -13978,6 +15650,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_cancel:
case OMPD_parallel:
case OMPD_parallel_master:
+ case OMPD_parallel_masked:
case OMPD_parallel_sections:
case OMPD_threadprivate:
case OMPD_allocate:
@@ -13997,6 +15670,10 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_loop:
+ case OMPD_teams_loop:
+ case OMPD_target_teams_loop:
+ case OMPD_parallel_loop:
+ case OMPD_target_parallel_loop:
case OMPD_simd:
case OMPD_tile:
case OMPD_unroll:
@@ -14046,9 +15723,13 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
+ case OMPD_masked_taskloop:
case OMPD_master_taskloop_simd:
+ case OMPD_masked_taskloop_simd:
case OMPD_parallel_master_taskloop:
+ case OMPD_parallel_masked_taskloop:
case OMPD_parallel_master_taskloop_simd:
+ case OMPD_parallel_masked_taskloop_simd:
case OMPD_target_data:
case OMPD_target_enter_data:
case OMPD_target_exit_data:
@@ -14060,6 +15741,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_cancel:
case OMPD_parallel:
case OMPD_parallel_master:
+ case OMPD_parallel_masked:
case OMPD_parallel_sections:
case OMPD_threadprivate:
case OMPD_allocate:
@@ -14079,6 +15761,10 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_loop:
+ case OMPD_teams_loop:
+ case OMPD_target_teams_loop:
+ case OMPD_parallel_loop:
+ case OMPD_target_parallel_loop:
case OMPD_simd:
case OMPD_tile:
case OMPD_unroll:
@@ -14115,8 +15801,10 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_target_teams_distribute_simd:
case OMPD_target_parallel_for:
case OMPD_target_parallel_for_simd:
+ case OMPD_target_parallel_loop:
case OMPD_target_teams_distribute_parallel_for:
case OMPD_target_teams_distribute_parallel_for_simd:
+ case OMPD_target_teams_loop:
case OMPD_dispatch:
CaptureRegion = OMPD_task;
break;
@@ -14135,12 +15823,17 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
+ case OMPD_masked_taskloop:
case OMPD_master_taskloop_simd:
+ case OMPD_masked_taskloop_simd:
case OMPD_parallel_master_taskloop:
+ case OMPD_parallel_masked_taskloop:
case OMPD_parallel_master_taskloop_simd:
+ case OMPD_parallel_masked_taskloop_simd:
case OMPD_cancel:
case OMPD_parallel:
case OMPD_parallel_master:
+ case OMPD_parallel_masked:
case OMPD_parallel_sections:
case OMPD_parallel_for:
case OMPD_parallel_for_simd:
@@ -14162,6 +15855,8 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_loop:
+ case OMPD_teams_loop:
+ case OMPD_parallel_loop:
case OMPD_simd:
case OMPD_tile:
case OMPD_unroll:
@@ -14195,8 +15890,12 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_taskloop:
case OMPD_taskloop_simd:
case OMPD_master_taskloop:
+ case OMPD_masked_taskloop:
case OMPD_master_taskloop_simd:
+ case OMPD_masked_taskloop_simd:
break;
+ case OMPD_parallel_masked_taskloop:
+ case OMPD_parallel_masked_taskloop_simd:
case OMPD_parallel_master_taskloop:
case OMPD_parallel_master_taskloop_simd:
CaptureRegion = OMPD_parallel;
@@ -14225,6 +15924,7 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_cancel:
case OMPD_parallel:
case OMPD_parallel_master:
+ case OMPD_parallel_masked:
case OMPD_parallel_sections:
case OMPD_parallel_for:
case OMPD_parallel_for_simd:
@@ -14246,6 +15946,10 @@ static OpenMPDirectiveKind getOpenMPCaptureRegionForClause(
case OMPD_declare_target:
case OMPD_end_declare_target:
case OMPD_loop:
+ case OMPD_teams_loop:
+ case OMPD_target_teams_loop:
+ case OMPD_parallel_loop:
+ case OMPD_target_parallel_loop:
case OMPD_simd:
case OMPD_tile:
case OMPD_unroll:
@@ -14811,6 +16515,7 @@ OMPClause *Sema::ActOnOpenMPSimpleClause(
case OMPC_use_device_ptr:
case OMPC_use_device_addr:
case OMPC_is_device_ptr:
+ case OMPC_has_device_addr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
case OMPC_reverse_offload:
@@ -14877,6 +16582,9 @@ OMPClause *Sema::ActOnOpenMPDefaultClause(DefaultKind Kind,
case OMP_DEFAULT_firstprivate:
DSAStack->setDefaultDSAFirstPrivate(KindKwLoc);
break;
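+ // OpenMP 5.1 also allows 'default(private)', e.g. (illustrative):
+ //   #pragma omp parallel default(private)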
+ case OMP_DEFAULT_private:
+ DSAStack->setDefaultDSAPrivate(KindKwLoc);
+ break;
default:
llvm_unreachable("DSA unexpected in OpenMP default clause");
}
@@ -14953,8 +16661,11 @@ OMPClause *Sema::ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
SourceLocation EndLoc) {
if (Kind == OMPC_DEPEND_unknown || Kind == OMPC_DEPEND_source ||
Kind == OMPC_DEPEND_sink || Kind == OMPC_DEPEND_depobj) {
- unsigned Except[] = {OMPC_DEPEND_source, OMPC_DEPEND_sink,
- OMPC_DEPEND_depobj};
+ SmallVector<unsigned> Except = {
+ OMPC_DEPEND_source, OMPC_DEPEND_sink, OMPC_DEPEND_depobj,
+ OMPC_DEPEND_outallmemory, OMPC_DEPEND_inoutallmemory};
+ if (LangOpts.OpenMP < 51)
+ Except.push_back(OMPC_DEPEND_inoutset);
Diag(KindKwLoc, diag::err_omp_unexpected_clause_value)
<< getListOfPossibleValues(OMPC_depend, /*First=*/0,
/*Last=*/OMPC_DEPEND_unknown, Except)
@@ -15114,6 +16825,7 @@ OMPClause *Sema::ActOnOpenMPSingleExprWithArgClause(
case OMPC_use_device_ptr:
case OMPC_use_device_addr:
case OMPC_is_device_ptr:
+ case OMPC_has_device_addr:
case OMPC_unified_address:
case OMPC_unified_shared_memory:
case OMPC_reverse_offload:
@@ -15373,6 +17085,7 @@ OMPClause *Sema::ActOnOpenMPClause(OpenMPClauseKind Kind,
case OMPC_use_device_ptr:
case OMPC_use_device_addr:
case OMPC_is_device_ptr:
+ case OMPC_has_device_addr:
case OMPC_atomic_default_mem_order:
case OMPC_device_type:
case OMPC_match:
@@ -15400,6 +17113,7 @@ OMPClause *Sema::ActOnOpenMPNowaitClause(SourceLocation StartLoc,
OMPClause *Sema::ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc) {
+ DSAStack->setUntiedRegion();
return new (Context) OMPUntiedClause(StartLoc, EndLoc);
}
@@ -15759,20 +17473,17 @@ OMPClause *Sema::ActOnOpenMPFilterClause(Expr *ThreadID,
StartLoc, LParenLoc, EndLoc);
}
-OMPClause *Sema::ActOnOpenMPVarListClause(
- OpenMPClauseKind Kind, ArrayRef<Expr *> VarList, Expr *DepModOrTailExpr,
- const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
- CXXScopeSpec &ReductionOrMapperIdScopeSpec,
- DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
- ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
- ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
- SourceLocation ExtraModifierLoc,
- ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
- ArrayRef<SourceLocation> MotionModifiersLoc) {
+OMPClause *Sema::ActOnOpenMPVarListClause(OpenMPClauseKind Kind,
+ ArrayRef<Expr *> VarList,
+ const OMPVarListLocTy &Locs,
+ OpenMPVarListDataTy &Data) {
SourceLocation StartLoc = Locs.StartLoc;
SourceLocation LParenLoc = Locs.LParenLoc;
SourceLocation EndLoc = Locs.EndLoc;
OMPClause *Res = nullptr;
+ int ExtraModifier = Data.ExtraModifier;
+ SourceLocation ExtraModifierLoc = Data.ExtraModifierLoc;
+ SourceLocation ColonLoc = Data.ColonLoc;
switch (Kind) {
case OMPC_private:
Res = ActOnOpenMPPrivateClause(VarList, StartLoc, LParenLoc, EndLoc);
@@ -15796,28 +17507,28 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
Res = ActOnOpenMPReductionClause(
VarList, static_cast<OpenMPReductionClauseModifier>(ExtraModifier),
StartLoc, LParenLoc, ExtraModifierLoc, ColonLoc, EndLoc,
- ReductionOrMapperIdScopeSpec, ReductionOrMapperId);
+ Data.ReductionOrMapperIdScopeSpec, Data.ReductionOrMapperId);
break;
case OMPC_task_reduction:
- Res = ActOnOpenMPTaskReductionClause(VarList, StartLoc, LParenLoc, ColonLoc,
- EndLoc, ReductionOrMapperIdScopeSpec,
- ReductionOrMapperId);
+ Res = ActOnOpenMPTaskReductionClause(
+ VarList, StartLoc, LParenLoc, ColonLoc, EndLoc,
+ Data.ReductionOrMapperIdScopeSpec, Data.ReductionOrMapperId);
break;
case OMPC_in_reduction:
- Res = ActOnOpenMPInReductionClause(VarList, StartLoc, LParenLoc, ColonLoc,
- EndLoc, ReductionOrMapperIdScopeSpec,
- ReductionOrMapperId);
+ Res = ActOnOpenMPInReductionClause(
+ VarList, StartLoc, LParenLoc, ColonLoc, EndLoc,
+ Data.ReductionOrMapperIdScopeSpec, Data.ReductionOrMapperId);
break;
case OMPC_linear:
assert(0 <= ExtraModifier && ExtraModifier <= OMPC_LINEAR_unknown &&
"Unexpected linear modifier.");
Res = ActOnOpenMPLinearClause(
- VarList, DepModOrTailExpr, StartLoc, LParenLoc,
+ VarList, Data.DepModOrTailExpr, StartLoc, LParenLoc,
static_cast<OpenMPLinearClauseKind>(ExtraModifier), ExtraModifierLoc,
ColonLoc, EndLoc);
break;
case OMPC_aligned:
- Res = ActOnOpenMPAlignedClause(VarList, DepModOrTailExpr, StartLoc,
+ Res = ActOnOpenMPAlignedClause(VarList, Data.DepModOrTailExpr, StartLoc,
LParenLoc, ColonLoc, EndLoc);
break;
case OMPC_copyin:
@@ -15833,26 +17544,30 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
assert(0 <= ExtraModifier && ExtraModifier <= OMPC_DEPEND_unknown &&
"Unexpected depend modifier.");
Res = ActOnOpenMPDependClause(
- DepModOrTailExpr, static_cast<OpenMPDependClauseKind>(ExtraModifier),
- ExtraModifierLoc, ColonLoc, VarList, StartLoc, LParenLoc, EndLoc);
+ {static_cast<OpenMPDependClauseKind>(ExtraModifier), ExtraModifierLoc,
+ ColonLoc, Data.OmpAllMemoryLoc},
+ Data.DepModOrTailExpr, VarList, StartLoc, LParenLoc, EndLoc);
break;
case OMPC_map:
assert(0 <= ExtraModifier && ExtraModifier <= OMPC_MAP_unknown &&
"Unexpected map modifier.");
Res = ActOnOpenMPMapClause(
- MapTypeModifiers, MapTypeModifiersLoc, ReductionOrMapperIdScopeSpec,
- ReductionOrMapperId, static_cast<OpenMPMapClauseKind>(ExtraModifier),
- IsMapTypeImplicit, ExtraModifierLoc, ColonLoc, VarList, Locs);
+ Data.MapTypeModifiers, Data.MapTypeModifiersLoc,
+ Data.ReductionOrMapperIdScopeSpec, Data.ReductionOrMapperId,
+ static_cast<OpenMPMapClauseKind>(ExtraModifier), Data.IsMapTypeImplicit,
+ ExtraModifierLoc, ColonLoc, VarList, Locs);
break;
case OMPC_to:
- Res = ActOnOpenMPToClause(MotionModifiers, MotionModifiersLoc,
- ReductionOrMapperIdScopeSpec, ReductionOrMapperId,
- ColonLoc, VarList, Locs);
+ Res =
+ ActOnOpenMPToClause(Data.MotionModifiers, Data.MotionModifiersLoc,
+ Data.ReductionOrMapperIdScopeSpec,
+ Data.ReductionOrMapperId, ColonLoc, VarList, Locs);
break;
case OMPC_from:
- Res = ActOnOpenMPFromClause(MotionModifiers, MotionModifiersLoc,
- ReductionOrMapperIdScopeSpec,
- ReductionOrMapperId, ColonLoc, VarList, Locs);
+ Res = ActOnOpenMPFromClause(Data.MotionModifiers, Data.MotionModifiersLoc,
+ Data.ReductionOrMapperIdScopeSpec,
+ Data.ReductionOrMapperId, ColonLoc, VarList,
+ Locs);
break;
case OMPC_use_device_ptr:
Res = ActOnOpenMPUseDevicePtrClause(VarList, Locs);
@@ -15863,8 +17578,11 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
case OMPC_is_device_ptr:
Res = ActOnOpenMPIsDevicePtrClause(VarList, Locs);
break;
+ case OMPC_has_device_addr:
+ Res = ActOnOpenMPHasDeviceAddrClause(VarList, Locs);
+ break;
case OMPC_allocate:
- Res = ActOnOpenMPAllocateClause(DepModOrTailExpr, VarList, StartLoc,
+ Res = ActOnOpenMPAllocateClause(Data.DepModOrTailExpr, VarList, StartLoc,
LParenLoc, ColonLoc, EndLoc);
break;
case OMPC_nontemporal:
@@ -15878,7 +17596,7 @@ OMPClause *Sema::ActOnOpenMPVarListClause(
break;
case OMPC_affinity:
Res = ActOnOpenMPAffinityClause(StartLoc, LParenLoc, ColonLoc, EndLoc,
- DepModOrTailExpr, VarList);
+ Data.DepModOrTailExpr, VarList);
break;
case OMPC_if:
case OMPC_depobj:
@@ -15967,6 +17685,8 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation EndLoc) {
SmallVector<Expr *, 8> Vars;
SmallVector<Expr *, 8> PrivateCopies;
+ bool IsImplicitClause =
+ StartLoc.isInvalid() && LParenLoc.isInvalid() && EndLoc.isInvalid();
for (Expr *RefExpr : VarList) {
assert(RefExpr && "NULL expr in OpenMP private clause.");
SourceLocation ELoc;
@@ -16081,9 +17801,17 @@ OMPClause *Sema::ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
*this, VDPrivate, RefExpr->getType().getUnqualifiedType(), ELoc);
DeclRefExpr *Ref = nullptr;
- if (!VD && !CurContext->isDependentContext())
- Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/false);
- DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_private, Ref);
+ if (!VD && !CurContext->isDependentContext()) {
+ auto *FD = dyn_cast<FieldDecl>(D);
+ VarDecl *VD = FD ? DSAStack->getImplicitFDCapExprDecl(FD) : nullptr;
+ if (VD)
+ Ref = buildDeclRefExpr(*this, VD, VD->getType().getNonReferenceType(),
+ RefExpr->getExprLoc());
+ else
+ Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/false);
+ }
+ if (!IsImplicitClause)
+ DSAStack->addDSA(D, RefExpr->IgnoreParens(), OMPC_private, Ref);
Vars.push_back((VD || CurContext->isDependentContext())
? RefExpr->IgnoreParens()
: Ref);
@@ -16356,8 +18084,14 @@ OMPClause *Sema::ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
if (TopDVar.CKind == OMPC_lastprivate) {
Ref = TopDVar.PrivateCopy;
} else {
- Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true);
- if (!isOpenMPCapturedDecl(D))
+ auto *FD = dyn_cast<FieldDecl>(D);
+ VarDecl *VD = FD ? DSAStack->getImplicitFDCapExprDecl(FD) : nullptr;
+ if (VD)
+ Ref = buildDeclRefExpr(*this, VD, VD->getType().getNonReferenceType(),
+ RefExpr->getExprLoc());
+ else
+ Ref = buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true);
+ if (VD || !isOpenMPCapturedDecl(D))
ExprCaptures.push_back(Ref->getDecl());
}
}
@@ -16627,7 +18361,7 @@ public:
return true;
DSAStackTy::DSAVarData DVarPrivate = Stack->hasDSA(
VD,
- [](OpenMPClauseKind C, bool AppliedToPointee) {
+ [](OpenMPClauseKind C, bool AppliedToPointee, bool) {
return isOpenMPPrivate(C) && !AppliedToPointee;
},
[](OpenMPDirectiveKind) { return true; },
@@ -18471,10 +20205,12 @@ OMPClause *Sema::ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
}
OMPClause *
-Sema::ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
- SourceLocation DepLoc, SourceLocation ColonLoc,
- ArrayRef<Expr *> VarList, SourceLocation StartLoc,
- SourceLocation LParenLoc, SourceLocation EndLoc) {
+Sema::ActOnOpenMPDependClause(const OMPDependClause::DependDataTy &Data,
+ Expr *DepModifier, ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc, SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ OpenMPDependClauseKind DepKind = Data.DepKind;
+ SourceLocation DepLoc = Data.DepLoc;
if (DSAStack->getCurrentDirective() == OMPD_ordered &&
DepKind != OMPC_DEPEND_source && DepKind != OMPC_DEPEND_sink) {
Diag(DepLoc, diag::err_omp_unexpected_clause_value)
@@ -18493,11 +20229,13 @@ Sema::ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
((LangOpts.OpenMP < 50 ||
DSAStack->getCurrentDirective() == OMPD_depobj) &&
DepKind == OMPC_DEPEND_depobj))) {
- SmallVector<unsigned, 3> Except;
- Except.push_back(OMPC_DEPEND_source);
- Except.push_back(OMPC_DEPEND_sink);
+ SmallVector<unsigned, 6> Except = {OMPC_DEPEND_source, OMPC_DEPEND_sink,
+ OMPC_DEPEND_outallmemory,
+ OMPC_DEPEND_inoutallmemory};
if (LangOpts.OpenMP < 50 || DSAStack->getCurrentDirective() == OMPD_depobj)
Except.push_back(OMPC_DEPEND_depobj);
+ if (LangOpts.OpenMP < 51)
+ Except.push_back(OMPC_DEPEND_inoutset);
std::string Expected = (LangOpts.OpenMP >= 50 && !DepModifier)
? "depend modifier(iterator) or "
: "";
@@ -18668,9 +20406,9 @@ Sema::ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
}
// OpenMP 5.0, 2.17.11 depend Clause, Restrictions, C/C++
- // List items used in depend clauses with the in, out, inout or
- // mutexinoutset dependence types cannot be expressions of the
- // omp_depend_t type.
+ // List items used in depend clauses with the in, out, inout,
+ // inoutset, or mutexinoutset dependence types cannot be
+ // expressions of the omp_depend_t type.
if (!RefExpr->isValueDependent() && !RefExpr->isTypeDependent() &&
!RefExpr->isInstantiationDependent() &&
!RefExpr->containsUnexpandedParameterPack() &&
@@ -18719,12 +20457,14 @@ Sema::ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
<< 1 << DSAStack->getParentLoopControlVariable(VarList.size() + 1);
}
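+ // 'depend(out: omp_all_memory)' and 'depend(inout: omp_all_memory)'
+ // (OpenMP 5.1) carry no list items, so Vars may legitimately be empty for
+ // those dependence kinds.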
if (DepKind != OMPC_DEPEND_source && DepKind != OMPC_DEPEND_sink &&
- Vars.empty())
+ DepKind != OMPC_DEPEND_outallmemory &&
+ DepKind != OMPC_DEPEND_inoutallmemory && Vars.empty())
return nullptr;
- auto *C = OMPDependClause::Create(Context, StartLoc, LParenLoc, EndLoc,
- DepModifier, DepKind, DepLoc, ColonLoc,
- Vars, TotalDepCount.getZExtValue());
+ auto *C = OMPDependClause::Create(
+ Context, StartLoc, LParenLoc, EndLoc,
+ {DepKind, DepLoc, Data.ColonLoc, Data.OmpAllMemoryLoc}, DepModifier, Vars,
+ TotalDepCount.getZExtValue());
if ((DepKind == OMPC_DEPEND_sink || DepKind == OMPC_DEPEND_source) &&
DSAStack->isParentOrderedRegion())
DSAStack->addDoacrossDependClause(C, OpsOffs);
@@ -18759,6 +20499,18 @@ OMPClause *Sema::ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
if (ErrorFound)
return nullptr;
+ // OpenMP 5.0 [2.12.5, Restrictions]
+ // If the 'ancestor' device-modifier is used, a 'requires' directive with
+ // the 'reverse_offload' clause must have been specified.
+ if (Modifier == OMPC_DEVICE_ancestor) {
+ if (!DSAStack->hasRequiresDeclWithClause<OMPReverseOffloadClause>()) {
+ targetDiag(
+ StartLoc,
+ diag::err_omp_device_ancestor_without_requires_reverse_offload);
+ ErrorFound = true;
+ }
+ }
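+ // e.g. (illustrative) '#pragma omp target device(ancestor : 1)' is valid
+ // only after '#pragma omp requires reverse_offload' has been seen.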
+
OpenMPDirectiveKind DKind = DSAStack->getCurrentDirective();
OpenMPDirectiveKind CaptureRegion =
getOpenMPCaptureRegionForClause(DKind, OMPC_device, LangOpts.OpenMP);
@@ -20534,7 +22286,8 @@ OMPClause *Sema::ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
// OpenMP [2.13.2, critical construct, Description]
// ... where hint-expression is an integer constant expression that evaluates
// to a valid lock hint.
- ExprResult HintExpr = VerifyPositiveIntegerConstantInClause(Hint, OMPC_hint);
+ ExprResult HintExpr =
+ VerifyPositiveIntegerConstantInClause(Hint, OMPC_hint, false);
if (HintExpr.isInvalid())
return nullptr;
return new (Context)
@@ -20774,6 +22527,14 @@ void Sema::ActOnFinishedOpenMPDeclareTargetContext(
ActOnOpenMPDeclareTargetName(It.first, It.second.Loc, It.second.MT, DTCI);
}
+void Sema::DiagnoseUnterminatedOpenMPDeclareTarget() {
+ if (DeclareTargetNesting.empty())
+ return;
+ DeclareTargetContextInfo &DTCI = DeclareTargetNesting.back();
+ Diag(DTCI.Loc, diag::warn_omp_unterminated_declare_target)
+ << getOpenMPDirectiveName(DTCI.Kind);
+}
+
NamedDecl *Sema::lookupOpenMPDeclareTargetName(Scope *CurScope,
CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id) {
@@ -20827,7 +22588,7 @@ void Sema::ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
auto *VD = cast<ValueDecl>(ND);
llvm::Optional<OMPDeclareTargetDeclAttr *> ActiveAttr =
OMPDeclareTargetDeclAttr::getActiveAttr(VD);
- if (ActiveAttr.hasValue() && ActiveAttr.getValue()->getDevType() != DTCI.DT &&
+ if (ActiveAttr && ActiveAttr.getValue()->getDevType() != DTCI.DT &&
ActiveAttr.getValue()->getLevel() == Level) {
Diag(Loc, diag::err_omp_device_type_mismatch)
<< OMPDeclareTargetDeclAttr::ConvertDevTypeTyToStr(DTCI.DT)
@@ -20835,18 +22596,18 @@ void Sema::ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
ActiveAttr.getValue()->getDevType());
return;
}
- if (ActiveAttr.hasValue() && ActiveAttr.getValue()->getMapType() != MT &&
+ if (ActiveAttr && ActiveAttr.getValue()->getMapType() != MT &&
ActiveAttr.getValue()->getLevel() == Level) {
Diag(Loc, diag::err_omp_declare_target_to_and_link) << ND;
return;
}
- if (ActiveAttr.hasValue() && ActiveAttr.getValue()->getLevel() == Level)
+ if (ActiveAttr && ActiveAttr.getValue()->getLevel() == Level)
return;
Expr *IndirectE = nullptr;
bool IsIndirect = false;
- if (DTCI.Indirect.hasValue()) {
+ if (DTCI.Indirect) {
IndirectE = DTCI.Indirect.getValue();
if (!IndirectE)
IsIndirect = true;
@@ -20884,7 +22645,7 @@ static void checkDeclInTargetContext(SourceLocation SL, SourceRange SR,
return;
}
}
- if (MapTy.hasValue())
+ if (MapTy)
return;
SemaRef.Diag(VD->getLocation(), diag::warn_omp_not_in_target_context);
SemaRef.Diag(SL, diag::note_used_here) << SR;
@@ -20941,12 +22702,12 @@ void Sema::checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
llvm::Optional<OMPDeclareTargetDeclAttr *> ActiveAttr =
OMPDeclareTargetDeclAttr::getActiveAttr(VD);
unsigned Level = DeclareTargetNesting.size();
- if (ActiveAttr.hasValue() && ActiveAttr.getValue()->getLevel() >= Level)
+ if (ActiveAttr && ActiveAttr.getValue()->getLevel() >= Level)
return;
DeclareTargetContextInfo &DTCI = DeclareTargetNesting.back();
Expr *IndirectE = nullptr;
bool IsIndirect = false;
- if (DTCI.Indirect.hasValue()) {
+ if (DTCI.Indirect) {
IndirectE = DTCI.Indirect.getValue();
if (!IndirectE)
IsIndirect = true;
@@ -21260,6 +23021,88 @@ OMPClause *Sema::ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
MVLI.VarComponents);
}
+OMPClause *Sema::ActOnOpenMPHasDeviceAddrClause(ArrayRef<Expr *> VarList,
+ const OMPVarListLocTy &Locs) {
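+ // OpenMP 5.1 'has_device_addr(list)' asserts that each list item already
+ // has a device-accessible address, e.g. (illustrative):
+ //   #pragma omp target data map(tofrom: x) use_device_addr(x)
+ //   #pragma omp target has_device_addr(x)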
+ MappableVarListInfo MVLI(VarList);
+ for (Expr *RefExpr : VarList) {
+ assert(RefExpr && "NULL expr in OpenMP has_device_addr clause.");
+ SourceLocation ELoc;
+ SourceRange ERange;
+ Expr *SimpleRefExpr = RefExpr;
+ auto Res = getPrivateItem(*this, SimpleRefExpr, ELoc, ERange,
+ /*AllowArraySection=*/true);
+ if (Res.second) {
+ // It will be analyzed later.
+ MVLI.ProcessedVarList.push_back(RefExpr);
+ }
+ ValueDecl *D = Res.first;
+ if (!D)
+ continue;
+
+ // Check that the declaration in the clause does not appear in any
+ // data-sharing attribute.
+ DSAStackTy::DSAVarData DVar = DSAStack->getTopDSA(D, /*FromParent=*/false);
+ if (isOpenMPPrivate(DVar.CKind)) {
+ Diag(ELoc, diag::err_omp_variable_in_given_clause_and_dsa)
+ << getOpenMPClauseName(DVar.CKind)
+ << getOpenMPClauseName(OMPC_has_device_addr)
+ << getOpenMPDirectiveName(DSAStack->getCurrentDirective());
+ reportOriginalDsa(*this, DSAStack, D, DVar);
+ continue;
+ }
+
+ const Expr *ConflictExpr;
+ if (DSAStack->checkMappableExprComponentListsForDecl(
+ D, /*CurrentRegionOnly=*/true,
+ [&ConflictExpr](
+ OMPClauseMappableExprCommon::MappableExprComponentListRef R,
+ OpenMPClauseKind) -> bool {
+ ConflictExpr = R.front().getAssociatedExpression();
+ return true;
+ })) {
+ Diag(ELoc, diag::err_omp_map_shared_storage) << RefExpr->getSourceRange();
+ Diag(ConflictExpr->getExprLoc(), diag::note_used_here)
+ << ConflictExpr->getSourceRange();
+ continue;
+ }
+
+ // Store the components in the stack so that they can be used to check
+ // against other clauses later on.
+ OMPClauseMappableExprCommon::MappableComponent MC(
+ SimpleRefExpr, D, /*IsNonContiguous=*/false);
+ DSAStack->addMappableExpressionComponents(
+ D, MC, /*WhereFoundClauseKind=*/OMPC_has_device_addr);
+
+ // Record the expression we've just processed.
+ auto *VD = dyn_cast<VarDecl>(D);
+ if (!VD && !CurContext->isDependentContext()) {
+ DeclRefExpr *Ref =
+ buildCapture(*this, D, SimpleRefExpr, /*WithInit=*/true);
+ assert(Ref && "has_device_addr capture failed");
+ MVLI.ProcessedVarList.push_back(Ref);
+ } else
+ MVLI.ProcessedVarList.push_back(RefExpr->IgnoreParens());
+
+ // Create a mappable component for the list item. List items in this clause
+ // only need a component. We use a null declaration to signal fields in
+ // 'this'.
+ assert((isa<DeclRefExpr>(SimpleRefExpr) ||
+ isa<CXXThisExpr>(cast<MemberExpr>(SimpleRefExpr)->getBase())) &&
+ "Unexpected device pointer expression!");
+ MVLI.VarBaseDeclarations.push_back(
+ isa<DeclRefExpr>(SimpleRefExpr) ? D : nullptr);
+ MVLI.VarComponents.resize(MVLI.VarComponents.size() + 1);
+ MVLI.VarComponents.back().push_back(MC);
+ }
+
+ if (MVLI.ProcessedVarList.empty())
+ return nullptr;
+
+ return OMPHasDeviceAddrClause::Create(Context, Locs, MVLI.ProcessedVarList,
+ MVLI.VarBaseDeclarations,
+ MVLI.VarComponents);
+}
+
OMPClause *Sema::ActOnOpenMPAllocateClause(
Expr *Allocator, ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation ColonLoc, SourceLocation LParenLoc, SourceLocation EndLoc) {
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp b/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp
index 3fa192cedfa3..c226ed625479 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaOverload.cpp
@@ -49,11 +49,10 @@ static bool functionHasPassObjectSizeParams(const FunctionDecl *FD) {
}
/// A convenience routine for creating a decayed reference to a function.
-static ExprResult
-CreateFunctionRefExpr(Sema &S, FunctionDecl *Fn, NamedDecl *FoundDecl,
- const Expr *Base, bool HadMultipleCandidates,
- SourceLocation Loc = SourceLocation(),
- const DeclarationNameLoc &LocInfo = DeclarationNameLoc()){
+static ExprResult CreateFunctionRefExpr(
+ Sema &S, FunctionDecl *Fn, NamedDecl *FoundDecl, const Expr *Base,
+ bool HadMultipleCandidates, SourceLocation Loc = SourceLocation(),
+ const DeclarationNameLoc &LocInfo = DeclarationNameLoc()) {
if (S.DiagnoseUseOfDecl(FoundDecl, Loc))
return ExprError();
// If FoundDecl is different from Fn (such as if one is a template
@@ -984,8 +983,7 @@ checkPlaceholderForOverload(Sema &S, Expr *&E,
/// checkArgPlaceholdersForOverload - Check a set of call operands for
/// placeholders.
-static bool checkArgPlaceholdersForOverload(Sema &S,
- MultiExprArg Args,
+static bool checkArgPlaceholdersForOverload(Sema &S, MultiExprArg Args,
UnbridgedCastsSet &unbridged) {
for (unsigned i = 0, e = Args.size(); i != e; ++i)
if (checkPlaceholderForOverload(S, Args[i], &unbridged))
@@ -1619,8 +1617,9 @@ bool Sema::IsFunctionConversion(QualType FromType, QualType ToType,
///
/// \param ICK Will be set to the vector conversion kind, if this is a vector
/// conversion.
-static bool IsVectorConversion(Sema &S, QualType FromType,
- QualType ToType, ImplicitConversionKind &ICK) {
+static bool IsVectorConversion(Sema &S, QualType FromType, QualType ToType,
+ ImplicitConversionKind &ICK, Expr *From,
+ bool InOverloadResolution) {
// We need at least one of these types to be a vector type to have a vector
// conversion.
if (!ToType->isVectorType() && !FromType->isVectorType())
@@ -1662,6 +1661,13 @@ static bool IsVectorConversion(Sema &S, QualType FromType,
if (S.Context.areCompatibleVectorTypes(FromType, ToType) ||
(S.isLaxVectorConversion(FromType, ToType) &&
!ToType->hasAttr(attr::ArmMveStrictPolymorphism))) {
+ if (S.isLaxVectorConversion(FromType, ToType) &&
+ S.anyAltivecTypes(FromType, ToType) &&
+ !S.areSameVectorElemTypes(FromType, ToType) &&
+ !InOverloadResolution) {
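+ // For instance (illustrative), a lax conversion between 'vector int' and
+ // 'vector float' on an AltiVec target is now diagnosed as deprecated.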
+ S.Diag(From->getBeginLoc(), diag::warn_deprecated_lax_vec_conv_all)
+ << FromType << ToType;
+ }
ICK = ICK_Vector_Conversion;
return true;
}
@@ -1749,13 +1755,6 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
"Non-address-of operator for overloaded function expression");
FromType = S.Context.getPointerType(FromType);
}
-
- // Check that we've computed the proper type after overload resolution.
- // FIXME: FixOverloadedFunctionReference has side-effects; we shouldn't
- // be calling it from within an NDEBUG block.
- assert(S.Context.hasSameType(
- FromType,
- S.FixOverloadedFunctionReference(From, AccessPair, Fn)->getType()));
} else {
return false;
}
@@ -1917,7 +1916,8 @@ static bool IsStandardConversion(Sema &S, Expr* From, QualType ToType,
InOverloadResolution, FromType)) {
// Pointer to member conversions (4.11).
SCS.Second = ICK_Pointer_Member;
- } else if (IsVectorConversion(S, FromType, ToType, SecondICK)) {
+ } else if (IsVectorConversion(S, FromType, ToType, SecondICK, From,
+ InOverloadResolution)) {
SCS.Second = SecondICK;
FromType = ToType.getUnqualifiedType();
} else if (!S.getLangOpts().CPlusPlus &&
@@ -2954,24 +2954,30 @@ void Sema::HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
}
/// FunctionParamTypesAreEqual - This routine checks two function proto types
-/// for equality of their argument types. Caller has already checked that
-/// they have same number of arguments. If the parameters are different,
+/// for equality of their parameter types. Caller has already checked that
+/// they have the same number of parameters. If the parameters are different,
/// ArgPos will have the parameter index of the first different parameter.
+/// If `Reversed` is true, the parameters of `NewType` will be compared in
+/// reverse order. That's useful if one of the functions is being used as a
+/// C++20 synthesized operator overload with a reversed parameter order.
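+/// For example (illustrative): checking 'bool operator==(A, B)' against the
+/// reversed synthesized candidate for 'bool operator==(B, A)' compares the
+/// A parameters with each other and likewise the B parameters.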
bool Sema::FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
- unsigned *ArgPos) {
- for (FunctionProtoType::param_type_iterator O = OldType->param_type_begin(),
- N = NewType->param_type_begin(),
- E = OldType->param_type_end();
- O && (O != E); ++O, ++N) {
+ unsigned *ArgPos, bool Reversed) {
+ assert(OldType->getNumParams() == NewType->getNumParams() &&
+ "Can't compare parameters of functions with different number of "
+ "parameters!");
+ for (size_t I = 0; I < OldType->getNumParams(); I++) {
+ // Reverse iterate over the parameters of `NewType` if `Reversed` is true.
+ size_t J = Reversed ? (OldType->getNumParams() - I - 1) : I;
+
// Ignore address spaces in pointee type. This is to disallow overloading
// on __ptr32/__ptr64 address spaces.
- QualType Old = Context.removePtrSizeAddrSpace(O->getUnqualifiedType());
- QualType New = Context.removePtrSizeAddrSpace(N->getUnqualifiedType());
+ QualType Old = Context.removePtrSizeAddrSpace(
+ OldType->getParamType(I).getUnqualifiedType());
+ QualType New = Context.removePtrSizeAddrSpace(
+ NewType->getParamType(J).getUnqualifiedType());
if (!Context.hasSameType(Old, New)) {
if (ArgPos)
- *ArgPos = O - OldType->param_type_begin();
+ *ArgPos = I;
return false;
}
}
@@ -3197,9 +3203,8 @@ static bool isQualificationConversionStep(QualType FromType, QualType ToType,
Qualifiers FromQuals = FromType.getQualifiers();
Qualifiers ToQuals = ToType.getQualifiers();
- // Ignore __unaligned qualifier if this type is void.
- if (ToType.getUnqualifiedType()->isVoidType())
- FromQuals.removeUnaligned();
+ // Ignore __unaligned qualifier.
+ FromQuals.removeUnaligned();
// Objective-C ARC:
// Check Objective-C lifetime conversions.
@@ -6476,7 +6481,7 @@ void Sema::AddOverloadCandidate(
// (CUDA B.1): Check for invalid calls between targets.
if (getLangOpts().CUDA)
- if (const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext))
+ if (const FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true))
// Skip the check for callers that are implicit members, because in this
// case we may not yet know what the member's target is; the target is
// inferred for the member automatically, based on the bases and fields of
@@ -6986,7 +6991,7 @@ Sema::AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
// (CUDA B.1): Check for invalid calls between targets.
if (getLangOpts().CUDA)
- if (const FunctionDecl *Caller = dyn_cast<FunctionDecl>(CurContext))
+ if (const FunctionDecl *Caller = getCurFunctionDecl(/*AllowLambda=*/true))
if (!IsAllowedCUDACall(Caller, Method)) {
Candidate.Viable = false;
Candidate.FailureKind = ovl_fail_bad_target;
@@ -8195,6 +8200,49 @@ static Qualifiers CollectVRQualifiers(ASTContext &Context, Expr* ArgExpr) {
return VRQuals;
}
+// Note: We're currently only handling qualifiers that are meaningful for the
+// LHS of compound assignment overloading.
+static void forAllQualifierCombinationsImpl(
+ QualifiersAndAtomic Available, QualifiersAndAtomic Applied,
+ llvm::function_ref<void(QualifiersAndAtomic)> Callback) {
+ // _Atomic
+ if (Available.hasAtomic()) {
+ Available.removeAtomic();
+ forAllQualifierCombinationsImpl(Available, Applied.withAtomic(), Callback);
+ forAllQualifierCombinationsImpl(Available, Applied, Callback);
+ return;
+ }
+
+ // volatile
+ if (Available.hasVolatile()) {
+ Available.removeVolatile();
+ assert(!Applied.hasVolatile());
+ forAllQualifierCombinationsImpl(Available, Applied.withVolatile(),
+ Callback);
+ forAllQualifierCombinationsImpl(Available, Applied, Callback);
+ return;
+ }
+
+ Callback(Applied);
+}
+
+static void forAllQualifierCombinations(
+ QualifiersAndAtomic Quals,
+ llvm::function_ref<void(QualifiersAndAtomic)> Callback) {
+ return forAllQualifierCombinationsImpl(Quals, QualifiersAndAtomic(),
+ Callback);
+}
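+
+// Illustrative summary: when the operands make both _Atomic and volatile
+// visible, the recursion above yields the four LHS qualifier combinations
+// {_Atomic volatile, _Atomic, volatile, none}, each of which the builders
+// below turn into a built-in operator candidate.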
+
+static QualType makeQualifiedLValueReferenceType(QualType Base,
+ QualifiersAndAtomic Quals,
+ Sema &S) {
+ if (Quals.hasAtomic())
+ Base = S.Context.getAtomicType(Base);
+ if (Quals.hasVolatile())
+ Base = S.Context.getVolatileType(Base);
+ return S.Context.getLValueReferenceType(Base);
+}
+
namespace {
/// Helper class to manage the addition of builtin operator overload
@@ -8205,7 +8253,7 @@ class BuiltinOperatorOverloadBuilder {
// Common instance state available to all overload candidate addition methods.
Sema &S;
ArrayRef<Expr *> Args;
- Qualifiers VisibleTypeConversionsQuals;
+ QualifiersAndAtomic VisibleTypeConversionsQuals;
bool HasArithmeticOrEnumeralCandidateType;
SmallVectorImpl<BuiltinCandidateTypeSet> &CandidateTypes;
OverloadCandidateSet &CandidateSet;
@@ -8329,7 +8377,7 @@ class BuiltinOperatorOverloadBuilder {
public:
BuiltinOperatorOverloadBuilder(
Sema &S, ArrayRef<Expr *> Args,
- Qualifiers VisibleTypeConversionsQuals,
+ QualifiersAndAtomic VisibleTypeConversionsQuals,
bool HasArithmeticOrEnumeralCandidateType,
SmallVectorImpl<BuiltinCandidateTypeSet> &CandidateTypes,
OverloadCandidateSet &CandidateSet)
@@ -8950,18 +8998,14 @@ public:
ParamTypes[1] = ArithmeticTypes[Right];
auto LeftBaseTy = AdjustAddressSpaceForBuiltinOperandType(
S, ArithmeticTypes[Left], Args[0]);
- // Add this built-in operator as a candidate (VQ is empty).
- ParamTypes[0] = S.Context.getLValueReferenceType(LeftBaseTy);
- S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
- /*IsAssignmentOperator=*/isEqualOp);
- // Add this built-in operator as a candidate (VQ is 'volatile').
- if (VisibleTypeConversionsQuals.hasVolatile()) {
- ParamTypes[0] = S.Context.getVolatileType(LeftBaseTy);
- ParamTypes[0] = S.Context.getLValueReferenceType(ParamTypes[0]);
- S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
- /*IsAssignmentOperator=*/isEqualOp);
- }
+ forAllQualifierCombinations(
+ VisibleTypeConversionsQuals, [&](QualifiersAndAtomic Quals) {
+ ParamTypes[0] =
+ makeQualifiedLValueReferenceType(LeftBaseTy, Quals, S);
+ S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet,
+ /*IsAssignmentOperator=*/isEqualOp);
+ });
}
}
@@ -9008,16 +9052,13 @@ public:
ParamTypes[1] = ArithmeticTypes[Right];
auto LeftBaseTy = AdjustAddressSpaceForBuiltinOperandType(
S, ArithmeticTypes[Left], Args[0]);
- // Add this built-in operator as a candidate (VQ is empty).
- ParamTypes[0] = S.Context.getLValueReferenceType(LeftBaseTy);
- S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
- if (VisibleTypeConversionsQuals.hasVolatile()) {
- // Add this built-in operator as a candidate (VQ is 'volatile').
- ParamTypes[0] = LeftBaseTy;
- ParamTypes[0] = S.Context.getVolatileType(ParamTypes[0]);
- ParamTypes[0] = S.Context.getLValueReferenceType(ParamTypes[0]);
- S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
- }
+
+ forAllQualifierCombinations(
+ VisibleTypeConversionsQuals, [&](QualifiersAndAtomic Quals) {
+ ParamTypes[0] =
+ makeQualifiedLValueReferenceType(LeftBaseTy, Quals, S);
+ S.AddBuiltinCandidate(ParamTypes, Args, CandidateSet);
+ });
}
}
}
@@ -9181,10 +9222,13 @@ void Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
// if the operator we're looking at has built-in operator candidates
// that make use of these types. Also record whether we encounter non-record
// candidate types or either arithmetic or enumeral candidate types.
- Qualifiers VisibleTypeConversionsQuals;
+ QualifiersAndAtomic VisibleTypeConversionsQuals;
VisibleTypeConversionsQuals.addConst();
- for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx)
+ for (unsigned ArgIdx = 0, N = Args.size(); ArgIdx != N; ++ArgIdx) {
VisibleTypeConversionsQuals += CollectVRQualifiers(Context, Args[ArgIdx]);
+ if (Args[ArgIdx]->getType()->isAtomicType())
+ VisibleTypeConversionsQuals.addAtomic();
+ }
bool HasNonRecordCandidateType = false;
bool HasArithmeticOrEnumeralCandidateType = false;
@@ -9352,7 +9396,8 @@ void Sema::AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
break;
case OO_Subscript:
- OpBuilder.addSubscriptOverloads();
+ if (Args.size() == 2)
+ OpBuilder.addSubscriptOverloads();
break;
case OO_ArrowStar:
@@ -9593,6 +9638,32 @@ static bool haveSameParameterTypes(ASTContext &Context, const FunctionDecl *F1,
return true;
}
+/// We're allowed to use constraints partial ordering only if the candidates
+/// have the same parameter types:
+/// [temp.func.order]p6.2.2 [...] or if the function parameters that
+/// positionally correspond between the two templates are not of the same type,
+/// neither template is more specialized than the other.
+/// [over.match.best]p2.6
+/// F1 and F2 are non-template functions with the same parameter-type-lists,
+/// and F1 is more constrained than F2 [...]
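+/// For example (illustrative):
+///   template <class T> void f(T) requires C1<T>;          // #1
+///   template <class T> void f(T) requires C1<T> && C2<T>; // #2
+/// Both take (T), so constraints can order #1 and #2; if their parameter
+/// types differed, neither would be preferred on constraints.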
+static bool canCompareFunctionConstraints(Sema &S,
+ const OverloadCandidate &Cand1,
+ const OverloadCandidate &Cand2) {
+ // FIXME: Per P2113R0 we also need to compare the template parameter lists
+ // when comparing template functions.
+ if (Cand1.Function && Cand2.Function && Cand1.Function->hasPrototype() &&
+ Cand2.Function->hasPrototype()) {
+ auto *PT1 = cast<FunctionProtoType>(Cand1.Function->getFunctionType());
+ auto *PT2 = cast<FunctionProtoType>(Cand2.Function->getFunctionType());
+ if (PT1->getNumParams() == PT2->getNumParams() &&
+ PT1->isVariadic() == PT2->isVariadic() &&
+ S.FunctionParamTypesAreEqual(PT1, PT2, nullptr,
+ Cand1.isReversed() ^ Cand2.isReversed()))
+ return true;
+ }
+ return false;
+}
+
/// isBetterOverloadCandidate - Determines whether the first overload
/// candidate is a better candidate than the second (C++ 13.3.3p1).
bool clang::isBetterOverloadCandidate(
@@ -9641,7 +9712,7 @@ bool clang::isBetterOverloadCandidate(
// overloading resolution diagnostics.
if (S.getLangOpts().CUDA && Cand1.Function && Cand2.Function &&
S.getLangOpts().GPUExcludeWrongSideOverloads) {
- if (FunctionDecl *Caller = dyn_cast<FunctionDecl>(S.CurContext)) {
+ if (FunctionDecl *Caller = S.getCurFunctionDecl(/*AllowLambda=*/true)) {
bool IsCallerImplicitHD = Sema::isCUDAImplicitHostDeviceFunction(Caller);
bool IsCand1ImplicitHD =
Sema::isCUDAImplicitHostDeviceFunction(Cand1.Function);
@@ -9824,34 +9895,28 @@ bool clang::isBetterOverloadCandidate(
isa<CXXConversionDecl>(Cand1.Function) ? TPOC_Conversion
: TPOC_Call,
Cand1.ExplicitCallArguments, Cand2.ExplicitCallArguments,
- Cand1.isReversed() ^ Cand2.isReversed()))
+ Cand1.isReversed() ^ Cand2.isReversed(),
+ canCompareFunctionConstraints(S, Cand1, Cand2)))
return BetterTemplate == Cand1.Function->getPrimaryTemplate();
}
 // -- F1 and F2 are non-template functions with the same
// parameter-type-lists, and F1 is more constrained than F2 [...],
- if (Cand1.Function && Cand2.Function && !Cand1IsSpecialization &&
- !Cand2IsSpecialization && Cand1.Function->hasPrototype() &&
- Cand2.Function->hasPrototype()) {
- auto *PT1 = cast<FunctionProtoType>(Cand1.Function->getFunctionType());
- auto *PT2 = cast<FunctionProtoType>(Cand2.Function->getFunctionType());
- if (PT1->getNumParams() == PT2->getNumParams() &&
- PT1->isVariadic() == PT2->isVariadic() &&
- S.FunctionParamTypesAreEqual(PT1, PT2)) {
- Expr *RC1 = Cand1.Function->getTrailingRequiresClause();
- Expr *RC2 = Cand2.Function->getTrailingRequiresClause();
- if (RC1 && RC2) {
- bool AtLeastAsConstrained1, AtLeastAsConstrained2;
- if (S.IsAtLeastAsConstrained(Cand1.Function, {RC1}, Cand2.Function,
- {RC2}, AtLeastAsConstrained1) ||
- S.IsAtLeastAsConstrained(Cand2.Function, {RC2}, Cand1.Function,
- {RC1}, AtLeastAsConstrained2))
- return false;
- if (AtLeastAsConstrained1 != AtLeastAsConstrained2)
- return AtLeastAsConstrained1;
- } else if (RC1 || RC2) {
- return RC1 != nullptr;
- }
+ if (!Cand1IsSpecialization && !Cand2IsSpecialization &&
+ canCompareFunctionConstraints(S, Cand1, Cand2)) {
+ Expr *RC1 = Cand1.Function->getTrailingRequiresClause();
+ Expr *RC2 = Cand2.Function->getTrailingRequiresClause();
+ if (RC1 && RC2) {
+ bool AtLeastAsConstrained1, AtLeastAsConstrained2;
+ if (S.IsAtLeastAsConstrained(Cand1.Function, {RC1}, Cand2.Function, {RC2},
+ AtLeastAsConstrained1) ||
+ S.IsAtLeastAsConstrained(Cand2.Function, {RC2}, Cand1.Function, {RC1},
+ AtLeastAsConstrained2))
+ return false;
+ if (AtLeastAsConstrained1 != AtLeastAsConstrained2)
+ return AtLeastAsConstrained1;
+ } else if (RC1 || RC2) {
+ return RC1 != nullptr;
}
}
@@ -9924,7 +9989,7 @@ bool clang::isBetterOverloadCandidate(
// If other rules cannot determine which is better, CUDA preference is used
// to determine which is better.
if (S.getLangOpts().CUDA && Cand1.Function && Cand2.Function) {
- FunctionDecl *Caller = dyn_cast<FunctionDecl>(S.CurContext);
+ FunctionDecl *Caller = S.getCurFunctionDecl(/*AllowLambda=*/true);
return S.IdentifyCUDAPreference(Caller, Cand1.Function) >
S.IdentifyCUDAPreference(Caller, Cand2.Function);
}
@@ -10045,7 +10110,7 @@ OverloadCandidateSet::BestViableFunction(Sema &S, SourceLocation Loc,
// -fgpu-exclude-wrong-side-overloads is on, all candidates are compared
// uniformly in isBetterOverloadCandidate.
if (S.getLangOpts().CUDA && !S.getLangOpts().GPUExcludeWrongSideOverloads) {
- const FunctionDecl *Caller = dyn_cast<FunctionDecl>(S.CurContext);
+ const FunctionDecl *Caller = S.getCurFunctionDecl(/*AllowLambda=*/true);
bool ContainsSameSideCandidate =
llvm::any_of(Candidates, [&](OverloadCandidate *Cand) {
// Check viable function only.
@@ -10258,10 +10323,19 @@ static bool checkAddressOfFunctionIsAvailable(Sema &S, const FunctionDecl *FD,
return false;
if (!Satisfaction.IsSatisfied) {
if (Complain) {
- if (InOverloadResolution)
+ if (InOverloadResolution) {
+ SmallString<128> TemplateArgString;
+ if (FunctionTemplateDecl *FunTmpl = FD->getPrimaryTemplate()) {
+ TemplateArgString += " ";
+ TemplateArgString += S.getTemplateArgumentBindingsText(
+ FunTmpl->getTemplateParameters(),
+ *FD->getTemplateSpecializationArgs());
+ }
+
S.Diag(FD->getBeginLoc(),
- diag::note_ovl_candidate_unsatisfied_constraints);
- else
+ diag::note_ovl_candidate_unsatisfied_constraints)
+ << TemplateArgString;
+ } else
S.Diag(Loc, diag::err_addrof_function_constraints_not_satisfied)
<< FD;
S.DiagnoseUnsatisfiedConstraint(Satisfaction);
@@ -11070,7 +11144,7 @@ static void DiagnoseBadDeduction(Sema &S, OverloadCandidate *Cand,
/// CUDA: diagnose an invalid call across targets.
static void DiagnoseBadTarget(Sema &S, OverloadCandidate *Cand) {
- FunctionDecl *Caller = cast<FunctionDecl>(S.CurContext);
+ FunctionDecl *Caller = S.getCurFunctionDecl(/*AllowLambda=*/true);
FunctionDecl *Callee = Cand->Function;
Sema::CUDAFunctionTarget CallerTarget = S.IdentifyCUDATarget(Caller),
@@ -11192,6 +11266,13 @@ static void NoteFunctionCandidate(Sema &S, OverloadCandidate *Cand,
if (shouldSkipNotingLambdaConversionDecl(Fn))
return;
+ // There is no physical candidate declaration to point to for OpenCL builtins.
+ // Except for failed conversions, the notes are identical for each candidate,
+ // so do not generate such notes.
+ if (S.getLangOpts().OpenCL && Fn->isImplicit() &&
+ Cand->FailureKind != ovl_fail_bad_conversion)
+ return;
+
// Note deleted candidates, but only if they're viable.
if (Cand->Viable) {
if (Fn->isDeleted()) {
@@ -11617,7 +11698,9 @@ CompleteNonViableCandidate(Sema &S, OverloadCandidate *Cand,
// Conversion 0 is 'this', which doesn't have a corresponding parameter.
ConvIdx = 1;
if (CSK == OverloadCandidateSet::CSK_Operator &&
- Cand->Function->getDeclName().getCXXOverloadedOperator() != OO_Call)
+ Cand->Function->getDeclName().getCXXOverloadedOperator() != OO_Call &&
+ Cand->Function->getDeclName().getCXXOverloadedOperator() !=
+ OO_Subscript)
// Argument 0 is 'this', which doesn't have a corresponding parameter.
ArgIdx = 1;
}
@@ -12127,7 +12210,7 @@ private:
if (FunctionDecl *FunDecl = dyn_cast<FunctionDecl>(Fn)) {
if (S.getLangOpts().CUDA)
- if (FunctionDecl *Caller = dyn_cast<FunctionDecl>(S.CurContext))
+ if (FunctionDecl *Caller = S.getCurFunctionDecl(/*AllowLambda=*/true))
if (!Caller->isImplicit() && !S.IsAllowedCUDACall(Caller, FunDecl))
return false;
if (FunDecl->isMultiVersion()) {
@@ -12244,7 +12327,8 @@ private:
}
void EliminateSuboptimalCudaMatches() {
- S.EraseUnwantedCUDAMatches(dyn_cast<FunctionDecl>(S.CurContext), Matches);
+ S.EraseUnwantedCUDAMatches(S.getCurFunctionDecl(/*AllowLambda=*/true),
+ Matches);
}
public:
@@ -12448,7 +12532,7 @@ Sema::resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &Pair) {
// We skipped over some ambiguous declarations which might be ambiguous with
// the selected result.
for (FunctionDecl *Skipped : AmbiguousDecls)
- if (!CheckMoreConstrained(Skipped, Result).hasValue())
+ if (!CheckMoreConstrained(Skipped, Result))
return nullptr;
Pair = DAP;
}
@@ -13119,7 +13203,7 @@ static QualType chooseRecoveryType(OverloadCandidateSet &CS,
if (!Result)
return QualType();
- auto Value = Result.getValue();
+ auto Value = *Result;
if (Value.isNull() || Value->isUndeducedType())
return QualType();
return Value;
@@ -14055,17 +14139,65 @@ ExprResult Sema::BuildSynthesizedThreeWayComparison(
return PseudoObjectExpr::Create(Context, SyntacticForm, SemanticForm, 2);
}
-ExprResult
-Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
- SourceLocation RLoc,
- Expr *Base, Expr *Idx) {
- Expr *Args[2] = { Base, Idx };
+static bool PrepareArgumentsForCallToObjectOfClassType(
+ Sema &S, SmallVectorImpl<Expr *> &MethodArgs, CXXMethodDecl *Method,
+ MultiExprArg Args, SourceLocation LParenLoc) {
+
+ const auto *Proto = Method->getType()->castAs<FunctionProtoType>();
+ unsigned NumParams = Proto->getNumParams();
+ unsigned NumArgsSlots =
+ MethodArgs.size() + std::max<unsigned>(Args.size(), NumParams);
+ // Build the full argument list for the method call (the implicit object
+ // parameter is placed at the beginning of the list).
+ MethodArgs.reserve(MethodArgs.size() + NumArgsSlots);
+ bool IsError = false;
+ // Check the argument types; where no argument was supplied, fall back to
+ // the parameter's default argument.
+ for (unsigned i = 0; i != NumParams; i++) {
+ Expr *Arg;
+ if (i < Args.size()) {
+ Arg = Args[i];
+ ExprResult InputInit =
+ S.PerformCopyInitialization(InitializedEntity::InitializeParameter(
+ S.Context, Method->getParamDecl(i)),
+ SourceLocation(), Arg);
+ IsError |= InputInit.isInvalid();
+ Arg = InputInit.getAs<Expr>();
+ } else {
+ ExprResult DefArg =
+ S.BuildCXXDefaultArgExpr(LParenLoc, Method, Method->getParamDecl(i));
+ if (DefArg.isInvalid()) {
+ IsError = true;
+ break;
+ }
+ Arg = DefArg.getAs<Expr>();
+ }
+
+ MethodArgs.push_back(Arg);
+ }
+ return IsError;
+}
+
+ExprResult Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
+ SourceLocation RLoc,
+ Expr *Base,
+ MultiExprArg ArgExpr) {
+ SmallVector<Expr *, 2> Args;
+ Args.push_back(Base);
+ for (auto e : ArgExpr) {
+ Args.push_back(e);
+ }
DeclarationName OpName =
Context.DeclarationNames.getCXXOperatorName(OO_Subscript);
+ SourceRange Range = ArgExpr.empty()
+ ? SourceRange{}
+ : SourceRange(ArgExpr.front()->getBeginLoc(),
+ ArgExpr.back()->getEndLoc());
+
// If either side is type-dependent, create an appropriate dependent
// expression.
- if (Args[0]->isTypeDependent() || Args[1]->isTypeDependent()) {
+ if (Expr::hasAnyTypeDependentArguments(Args)) {
CXXRecordDecl *NamingClass = nullptr; // lookup ignores member operators
// CHECKME: no 'operator' keyword?
@@ -14082,12 +14214,11 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
CurFPFeatureOverrides());
}
- // Handle placeholders on both operands.
- if (checkPlaceholderForOverload(*this, Args[0]))
- return ExprError();
- if (checkPlaceholderForOverload(*this, Args[1]))
+ // Handle placeholders
+ UnbridgedCastsSet UnbridgedCasts;
+ if (checkArgPlaceholdersForOverload(*this, Args, UnbridgedCasts)) {
return ExprError();
-
+ }
// Build an empty overload set.
OverloadCandidateSet CandidateSet(LLoc, OverloadCandidateSet::CSK_Operator);
@@ -14097,7 +14228,8 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
AddMemberOperatorCandidates(OO_Subscript, LLoc, Args, CandidateSet);
// Add builtin operator candidates.
- AddBuiltinOperatorCandidates(OO_Subscript, LLoc, Args, CandidateSet);
+ if (Args.size() == 2)
+ AddBuiltinOperatorCandidates(OO_Subscript, LLoc, Args, CandidateSet);
bool HadMultipleCandidates = (CandidateSet.size() > 1);
@@ -14112,38 +14244,28 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
// We matched an overloaded operator. Build a call to that
// operator.
- CheckMemberOperatorAccess(LLoc, Args[0], Args[1], Best->FoundDecl);
+ CheckMemberOperatorAccess(LLoc, Args[0], ArgExpr, Best->FoundDecl);
// Convert the arguments.
CXXMethodDecl *Method = cast<CXXMethodDecl>(FnDecl);
- ExprResult Arg0 =
- PerformObjectArgumentInitialization(Args[0], /*Qualifier=*/nullptr,
- Best->FoundDecl, Method);
+ SmallVector<Expr *, 2> MethodArgs;
+ ExprResult Arg0 = PerformObjectArgumentInitialization(
+ Args[0], /*Qualifier=*/nullptr, Best->FoundDecl, Method);
if (Arg0.isInvalid())
return ExprError();
- Args[0] = Arg0.get();
- // Convert the arguments.
- ExprResult InputInit
- = PerformCopyInitialization(InitializedEntity::InitializeParameter(
- Context,
- FnDecl->getParamDecl(0)),
- SourceLocation(),
- Args[1]);
- if (InputInit.isInvalid())
+ MethodArgs.push_back(Arg0.get());
+ bool IsError = PrepareArgumentsForCallToObjectOfClassType(
+ *this, MethodArgs, Method, ArgExpr, LLoc);
+ if (IsError)
return ExprError();
- Args[1] = InputInit.getAs<Expr>();
-
// Build the actual expression node.
DeclarationNameInfo OpLocInfo(OpName, LLoc);
OpLocInfo.setCXXOperatorNameRange(SourceRange(LLoc, RLoc));
- ExprResult FnExpr = CreateFunctionRefExpr(*this, FnDecl,
- Best->FoundDecl,
- Base,
- HadMultipleCandidates,
- OpLocInfo.getLoc(),
- OpLocInfo.getInfo());
+ ExprResult FnExpr = CreateFunctionRefExpr(
+ *this, FnDecl, Best->FoundDecl, Base, HadMultipleCandidates,
+ OpLocInfo.getLoc(), OpLocInfo.getInfo());
if (FnExpr.isInvalid())
return ExprError();
@@ -14153,7 +14275,7 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
ResultTy = ResultTy.getNonLValueExprType(Context);
CXXOperatorCallExpr *TheCall = CXXOperatorCallExpr::Create(
- Context, OO_Subscript, FnExpr.get(), Args, ResultTy, VK, RLoc,
+ Context, OO_Subscript, FnExpr.get(), MethodArgs, ResultTy, VK, RLoc,
CurFPFeatureOverrides());
if (CheckCallReturnType(FnDecl->getReturnType(), LLoc, TheCall, FnDecl))
return ExprError();
@@ -14187,33 +14309,41 @@ Sema::CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
}
case OR_No_Viable_Function: {
- PartialDiagnostic PD = CandidateSet.empty()
- ? (PDiag(diag::err_ovl_no_oper)
- << Args[0]->getType() << /*subscript*/ 0
- << Args[0]->getSourceRange() << Args[1]->getSourceRange())
- : (PDiag(diag::err_ovl_no_viable_subscript)
- << Args[0]->getType() << Args[0]->getSourceRange()
- << Args[1]->getSourceRange());
+ PartialDiagnostic PD =
+ CandidateSet.empty()
+ ? (PDiag(diag::err_ovl_no_oper)
+ << Args[0]->getType() << /*subscript*/ 0
+ << Args[0]->getSourceRange() << Range)
+ : (PDiag(diag::err_ovl_no_viable_subscript)
+ << Args[0]->getType() << Args[0]->getSourceRange() << Range);
CandidateSet.NoteCandidates(PartialDiagnosticAt(LLoc, PD), *this,
- OCD_AllCandidates, Args, "[]", LLoc);
+ OCD_AllCandidates, ArgExpr, "[]", LLoc);
return ExprError();
}
case OR_Ambiguous:
- CandidateSet.NoteCandidates(
- PartialDiagnosticAt(LLoc, PDiag(diag::err_ovl_ambiguous_oper_binary)
- << "[]" << Args[0]->getType()
- << Args[1]->getType()
- << Args[0]->getSourceRange()
- << Args[1]->getSourceRange()),
- *this, OCD_AmbiguousCandidates, Args, "[]", LLoc);
+ if (Args.size() == 2) {
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(
+ LLoc, PDiag(diag::err_ovl_ambiguous_oper_binary)
+ << "[]" << Args[0]->getType() << Args[1]->getType()
+ << Args[0]->getSourceRange() << Range),
+ *this, OCD_AmbiguousCandidates, Args, "[]", LLoc);
+ } else {
+ CandidateSet.NoteCandidates(
+ PartialDiagnosticAt(LLoc,
+ PDiag(diag::err_ovl_ambiguous_subscript_call)
+ << Args[0]->getType()
+ << Args[0]->getSourceRange() << Range),
+ *this, OCD_AmbiguousCandidates, Args, "[]", LLoc);
+ }
return ExprError();
case OR_Deleted:
CandidateSet.NoteCandidates(
PartialDiagnosticAt(LLoc, PDiag(diag::err_ovl_deleted_oper)
<< "[]" << Args[0]->getSourceRange()
- << Args[1]->getSourceRange()),
+ << Range),
*this, OCD_AllCandidates, Args, "[]", LLoc);
return ExprError();
}
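
The hunks above generalize overloaded subscript resolution from a fixed Base/Idx pair to an argument list, which is what C++23's multidimensional operator[] requires. A minimal sketch of the kind of source this path now resolves (hypothetical type, not part of the patch):

struct Grid {
  int cells[4][4];
  // C++23 multi-argument subscript; resolved through the Args vector above.
  int &operator[](int row, int col) { return cells[row][col]; }
};

int demo() {
  Grid g{};
  g[1, 2] = 7;        // two index arguments, one overload candidate
  return g[1, 2];
}
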
@@ -14298,7 +14428,7 @@ ExprResult Sema::BuildCallToMemberFunction(Scope *S, Expr *MemExprE,
if (!AllowRecovery)
return ExprError();
std::vector<Expr *> SubExprs = {MemExprE};
- llvm::for_each(Args, [&SubExprs](Expr *E) { SubExprs.push_back(E); });
+ llvm::append_range(SubExprs, Args);
return CreateRecoveryExpr(MemExprE->getBeginLoc(), RParenLoc, SubExprs,
Type);
};
@@ -14717,14 +14847,8 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
if (NewFn.isInvalid())
return true;
- // The number of argument slots to allocate in the call. If we have default
- // arguments we need to allocate space for them as well. We additionally
- // need one more slot for the object parameter.
- unsigned NumArgsSlots = 1 + std::max<unsigned>(Args.size(), NumParams);
-
- // Build the full argument list for the method call (the implicit object
- // parameter is placed at the beginning of the list).
- SmallVector<Expr *, 8> MethodArgs(NumArgsSlots);
+ SmallVector<Expr *, 8> MethodArgs;
+ MethodArgs.reserve(NumParams + 1);
bool IsError = false;
@@ -14736,37 +14860,10 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
IsError = true;
else
Object = ObjRes;
- MethodArgs[0] = Object.get();
-
- // Check the argument types.
- for (unsigned i = 0; i != NumParams; i++) {
- Expr *Arg;
- if (i < Args.size()) {
- Arg = Args[i];
-
- // Pass the argument.
+ MethodArgs.push_back(Object.get());
- ExprResult InputInit
- = PerformCopyInitialization(InitializedEntity::InitializeParameter(
- Context,
- Method->getParamDecl(i)),
- SourceLocation(), Arg);
-
- IsError |= InputInit.isInvalid();
- Arg = InputInit.getAs<Expr>();
- } else {
- ExprResult DefArg
- = BuildCXXDefaultArgExpr(LParenLoc, Method, Method->getParamDecl(i));
- if (DefArg.isInvalid()) {
- IsError = true;
- break;
- }
-
- Arg = DefArg.getAs<Expr>();
- }
-
- MethodArgs[i + 1] = Arg;
- }
+ IsError |= PrepareArgumentsForCallToObjectOfClassType(
+ *this, MethodArgs, Method, Args, LParenLoc);
// If this is a variadic call, handle args passed through "...".
if (Proto->isVariadic()) {
@@ -14775,7 +14872,7 @@ Sema::BuildCallToObjectOfClassType(Scope *S, Expr *Obj,
ExprResult Arg = DefaultVariadicArgumentPromotion(Args[i], VariadicMethod,
nullptr);
IsError |= Arg.isInvalid();
- MethodArgs[i + 1] = Arg.get();
+ MethodArgs.push_back(Arg.get());
}
}
@@ -15165,10 +15262,9 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
if (SubExpr == UnOp->getSubExpr())
return UnOp;
- return UnaryOperator::Create(
- Context, SubExpr, UO_AddrOf, Context.getPointerType(SubExpr->getType()),
- VK_PRValue, OK_Ordinary, UnOp->getOperatorLoc(), false,
- CurFPFeatureOverrides());
+ // FIXME: This can't currently fail, but in principle it could.
+ return CreateBuiltinUnaryOp(UnOp->getOperatorLoc(), UO_AddrOf, SubExpr)
+ .get();
}
if (UnresolvedLookupExpr *ULE = dyn_cast<UnresolvedLookupExpr>(E)) {
@@ -15179,10 +15275,20 @@ Expr *Sema::FixOverloadedFunctionReference(Expr *E, DeclAccessPair Found,
TemplateArgs = &TemplateArgsBuffer;
}
- DeclRefExpr *DRE =
- BuildDeclRefExpr(Fn, Fn->getType(), VK_LValue, ULE->getNameInfo(),
- ULE->getQualifierLoc(), Found.getDecl(),
- ULE->getTemplateKeywordLoc(), TemplateArgs);
+ QualType Type = Fn->getType();
+ ExprValueKind ValueKind = getLangOpts().CPlusPlus ? VK_LValue : VK_PRValue;
+
+ // FIXME: Duplicated from BuildDeclarationNameExpr.
+ if (unsigned BID = Fn->getBuiltinID()) {
+ if (!Context.BuiltinInfo.isDirectlyAddressable(BID)) {
+ Type = Context.BuiltinFnTy;
+ ValueKind = VK_PRValue;
+ }
+ }
+
+ DeclRefExpr *DRE = BuildDeclRefExpr(
+ Fn, Type, ValueKind, ULE->getNameInfo(), ULE->getQualifierLoc(),
+ Found.getDecl(), ULE->getTemplateKeywordLoc(), TemplateArgs);
DRE->setHadMultipleCandidates(ULE->getNumDecls() > 1);
return DRE;
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp b/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp
index 746eb82a5bdc..f25694ce48c9 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaStmt.cpp
@@ -341,7 +341,7 @@ void Sema::DiagnoseUnusedExprResult(const Stmt *S, unsigned DiagID) {
return DiagnoseUnusedExprResult(POE->getSemanticExpr(0), DiagID);
if (isa<ObjCSubscriptRefExpr>(Source))
DiagID = diag::warn_unused_container_subscript_expr;
- else
+ else if (isa<ObjCPropertyRefExpr>(Source))
DiagID = diag::warn_unused_property_expr;
} else if (const CXXFunctionalCastExpr *FC
= dyn_cast<CXXFunctionalCastExpr>(E)) {
@@ -442,7 +442,16 @@ StmtResult Sema::ActOnCompoundStmt(SourceLocation L, SourceLocation R,
DiagnoseEmptyLoopBody(Elts[i], Elts[i + 1]);
}
- return CompoundStmt::Create(Context, Elts, L, R);
+ // Calculate the difference between the FP options in this compound statement
+ // and those in the enclosing one. If this is a function body, take the
+ // difference against the default options; the difference then indicates the
+ // options that change upon entry to the statement.
+ FPOptions FPO = (getCurFunction()->CompoundScopes.size() == 1)
+ ? FPOptions(getLangOpts())
+ : getCurCompoundScope().InitialFPFeatures;
+ FPOptionsOverride FPDiff = getCurFPFeatures().getChangesFrom(FPO);
+
+ return CompoundStmt::Create(Context, Elts, FPDiff, L, R);
}
ExprResult
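
A minimal sketch of what the recorded FPOptionsOverride captures, assuming the usual '#pragma clang fp' spelling:

float fused(float a, float b, float c) {
  {
#pragma clang fp contract(fast)   // FP options change on entry to this block
    return a * b + c;             // the CompoundStmt stores the diff against the
  }                               // enclosing scope, not the full option set
}
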
@@ -587,7 +596,7 @@ StmtResult Sema::BuildAttributedStmt(SourceLocation AttrsLoc,
return AttributedStmt::Create(Context, AttrsLoc, Attrs, SubStmt);
}
-StmtResult Sema::ActOnAttributedStmt(const ParsedAttributesWithRange &Attrs,
+StmtResult Sema::ActOnAttributedStmt(const ParsedAttributes &Attrs,
Stmt *SubStmt) {
SmallVector<const Attr *, 1> SemanticAttrs;
ProcessStmtAttributes(SubStmt, Attrs, SemanticAttrs);
@@ -888,8 +897,7 @@ StmtResult Sema::ActOnIfStmt(SourceLocation IfLoc,
CommaVisitor(*this).Visit(CondExpr);
if (!ConstevalOrNegatedConsteval && !elseStmt)
- DiagnoseEmptyStmtBody(CondExpr->getEndLoc(), thenStmt,
- diag::warn_empty_if_body);
+ DiagnoseEmptyStmtBody(RParenLoc, thenStmt, diag::warn_empty_if_body);
if (ConstevalOrNegatedConsteval ||
StatementKind == IfStatementKind::Constexpr) {
@@ -3312,7 +3320,7 @@ Sema::ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope) {
// C99 6.8.6.2p1: A break shall appear only in or as a loop body.
return StmtError(Diag(ContinueLoc, diag::err_continue_not_in_loop));
}
- if (S->getFlags() & Scope::ConditionVarScope) {
+ if (S->isConditionVarScope()) {
// We cannot 'continue;' from within a statement expression in the
// initializer of a condition variable because we would jump past the
// initialization of that variable.
@@ -3762,8 +3770,8 @@ TypeLoc Sema::getReturnTypeLoc(FunctionDecl *FD) const {
bool Sema::DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr,
- AutoType *AT) {
- // If this is the conversion function for a lambda, we choose to deduce it
+ const AutoType *AT) {
+ // If this is the conversion function for a lambda, we choose to deduce its
// type from the corresponding call operator, not from the synthesized return
// statement within it. See Sema::DeduceReturnType.
if (isLambdaConversionOperator(FD))
@@ -3808,19 +3816,26 @@ bool Sema::DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
LocalTypedefNameReferencer Referencer(*this);
Referencer.TraverseType(RetExpr->getType());
} else {
- // In the case of a return with no operand, the initializer is considered
- // to be void().
- //
- // Deduction here can only succeed if the return type is exactly 'cv auto'
- // or 'decltype(auto)', so just check for that case directly.
+ // For a function with a deduced result type to return void,
+ // the result type as written must be 'auto' or 'decltype(auto)',
+ // possibly cv-qualified or constrained, but not ref-qualified.
if (!OrigResultType.getType()->getAs<AutoType>()) {
Diag(ReturnLoc, diag::err_auto_fn_return_void_but_not_auto)
<< OrigResultType.getType();
return true;
}
- // We always deduce U = void in this case.
- Deduced = SubstAutoType(OrigResultType.getType(), Context.VoidTy);
- if (Deduced.isNull())
+ // In the case of a return with no operand, the initializer is considered
+ // to be 'void()'.
+ Expr *Dummy = new (Context) CXXScalarValueInitExpr(
+ Context.VoidTy,
+ Context.getTrivialTypeSourceInfo(Context.VoidTy, ReturnLoc), ReturnLoc);
+ DeduceAutoResult DAR = DeduceAutoType(OrigResultType, Dummy, Deduced);
+
+ if (DAR == DAR_Failed && !FD->isInvalidDecl())
+ Diag(ReturnLoc, diag::err_auto_fn_deduction_failure)
+ << OrigResultType.getType() << Dummy->getType();
+
+ if (DAR != DAR_Succeeded)
return true;
}
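
Deducing through a synthesized 'void()' initializer, rather than special-casing bare 'auto', lets the ordinary deduction rules sort out the declarator forms. A sketch of the cases involved:

auto f() { return; }             // OK: 'auto' deduced as void from 'void()'
decltype(auto) g() { return; }   // OK: 'decltype(auto)' deduced as void
auto &h() { return; }            // error: return type is not a bare placeholder
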
@@ -4098,7 +4113,9 @@ StmtResult Sema::BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
} else if (!RetValExp && !HasDependentReturnType) {
FunctionDecl *FD = getCurFunctionDecl();
- if (getLangOpts().CPlusPlus11 && FD && FD->isConstexpr()) {
+ if ((FD && FD->isInvalidDecl()) || FnRetType->containsErrors()) {
+ // The intended return type might have been "void", so don't warn.
+ } else if (getLangOpts().CPlusPlus11 && FD && FD->isConstexpr()) {
// C++11 [stmt.return]p2
Diag(ReturnLoc, diag::err_constexpr_return_missing_expr)
<< FD << FD->isConsteval();
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp b/contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp
index 49f7a8d573d5..8dfcf9899e3f 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaStmtAsm.cpp
@@ -254,7 +254,7 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
// The parser verifies that there is a string literal here.
- assert(AsmString->isAscii());
+ assert(AsmString->isOrdinary());
FunctionDecl *FD = dyn_cast<FunctionDecl>(getCurLexicalContext());
llvm::StringMap<bool> FeatureMap;
@@ -262,7 +262,7 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
for (unsigned i = 0; i != NumOutputs; i++) {
StringLiteral *Literal = Constraints[i];
- assert(Literal->isAscii());
+ assert(Literal->isOrdinary());
StringRef OutputName;
if (Names[i])
@@ -353,7 +353,7 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
for (unsigned i = NumOutputs, e = NumOutputs + NumInputs; i != e; i++) {
StringLiteral *Literal = Constraints[i];
- assert(Literal->isAscii());
+ assert(Literal->isOrdinary());
StringRef InputName;
if (Names[i])
@@ -459,7 +459,7 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
// Check that the clobbers are valid.
for (unsigned i = 0; i != NumClobbers; i++) {
StringLiteral *Literal = Clobbers[i];
- assert(Literal->isAscii());
+ assert(Literal->isOrdinary());
StringRef Clobber = Literal->getString();
@@ -667,8 +667,17 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
// output was a register, just extend the shorter one to the size of the
// larger one.
if (!SmallerValueMentioned && InputDomain != AD_Other &&
- OutputConstraintInfos[TiedTo].allowsRegister())
+ OutputConstraintInfos[TiedTo].allowsRegister()) {
+ // FIXME: GCC allows OutSize to be at most 128. Currently codegen
+ // crashes when the size is larger than the register size, so we limit it here.
+ if (OutTy->isStructureType() &&
+ Context.getIntTypeForBitwidth(OutSize, /*Signed*/ false).isNull()) {
+ targetDiag(OutputExpr->getExprLoc(), diag::err_store_value_to_reg);
+ return NS;
+ }
+
continue;
+ }
// Either both of the operands were mentioned or the smaller one was
// mentioned. One more special case that we'll allow: if the tied input is
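
A hedged sketch of the construct the new check rejects (assumed shape; the exact behavior depends on the target's register sizes):

struct Big { long long a, b, c; };   // wider than any integer register

void tie(struct Big out, int in) {
  // Output 0 is a register-constrained struct tied to input "0"; previously
  // this could crash codegen, now Sema emits err_store_value_to_reg.
  __asm__("" : "=r"(out) : "0"(in));
}
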
@@ -707,10 +716,7 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
NamedOperandList.emplace_back(
std::make_pair(Names[i]->getName(), Exprs[i]));
// Sort NamedOperandList.
- std::stable_sort(NamedOperandList.begin(), NamedOperandList.end(),
- [](const NamedOperand &LHS, const NamedOperand &RHS) {
- return LHS.first < RHS.first;
- });
+ llvm::stable_sort(NamedOperandList, llvm::less_first());
// Find adjacent duplicate operand.
SmallVector<NamedOperand, 4>::iterator Found =
std::adjacent_find(begin(NamedOperandList), end(NamedOperandList),
@@ -727,6 +733,9 @@ StmtResult Sema::ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
}
if (NS->isAsmGoto())
setFunctionHasBranchIntoScope();
+
+ CleanupVarDeclMarking();
+ DiscardCleanupsInEvaluationContext();
return NS;
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaStmtAttr.cpp b/contrib/llvm-project/clang/lib/Sema/SemaStmtAttr.cpp
index 4f2977f89ce1..b03c055a4f50 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaStmtAttr.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaStmtAttr.cpp
@@ -175,17 +175,22 @@ static Attr *handleLoopHintAttr(Sema &S, Stmt *St, const ParsedAttr &A,
namespace {
class CallExprFinder : public ConstEvaluatedExprVisitor<CallExprFinder> {
- bool FoundCallExpr = false;
+ bool FoundAsmStmt = false;
+ std::vector<const CallExpr *> CallExprs;
public:
typedef ConstEvaluatedExprVisitor<CallExprFinder> Inherited;
CallExprFinder(Sema &S, const Stmt *St) : Inherited(S.Context) { Visit(St); }
- bool foundCallExpr() { return FoundCallExpr; }
+ bool foundCallExpr() { return !CallExprs.empty(); }
+ const std::vector<const CallExpr *> &getCallExprs() { return CallExprs; }
- void VisitCallExpr(const CallExpr *E) { FoundCallExpr = true; }
- void VisitAsmStmt(const AsmStmt *S) { FoundCallExpr = true; }
+ bool foundAsmStmt() { return FoundAsmStmt; }
+
+ void VisitCallExpr(const CallExpr *E) { CallExprs.push_back(E); }
+
+ void VisitAsmStmt(const AsmStmt *S) { FoundAsmStmt = true; }
void Visit(const Stmt *St) {
if (!St)
@@ -200,15 +205,67 @@ static Attr *handleNoMergeAttr(Sema &S, Stmt *St, const ParsedAttr &A,
NoMergeAttr NMA(S.Context, A);
CallExprFinder CEF(S, St);
- if (!CEF.foundCallExpr()) {
- S.Diag(St->getBeginLoc(), diag::warn_nomerge_attribute_ignored_in_stmt)
- << NMA.getSpelling();
+ if (!CEF.foundCallExpr() && !CEF.foundAsmStmt()) {
+ S.Diag(St->getBeginLoc(), diag::warn_attribute_ignored_no_calls_in_stmt)
+ << A;
return nullptr;
}
return ::new (S.Context) NoMergeAttr(S.Context, A);
}
+static Attr *handleNoInlineAttr(Sema &S, Stmt *St, const ParsedAttr &A,
+ SourceRange Range) {
+ NoInlineAttr NIA(S.Context, A);
+ if (!NIA.isClangNoInline()) {
+ S.Diag(St->getBeginLoc(), diag::warn_function_attribute_ignored_in_stmt)
+ << "[[clang::noinline]]";
+ return nullptr;
+ }
+
+ CallExprFinder CEF(S, St);
+ if (!CEF.foundCallExpr()) {
+ S.Diag(St->getBeginLoc(), diag::warn_attribute_ignored_no_calls_in_stmt)
+ << A;
+ return nullptr;
+ }
+
+ for (const auto *CallExpr : CEF.getCallExprs()) {
+ const Decl *Decl = CallExpr->getCalleeDecl();
+ if (Decl->hasAttr<AlwaysInlineAttr>() || Decl->hasAttr<FlattenAttr>())
+ S.Diag(St->getBeginLoc(), diag::warn_function_stmt_attribute_precedence)
+ << A << (Decl->hasAttr<AlwaysInlineAttr>() ? 0 : 1);
+ }
+
+ return ::new (S.Context) NoInlineAttr(S.Context, A);
+}
+
+static Attr *handleAlwaysInlineAttr(Sema &S, Stmt *St, const ParsedAttr &A,
+ SourceRange Range) {
+ AlwaysInlineAttr AIA(S.Context, A);
+ if (!AIA.isClangAlwaysInline()) {
+ S.Diag(St->getBeginLoc(), diag::warn_function_attribute_ignored_in_stmt)
+ << "[[clang::always_inline]]";
+ return nullptr;
+ }
+
+ CallExprFinder CEF(S, St);
+ if (!CEF.foundCallExpr()) {
+ S.Diag(St->getBeginLoc(), diag::warn_attribute_ignored_no_calls_in_stmt)
+ << A;
+ return nullptr;
+ }
+
+ for (const auto *CallExpr : CEF.getCallExprs()) {
+ const Decl *Decl = CallExpr->getCalleeDecl();
+ if (Decl->hasAttr<NoInlineAttr>() || Decl->hasAttr<FlattenAttr>())
+ S.Diag(St->getBeginLoc(), diag::warn_function_stmt_attribute_precedence)
+ << A << (Decl->hasAttr<NoInlineAttr>() ? 2 : 1);
+ }
+
+ return ::new (S.Context) AlwaysInlineAttr(S.Context, A);
+}
+
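
A sketch of the statement-level spellings these handlers accept, and of the precedence warning they emit when the callee carries a conflicting function attribute:

int work(int);
[[gnu::always_inline]] int hot(int);

int caller(int x) {
  [[clang::noinline]] x = work(x);       // calls in this statement: never inlined
  [[clang::always_inline]] x = work(x);  // calls in this statement: force-inlined
  [[clang::noinline]] x = hot(x);        // warns: callee attribute takes precedence
  return x;
}
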
static Attr *handleMustTailAttr(Sema &S, Stmt *St, const ParsedAttr &A,
SourceRange Range) {
// Validation is in Sema::ActOnAttributedStmt().
@@ -408,6 +465,8 @@ static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const ParsedAttr &A,
return nullptr;
switch (A.getKind()) {
+ case ParsedAttr::AT_AlwaysInline:
+ return handleAlwaysInlineAttr(S, St, A, Range);
case ParsedAttr::AT_FallThrough:
return handleFallThroughAttr(S, St, A, Range);
case ParsedAttr::AT_LoopHint:
@@ -418,6 +477,8 @@ static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const ParsedAttr &A,
return handleSuppressAttr(S, St, A, Range);
case ParsedAttr::AT_NoMerge:
return handleNoMergeAttr(S, St, A, Range);
+ case ParsedAttr::AT_NoInline:
+ return handleNoInlineAttr(S, St, A, Range);
case ParsedAttr::AT_MustTail:
return handleMustTailAttr(S, St, A, Range);
case ParsedAttr::AT_Likely:
@@ -434,8 +495,7 @@ static Attr *ProcessStmtAttribute(Sema &S, Stmt *St, const ParsedAttr &A,
}
}
-void Sema::ProcessStmtAttributes(Stmt *S,
- const ParsedAttributesWithRange &InAttrs,
+void Sema::ProcessStmtAttributes(Stmt *S, const ParsedAttributes &InAttrs,
SmallVectorImpl<const Attr *> &OutAttrs) {
for (const ParsedAttr &AL : InAttrs) {
if (const Attr *A = ProcessStmtAttribute(*this, S, AL, InAttrs.Range))
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
index 64a0b45feb98..dbfe6164bda2 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplate.cpp
@@ -11,11 +11,13 @@
#include "TreeTransform.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
+#include "clang/AST/Decl.h"
#include "clang/AST/DeclFriend.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/RecursiveASTVisitor.h"
+#include "clang/AST/TemplateName.h"
#include "clang/AST/TypeVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/LangOptions.h"
@@ -223,6 +225,7 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
return TNK_Non_template;
NamedDecl *D = nullptr;
+ UsingShadowDecl *FoundUsingShadow = dyn_cast<UsingShadowDecl>(*R.begin());
if (R.isAmbiguous()) {
// If we got an ambiguity involving a non-function template, treat this
// as a template name, and pick an arbitrary template for error recovery.
@@ -233,6 +236,7 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
AnyFunctionTemplates = true;
else {
D = FoundTemplate;
+ FoundUsingShadow = dyn_cast<UsingShadowDecl>(FoundD);
break;
}
}
@@ -280,13 +284,13 @@ TemplateNameKind Sema::isTemplateName(Scope *S,
}
TemplateDecl *TD = cast<TemplateDecl>(D);
-
+ Template =
+ FoundUsingShadow ? TemplateName(FoundUsingShadow) : TemplateName(TD);
+ assert(!FoundUsingShadow || FoundUsingShadow->getTargetDecl() == TD);
if (SS.isSet() && !SS.isInvalid()) {
NestedNameSpecifier *Qualifier = SS.getScopeRep();
- Template = Context.getQualifiedTemplateName(Qualifier,
- hasTemplateKeyword, TD);
- } else {
- Template = TemplateName(TD);
+ Template = Context.getQualifiedTemplateName(Qualifier, hasTemplateKeyword,
+ Template);
}
if (isa<FunctionTemplateDecl>(TD)) {
@@ -795,8 +799,9 @@ bool Sema::DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
if (PatternDef && !IsEntityBeingDefined) {
NamedDecl *SuggestedDef = nullptr;
- if (!hasVisibleDefinition(const_cast<NamedDecl*>(PatternDef), &SuggestedDef,
- /*OnlyNeedComplete*/false)) {
+ if (!hasReachableDefinition(const_cast<NamedDecl *>(PatternDef),
+ &SuggestedDef,
+ /*OnlyNeedComplete*/ false)) {
// If we're allowed to diagnose this and recover, do so.
bool Recover = Complain && !isSFINAEContext();
if (Complain)
@@ -863,7 +868,7 @@ bool Sema::DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
}
}
if (Note) // Diagnostics were emitted.
- Diag(Pattern->getLocation(), Note.getValue());
+ Diag(Pattern->getLocation(), *Note);
// In general, Instantiation isn't marked invalid to get more than one
// error for multiple undefined instantiations. But the code that does
@@ -997,8 +1002,8 @@ ParsedTemplateArgument Sema::ActOnTemplateTypeArgument(TypeResult ParsedType) {
TemplateName Name = DTST.getTypePtr()->getTemplateName();
if (SS.isSet())
Name = Context.getQualifiedTemplateName(SS.getScopeRep(),
- /*HasTemplateKeyword*/ false,
- Name.getAsTemplateDecl());
+ /*HasTemplateKeyword=*/false,
+ Name);
ParsedTemplateArgument Result(SS, TemplateTy::make(Name),
DTST.getTemplateNameLoc());
if (EllipsisLoc.isValid())
@@ -2184,10 +2189,24 @@ struct ConvertConstructorToDeductionGuideTransform {
SubstArgs.push_back(SemaRef.Context.getCanonicalTemplateArgument(
SemaRef.Context.getInjectedTemplateArg(NewParam)));
}
+
+ // Substitute new template parameters into requires-clause if present.
+ Expr *RequiresClause = nullptr;
+ if (Expr *InnerRC = InnerParams->getRequiresClause()) {
+ MultiLevelTemplateArgumentList Args;
+ Args.setKind(TemplateSubstitutionKind::Rewrite);
+ Args.addOuterTemplateArguments(SubstArgs);
+ Args.addOuterRetainedLevel();
+ ExprResult E = SemaRef.SubstExpr(InnerRC, Args);
+ if (E.isInvalid())
+ return nullptr;
+ RequiresClause = E.getAs<Expr>();
+ }
+
TemplateParams = TemplateParameterList::Create(
SemaRef.Context, InnerParams->getTemplateLoc(),
InnerParams->getLAngleLoc(), AllParams, InnerParams->getRAngleLoc(),
- /*FIXME: RequiresClause*/ nullptr);
+ RequiresClause);
}
// If we built a new template-parameter-list, track that we need to
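
The effect on class template argument deduction, sketched with a hypothetical template (the comment shows roughly the guide the transform now produces):

template <typename T> struct Holder {
  template <typename U>
    requires (sizeof(U) <= sizeof(T))
  Holder(T, U);
};
// Implicit guide now keeps the rewritten constraint, roughly:
//   template <typename T, typename U>
//     requires (sizeof(U) <= sizeof(T))
//   Holder(T, U) -> Holder<T>;

Holder h(1L, 'c');   // OK: sizeof(char) <= sizeof(long)
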
@@ -4280,7 +4299,7 @@ DeclResult Sema::ActOnVarTemplateSpecialization(
bool IsPartialSpecialization) {
// D must be variable template id.
assert(D.getName().getKind() == UnqualifiedIdKind::IK_TemplateId &&
- "Variable template specialization is declared with a template it.");
+ "Variable template specialization is declared with a template id.");
TemplateIdAnnotation *TemplateId = D.getName().TemplateId;
TemplateArgumentListInfo TemplateArgs =
@@ -5237,7 +5256,7 @@ Sema::SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
HasDefaultArg = false;
if (TemplateTypeParmDecl *TypeParm = dyn_cast<TemplateTypeParmDecl>(Param)) {
- if (!hasVisibleDefaultArgument(TypeParm))
+ if (!hasReachableDefaultArgument(TypeParm))
return TemplateArgumentLoc();
HasDefaultArg = true;
@@ -5254,7 +5273,7 @@ Sema::SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
if (NonTypeTemplateParmDecl *NonTypeParm
= dyn_cast<NonTypeTemplateParmDecl>(Param)) {
- if (!hasVisibleDefaultArgument(NonTypeParm))
+ if (!hasReachableDefaultArgument(NonTypeParm))
return TemplateArgumentLoc();
HasDefaultArg = true;
@@ -5272,7 +5291,7 @@ Sema::SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
TemplateTemplateParmDecl *TempTempParm
= cast<TemplateTemplateParmDecl>(Param);
- if (!hasVisibleDefaultArgument(TempTempParm))
+ if (!hasReachableDefaultArgument(TempTempParm))
return TemplateArgumentLoc();
HasDefaultArg = true;
@@ -5610,10 +5629,10 @@ static bool diagnoseMissingArgument(Sema &S, SourceLocation Loc,
->getTemplateParameters()
->getParam(D->getIndex()));
- // If there's a default argument that's not visible, diagnose that we're
+ // If there's a default argument that's not reachable, diagnose that we're
// missing a module import.
llvm::SmallVector<Module*, 8> Modules;
- if (D->hasDefaultArgument() && !S.hasVisibleDefaultArgument(D, &Modules)) {
+ if (D->hasDefaultArgument() && !S.hasReachableDefaultArgument(D, &Modules)) {
S.diagnoseMissingImport(Loc, cast<NamedDecl>(TD),
D->getDefaultArgumentLoc(), Modules,
Sema::MissingImportKind::DefaultArgument,
@@ -5796,7 +5815,7 @@ bool Sema::CheckTemplateArgumentList(
// (when the template parameter was part of a nested template) into
// the default argument.
if (TemplateTypeParmDecl *TTP = dyn_cast<TemplateTypeParmDecl>(*Param)) {
- if (!hasVisibleDefaultArgument(TTP))
+ if (!hasReachableDefaultArgument(TTP))
return diagnoseMissingArgument(*this, TemplateLoc, Template, TTP,
NewArgs);
@@ -5813,7 +5832,7 @@ bool Sema::CheckTemplateArgumentList(
ArgType);
} else if (NonTypeTemplateParmDecl *NTTP
= dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
- if (!hasVisibleDefaultArgument(NTTP))
+ if (!hasReachableDefaultArgument(NTTP))
return diagnoseMissingArgument(*this, TemplateLoc, Template, NTTP,
NewArgs);
@@ -5831,7 +5850,7 @@ bool Sema::CheckTemplateArgumentList(
TemplateTemplateParmDecl *TempParm
= cast<TemplateTemplateParmDecl>(*Param);
- if (!hasVisibleDefaultArgument(TempParm))
+ if (!hasReachableDefaultArgument(TempParm))
return diagnoseMissingArgument(*this, TemplateLoc, Template, TempParm,
NewArgs);
@@ -6995,7 +7014,9 @@ ExprResult Sema::CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
// -- a predefined __func__ variable
APValue::LValueBase Base = Value.getLValueBase();
auto *VD = const_cast<ValueDecl *>(Base.dyn_cast<const ValueDecl *>());
- if (Base && (!VD || isa<LifetimeExtendedTemporaryDecl>(VD))) {
+ if (Base &&
+ (!VD ||
+ isa<LifetimeExtendedTemporaryDecl, UnnamedGlobalConstantDecl>(VD))) {
Diag(Arg->getBeginLoc(), diag::err_template_arg_not_decl_ref)
<< Arg->getSourceRange();
return ExprError();
@@ -9740,7 +9761,7 @@ DeclResult Sema::ActOnExplicitInstantiation(
if (!getDLLAttr(Def) && getDLLAttr(Specialization) &&
(Context.getTargetInfo().shouldDLLImportComdatSymbols() &&
- !Context.getTargetInfo().getTriple().isPS4CPU())) {
+ !Context.getTargetInfo().getTriple().isPS())) {
// An explicit instantiation definition can add a dll attribute to a
// template with a previous instantiation declaration. MinGW doesn't
// allow this.
@@ -9758,7 +9779,7 @@ DeclResult Sema::ActOnExplicitInstantiation(
!PreviouslyDLLExported && Specialization->hasAttr<DLLExportAttr>();
if (Old_TSK == TSK_ImplicitInstantiation && NewlyDLLExported &&
(Context.getTargetInfo().shouldDLLImportComdatSymbols() &&
- !Context.getTargetInfo().getTriple().isPS4CPU())) {
+ !Context.getTargetInfo().getTriple().isPS())) {
// An explicit instantiation definition can add a dll attribute to a
// template with a previous implicit instantiation. MinGW doesn't allow
// this. We limit clang to only adding dllexport, to avoid potentially
@@ -10116,7 +10137,7 @@ DeclResult Sema::ActOnExplicitInstantiation(Scope *S,
}
// Check the new variable specialization against the parsed input.
- if (PrevTemplate && Prev && !Context.hasSameType(Prev->getType(), R)) {
+ if (PrevTemplate && !Context.hasSameType(Prev->getType(), R)) {
Diag(T->getTypeLoc().getBeginLoc(),
diag::err_invalid_var_template_spec_type)
<< 0 << PrevTemplate << R << Prev->getType();
@@ -10986,10 +11007,12 @@ class ExplicitSpecializationVisibilityChecker {
Sema &S;
SourceLocation Loc;
llvm::SmallVector<Module *, 8> Modules;
+ Sema::AcceptableKind Kind;
public:
- ExplicitSpecializationVisibilityChecker(Sema &S, SourceLocation Loc)
- : S(S), Loc(Loc) {}
+ ExplicitSpecializationVisibilityChecker(Sema &S, SourceLocation Loc,
+ Sema::AcceptableKind Kind)
+ : S(S), Loc(Loc), Kind(Kind) {}
void check(NamedDecl *ND) {
if (auto *FD = dyn_cast<FunctionDecl>(ND))
@@ -11017,6 +11040,23 @@ private:
S.diagnoseMissingImport(Loc, D, D->getLocation(), Modules, Kind, Recover);
}
+ bool CheckMemberSpecialization(const NamedDecl *D) {
+ return Kind == Sema::AcceptableKind::Visible
+ ? S.hasVisibleMemberSpecialization(D)
+ : S.hasReachableMemberSpecialization(D);
+ }
+
+ bool CheckExplicitSpecialization(const NamedDecl *D) {
+ return Kind == Sema::AcceptableKind::Visible
+ ? S.hasVisibleExplicitSpecialization(D)
+ : S.hasReachableExplicitSpecialization(D);
+ }
+
+ bool CheckDeclaration(const NamedDecl *D) {
+ return Kind == Sema::AcceptableKind::Visible ? S.hasVisibleDeclaration(D)
+ : S.hasReachableDeclaration(D);
+ }
+
// Check a specific declaration. There are three problematic cases:
//
// 1) The declaration is an explicit specialization of a template
@@ -11033,10 +11073,9 @@ private:
void checkImpl(SpecDecl *Spec) {
bool IsHiddenExplicitSpecialization = false;
if (Spec->getTemplateSpecializationKind() == TSK_ExplicitSpecialization) {
- IsHiddenExplicitSpecialization =
- Spec->getMemberSpecializationInfo()
- ? !S.hasVisibleMemberSpecialization(Spec, &Modules)
- : !S.hasVisibleExplicitSpecialization(Spec, &Modules);
+ IsHiddenExplicitSpecialization = Spec->getMemberSpecializationInfo()
+ ? !CheckMemberSpecialization(Spec)
+ : !CheckExplicitSpecialization(Spec);
} else {
checkInstantiated(Spec);
}
@@ -11060,7 +11099,7 @@ private:
checkTemplate(TD);
else if (auto *TD =
From.dyn_cast<ClassTemplatePartialSpecializationDecl *>()) {
- if (!S.hasVisibleDeclaration(TD))
+ if (!CheckDeclaration(TD))
diagnose(TD, true);
checkTemplate(TD);
}
@@ -11076,7 +11115,7 @@ private:
checkTemplate(TD);
else if (auto *TD =
From.dyn_cast<VarTemplatePartialSpecializationDecl *>()) {
- if (!S.hasVisibleDeclaration(TD))
+ if (!CheckDeclaration(TD))
diagnose(TD, true);
checkTemplate(TD);
}
@@ -11087,7 +11126,7 @@ private:
template<typename TemplDecl>
void checkTemplate(TemplDecl *TD) {
if (TD->isMemberSpecialization()) {
- if (!S.hasVisibleMemberSpecialization(TD, &Modules))
+ if (!CheckMemberSpecialization(TD))
diagnose(TD->getMostRecentDecl(), false);
}
}
@@ -11098,5 +11137,17 @@ void Sema::checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec) {
if (!getLangOpts().Modules)
return;
- ExplicitSpecializationVisibilityChecker(*this, Loc).check(Spec);
+ ExplicitSpecializationVisibilityChecker(*this, Loc,
+ Sema::AcceptableKind::Visible)
+ .check(Spec);
+}
+
+void Sema::checkSpecializationReachability(SourceLocation Loc,
+ NamedDecl *Spec) {
+ if (!getLangOpts().CPlusPlusModules)
+ return checkSpecializationVisibility(Loc, Spec);
+
+ ExplicitSpecializationVisibilityChecker(*this, Loc,
+ Sema::AcceptableKind::Reachable)
+ .check(Spec);
}
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp
index 22dd395d9943..9ec33e898198 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -533,9 +533,9 @@ DeduceTemplateArguments(Sema &S,
///
/// \param TemplateParams the template parameters that we are deducing
///
-/// \param Param the parameter type
+/// \param P the parameter type
///
-/// \param Arg the argument type
+/// \param A the argument type
///
/// \param Info information about the template argument deduction itself
///
@@ -828,7 +828,7 @@ public:
/// Determine whether this pack expansion scope has a known, fixed arity.
/// This happens if it involves a pack from an outer template that has
/// (notionally) already been expanded.
- bool hasFixedArity() { return FixedNumExpansions.hasValue(); }
+ bool hasFixedArity() { return FixedNumExpansions.has_value(); }
/// Determine whether the next element of the argument is still part of this
/// pack. This is the case unless the pack is already expanded to a fixed
@@ -1199,11 +1199,11 @@ static CXXRecordDecl *getCanonicalRD(QualType T) {
///
/// \param S the semantic analysis object within which we are deducing.
///
-/// \param RecordT the top level record object we are deducing against.
+/// \param RD the top level record object we are deducing against.
///
/// \param TemplateParams the template parameters that we are deducing.
///
-/// \param SpecParam the template specialization parameter type.
+/// \param P the template specialization parameter type.
///
/// \param Info information about the template argument deduction itself.
///
@@ -1315,9 +1315,9 @@ DeduceTemplateBases(Sema &S, const CXXRecordDecl *RD,
///
/// \param TemplateParams the template parameters that we are deducing
///
-/// \param ParamIn the parameter type
+/// \param P the parameter type
///
-/// \param ArgIn the argument type
+/// \param A the argument type
///
/// \param Info information about the template argument deduction itself
///
@@ -3406,7 +3406,7 @@ static unsigned getPackIndexForParam(Sema &S,
for (auto *PD : FunctionTemplate->getTemplatedDecl()->parameters()) {
if (PD->isParameterPack()) {
unsigned NumExpansions =
- S.getNumArgumentsInExpansion(PD->getType(), Args).getValueOr(1);
+ S.getNumArgumentsInExpansion(PD->getType(), Args).value_or(1);
if (Idx + NumExpansions > ParamIdx)
return ParamIdx - Idx;
Idx += NumExpansions;
@@ -4637,7 +4637,7 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result,
}
// Find the depth of template parameter to synthesize.
- unsigned Depth = DependentDeductionDepth.getValueOr(0);
+ unsigned Depth = DependentDeductionDepth.value_or(0);
// If this is a 'decltype(auto)' specifier, do the decltype dance.
// Since 'decltype(auto)' can only occur at the top of the type, we
@@ -4769,12 +4769,13 @@ Sema::DeduceAutoType(TypeLoc Type, Expr *&Init, QualType &Result,
return DAR_FailedAlreadyDiagnosed;
}
- if (const auto *AT = Type.getType()->getAs<AutoType>()) {
+ QualType MaybeAuto = Type.getType().getNonReferenceType();
+ while (MaybeAuto->isPointerType())
+ MaybeAuto = MaybeAuto->getPointeeType();
+ if (const auto *AT = MaybeAuto->getAs<AutoType>()) {
if (AT->isConstrained() && !IgnoreConstraints) {
- auto ConstraintsResult =
- CheckDeducedPlaceholderConstraints(*this, *AT,
- Type.getContainedAutoTypeLoc(),
- DeducedType);
+ auto ConstraintsResult = CheckDeducedPlaceholderConstraints(
+ *this, *AT, Type.getContainedAutoTypeLoc(), DeducedType);
if (ConstraintsResult != DAR_Succeeded)
return ConstraintsResult;
}
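
A sketch of the declarations this pointer/reference peeling now handles (hypothetical concept):

#include <type_traits>
template <typename T>
concept Integral = std::is_integral_v<T>;

int n = 0;
Integral auto *p = &n;   // constraint found behind the pointer and checked
Integral auto &r = n;    // likewise behind the reference
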
@@ -5142,18 +5143,20 @@ static bool isVariadicFunctionTemplate(FunctionTemplateDecl *FunTmpl) {
/// candidate with a reversed parameter order. In this case, the corresponding
/// P/A pairs between FT1 and FT2 are reversed.
///
+/// \param AllowOrderingByConstraints If \c false, don't check whether one of
+/// the templates is more constrained than the other. Defaults to true.
+///
/// \returns the more specialized function template. If neither
/// template is more specialized, returns NULL.
-FunctionTemplateDecl *
-Sema::getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
- FunctionTemplateDecl *FT2,
- SourceLocation Loc,
- TemplatePartialOrderingContext TPOC,
- unsigned NumCallArguments1,
- unsigned NumCallArguments2,
- bool Reversed) {
-
- auto JudgeByConstraints = [&] () -> FunctionTemplateDecl * {
+FunctionTemplateDecl *Sema::getMoreSpecializedTemplate(
+ FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc,
+ TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1,
+ unsigned NumCallArguments2, bool Reversed,
+ bool AllowOrderingByConstraints) {
+
+ auto JudgeByConstraints = [&]() -> FunctionTemplateDecl * {
+ if (!AllowOrderingByConstraints)
+ return nullptr;
llvm::SmallVector<const Expr *, 3> AC1, AC2;
FT1->getAssociatedConstraints(AC1);
FT2->getAssociatedConstraints(AC2);
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp
index 6de486be8f16..f09b3473c074 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -10,12 +10,14 @@
//===----------------------------------------------------------------------===/
#include "TreeTransform.h"
+#include "clang/AST/ASTConcept.h"
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/ASTMutationListener.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/Expr.h"
+#include "clang/AST/ExprConcepts.h"
#include "clang/AST/PrettyDeclStackTrace.h"
#include "clang/AST/TypeVisitor.h"
#include "clang/Basic/LangOptions.h"
@@ -55,26 +57,23 @@ using namespace sema;
/// instantiating the definition of the given declaration, \p D. This is
/// used to determine the proper set of template instantiation arguments for
/// friend function template specializations.
-MultiLevelTemplateArgumentList
-Sema::getTemplateInstantiationArgs(NamedDecl *D,
- const TemplateArgumentList *Innermost,
- bool RelativeToPrimary,
- const FunctionDecl *Pattern) {
+MultiLevelTemplateArgumentList Sema::getTemplateInstantiationArgs(
+ const NamedDecl *D, const TemplateArgumentList *Innermost,
+ bool RelativeToPrimary, const FunctionDecl *Pattern) {
// Accumulate the set of template argument lists in this structure.
MultiLevelTemplateArgumentList Result;
if (Innermost)
Result.addOuterTemplateArguments(Innermost);
- DeclContext *Ctx = dyn_cast<DeclContext>(D);
+ const auto *Ctx = dyn_cast<DeclContext>(D);
if (!Ctx) {
Ctx = D->getDeclContext();
// Add template arguments from a variable template instantiation. For a
// class-scope explicit specialization, there are no template arguments
// at this level, but there may be enclosing template arguments.
- VarTemplateSpecializationDecl *Spec =
- dyn_cast<VarTemplateSpecializationDecl>(D);
+ const auto *Spec = dyn_cast<VarTemplateSpecializationDecl>(D);
if (Spec && !Spec->isClassScopeExplicitSpecialization()) {
// We're done when we hit an explicit specialization.
if (Spec->getSpecializationKind() == TSK_ExplicitSpecialization &&
@@ -107,8 +106,7 @@ Sema::getTemplateInstantiationArgs(NamedDecl *D,
// use empty template parameter lists for all of the outer templates
// to avoid performing any substitutions.
if (Ctx->isTranslationUnit()) {
- if (TemplateTemplateParmDecl *TTP
- = dyn_cast<TemplateTemplateParmDecl>(D)) {
+ if (const auto *TTP = dyn_cast<TemplateTemplateParmDecl>(D)) {
for (unsigned I = 0, N = TTP->getDepth() + 1; I != N; ++I)
Result.addOuterTemplateArguments(None);
return Result;
@@ -118,8 +116,7 @@ Sema::getTemplateInstantiationArgs(NamedDecl *D,
while (!Ctx->isFileContext()) {
// Add template arguments from a class template instantiation.
- ClassTemplateSpecializationDecl *Spec
- = dyn_cast<ClassTemplateSpecializationDecl>(Ctx);
+ const auto *Spec = dyn_cast<ClassTemplateSpecializationDecl>(Ctx);
if (Spec && !Spec->isClassScopeExplicitSpecialization()) {
// We're done when we hit an explicit specialization.
if (Spec->getSpecializationKind() == TSK_ExplicitSpecialization &&
@@ -135,7 +132,7 @@ Sema::getTemplateInstantiationArgs(NamedDecl *D,
break;
}
// Add template arguments from a function template specialization.
- else if (FunctionDecl *Function = dyn_cast<FunctionDecl>(Ctx)) {
+ else if (const auto *Function = dyn_cast<FunctionDecl>(Ctx)) {
if (!RelativeToPrimary &&
Function->getTemplateSpecializationKindForInstantiation() ==
TSK_ExplicitSpecialization)
@@ -177,7 +174,7 @@ Sema::getTemplateInstantiationArgs(NamedDecl *D,
RelativeToPrimary = false;
continue;
}
- } else if (CXXRecordDecl *Rec = dyn_cast<CXXRecordDecl>(Ctx)) {
+ } else if (const auto *Rec = dyn_cast<CXXRecordDecl>(Ctx)) {
if (ClassTemplateDecl *ClassTemplate = Rec->getDescribedClassTemplate()) {
assert(Result.getNumSubstitutedLevels() == 0 &&
"Outer template not instantiated?");
@@ -218,6 +215,7 @@ bool Sema::CodeSynthesisContext::isInstantiationRecord() const {
case RewritingOperatorAsSpaceship:
case InitializingStructuredBinding:
case MarkingClassDllexported:
+ case BuildingBuiltinDumpStructCall:
return false;
// This function should never be called when Kind's value is Memoization.
@@ -487,6 +485,19 @@ void Sema::InstantiatingTemplate::Clear() {
}
}
+static std::string convertCallArgsToString(Sema &S,
+ llvm::ArrayRef<const Expr *> Args) {
+ std::string Result;
+ llvm::raw_string_ostream OS(Result);
+ llvm::ListSeparator Comma;
+ for (const Expr *Arg : Args) {
+ OS << Comma;
+ Arg->IgnoreParens()->printPretty(OS, nullptr,
+ S.Context.getPrintingPolicy());
+ }
+ return Result;
+}
+
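
For context, the new BuildingBuiltinDumpStructCall notes fire while synthesizing calls like this (sketch):

#include <stdio.h>
struct Point { int x, y; };

void show(struct Point p) {
  // If building the synthesized printing calls fails, the note now prints
  // the offending call arguments via convertCallArgsToString.
  __builtin_dump_struct(&p, printf);
}
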
bool Sema::InstantiatingTemplate::CheckInstantiationDepth(
SourceLocation PointOfInstantiation,
SourceRange InstantiationRange) {
@@ -775,6 +786,14 @@ void Sema::PrintInstantiationStack() {
<< cast<CXXRecordDecl>(Active->Entity) << !getLangOpts().CPlusPlus11;
break;
+ case CodeSynthesisContext::BuildingBuiltinDumpStructCall:
+ Diags.Report(Active->PointOfInstantiation,
+ diag::note_building_builtin_dump_struct_call)
+ << convertCallArgsToString(
+ *this,
+ llvm::makeArrayRef(Active->CallArgs, Active->NumCallArgs));
+ break;
+
case CodeSynthesisContext::Memoization:
break;
@@ -879,6 +898,7 @@ Optional<TemplateDeductionInfo *> Sema::isSFINAEContext() const {
case CodeSynthesisContext::DefiningSynthesizedFunction:
case CodeSynthesisContext::InitializingStructuredBinding:
case CodeSynthesisContext::MarkingClassDllexported:
+ case CodeSynthesisContext::BuildingBuiltinDumpStructCall:
// This happens in a context unrelated to template instantiation, so
// there is no SFINAE.
return None;
@@ -1126,12 +1146,12 @@ namespace {
ExprResult TransformLambdaExpr(LambdaExpr *E) {
LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true);
- return TreeTransform<TemplateInstantiator>::TransformLambdaExpr(E);
+ return inherited::TransformLambdaExpr(E);
}
ExprResult TransformRequiresExpr(RequiresExpr *E) {
LocalInstantiationScope Scope(SemaRef, /*CombineWithOuterScope=*/true);
- return TreeTransform<TemplateInstantiator>::TransformRequiresExpr(E);
+ return inherited::TransformRequiresExpr(E);
}
bool TransformRequiresExprRequirements(
@@ -1341,10 +1361,7 @@ TemplateInstantiator::RebuildElaboratedType(SourceLocation KeywordLoc,
}
}
- return TreeTransform<TemplateInstantiator>::RebuildElaboratedType(KeywordLoc,
- Keyword,
- QualifierLoc,
- T);
+ return inherited::RebuildElaboratedType(KeywordLoc, Keyword, QualifierLoc, T);
}
TemplateName TemplateInstantiator::TransformTemplateName(
@@ -1719,7 +1736,7 @@ TemplateInstantiator::TransformDeclRefExpr(DeclRefExpr *E) {
if (PD->isParameterPack())
return TransformFunctionParmPackRefExpr(E, PD);
- return TreeTransform<TemplateInstantiator>::TransformDeclRefExpr(E);
+ return inherited::TransformDeclRefExpr(E);
}
ExprResult TemplateInstantiator::TransformCXXDefaultArgExpr(
@@ -1981,8 +1998,7 @@ TemplateInstantiator::TransformExprRequirement(concepts::ExprRequirement *Req) {
TransRetReq.emplace(TPL);
}
}
- assert(TransRetReq.hasValue() &&
- "All code paths leading here must set TransRetReq");
+ assert(TransRetReq && "All code paths leading here must set TransRetReq");
if (Expr *E = TransExpr.dyn_cast<Expr *>())
return RebuildExprRequirement(E, Req->isSimple(), Req->getNoexceptLoc(),
std::move(*TransRetReq));
@@ -2008,6 +2024,7 @@ TemplateInstantiator::TransformNestedRequirement(
Req->getConstraintExpr()->getSourceRange());
ExprResult TransConstraint;
+ ConstraintSatisfaction Satisfaction;
TemplateDeductionInfo Info(Req->getConstraintExpr()->getBeginLoc());
{
EnterExpressionEvaluationContext ContextRAII(
@@ -2019,6 +2036,25 @@ TemplateInstantiator::TransformNestedRequirement(
if (ConstrInst.isInvalid())
return nullptr;
TransConstraint = TransformExpr(Req->getConstraintExpr());
+ if (!TransConstraint.isInvalid()) {
+ bool CheckSucceeded =
+ SemaRef.CheckConstraintExpression(TransConstraint.get());
+ (void)CheckSucceeded;
+ assert((CheckSucceeded || Trap.hasErrorOccurred()) &&
+ "CheckConstraintExpression failed, but "
+ "did not produce a SFINAE error");
+ }
+ // Use version of CheckConstraintSatisfaction that does no substitutions.
+ if (!TransConstraint.isInvalid() &&
+ !TransConstraint.get()->isInstantiationDependent() &&
+ !Trap.hasErrorOccurred()) {
+ bool CheckFailed = SemaRef.CheckConstraintSatisfaction(
+ TransConstraint.get(), Satisfaction);
+ (void)CheckFailed;
+ assert((!CheckFailed || Trap.hasErrorOccurred()) &&
+ "CheckConstraintSatisfaction failed, "
+ "but did not produce a SFINAE error");
+ }
if (TransConstraint.isInvalid() || Trap.hasErrorOccurred())
return RebuildNestedRequirement(createSubstDiag(SemaRef, Info,
[&] (llvm::raw_ostream& OS) {
@@ -2026,7 +2062,11 @@ TemplateInstantiator::TransformNestedRequirement(
SemaRef.getPrintingPolicy());
}));
}
- return RebuildNestedRequirement(TransConstraint.get());
+ if (TransConstraint.get()->isInstantiationDependent())
+ return new (SemaRef.Context)
+ concepts::NestedRequirement(TransConstraint.get());
+ return new (SemaRef.Context) concepts::NestedRequirement(
+ SemaRef.Context, TransConstraint.get(), Satisfaction);
}
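
A sketch of the nested requirements this now evaluates eagerly once they are no longer instantiation-dependent:

template <typename T>
concept BiggerThanChar = requires {
  requires (sizeof(T) > 1);   // nested requirement; satisfaction recorded here
};
static_assert(BiggerThanChar<int>);
static_assert(!BiggerThanChar<char>);
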
@@ -3235,6 +3275,17 @@ Sema::InstantiateClassMembers(SourceLocation PointOfInstantiation,
if (FunctionDecl *Pattern =
Function->getInstantiatedFromMemberFunction()) {
+ if (Function->isIneligibleOrNotSelected())
+ continue;
+
+ if (Function->getTrailingRequiresClause()) {
+ ConstraintSatisfaction Satisfaction;
+ if (CheckFunctionConstraints(Function, Satisfaction) ||
+ !Satisfaction.IsSatisfied) {
+ continue;
+ }
+ }
+
if (Function->hasAttr<ExcludeFromExplicitInstantiationAttr>())
continue;
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index 467372c71496..d7558017948a 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -188,15 +188,37 @@ static void instantiateDependentAnnotationAttr(
const AnnotateAttr *Attr, Decl *New) {
EnterExpressionEvaluationContext Unevaluated(
S, Sema::ExpressionEvaluationContext::ConstantEvaluated);
+
+ // If the attribute has delayed arguments, instantiate them and handle the
+ // results as the new arguments for the attribute.
+ bool HasDelayedArgs = Attr->delayedArgs_size();
+
+ ArrayRef<Expr *> ArgsToInstantiate =
+ HasDelayedArgs
+ ? ArrayRef<Expr *>{Attr->delayedArgs_begin(), Attr->delayedArgs_end()}
+ : ArrayRef<Expr *>{Attr->args_begin(), Attr->args_end()};
+
SmallVector<Expr *, 4> Args;
- Args.reserve(Attr->args_size());
- for (auto *E : Attr->args()) {
- ExprResult Result = S.SubstExpr(E, TemplateArgs);
- if (!Result.isUsable())
+ if (S.SubstExprs(ArgsToInstantiate,
+ /*IsCall=*/false, TemplateArgs, Args))
+ return;
+
+ StringRef Str = Attr->getAnnotation();
+ if (HasDelayedArgs) {
+ if (Args.size() < 1) {
+ S.Diag(Attr->getLoc(), diag::err_attribute_too_few_arguments)
+ << Attr << 1;
+ return;
+ }
+
+ if (!S.checkStringLiteralArgumentAttr(*Attr, Args[0], Str))
return;
- Args.push_back(Result.get());
+
+ llvm::SmallVector<Expr *, 4> ActualArgs;
+ ActualArgs.insert(ActualArgs.begin(), Args.begin() + 1, Args.end());
+ std::swap(Args, ActualArgs);
}
- S.AddAnnotationAttr(New, *Attr, Attr->getAnnotation(), Args);
+ S.AddAnnotationAttr(New, *Attr, Str, Args);
}
static Expr *instantiateDependentFunctionAttrCondition(
@@ -498,8 +520,7 @@ static void instantiateOMPDeclareVariantAttr(
continue;
NeedDevicePtrExprs.push_back(ER.get());
}
- for (auto A : Attr.appendArgs())
- AppendArgs.push_back(A);
+ llvm::append_range(AppendArgs, Attr.appendArgs());
S.ActOnOpenMPDeclareVariantDirective(
FD, E, TI, NothingExprs, NeedDevicePtrExprs, AppendArgs, SourceLocation(),
@@ -600,6 +621,31 @@ static bool isRelevantAttr(Sema &S, const Decl *D, const Attr *A) {
return true;
}
+ if (const auto *BA = dyn_cast<BuiltinAttr>(A)) {
+ const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
+ switch (BA->getID()) {
+ case Builtin::BIforward:
+ // Do not treat 'std::forward' as a builtin if it takes an rvalue reference
+ // type and returns an lvalue reference type. The library implementation
+ // will produce an error in this case; don't get in its way.
+ if (FD && FD->getNumParams() >= 1 &&
+ FD->getParamDecl(0)->getType()->isRValueReferenceType() &&
+ FD->getReturnType()->isLValueReferenceType()) {
+ return false;
+ }
+ LLVM_FALLTHROUGH;
+ case Builtin::BImove:
+ case Builtin::BImove_if_noexcept:
+ // HACK: Super-old versions of libc++ (3.1 and earlier) provide
+ // std::forward and std::move overloads that sometimes return by value
+ // instead of by reference when building in C++98 mode. Don't treat such
+ // cases as builtins.
+ if (FD && !FD->getReturnType()->isReferenceType())
+ return false;
+ break;
+ }
+ }
+
return true;
}
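
A hedged sketch of the shape the HACK guards against (illustrative only, not real libc++ code):

// C++98-era fallback: move() returning by value. Treating this as the
// compiler builtin would miscompile, so isRelevantAttr skips it.
namespace fake_std {
template <class T> T move(T &t) { return t; }
}
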
@@ -800,7 +846,7 @@ void Sema::InstantiateDefaultCtorDefaultArgs(CXXConstructorDecl *Ctor) {
for (unsigned I = 0; I != NumParams; ++I) {
(void)CheckCXXDefaultArgExpr(Attr->getLocation(), Ctor,
Ctor->getParamDecl(I));
- DiscardCleanupsInEvaluationContext();
+ CleanupVarDeclMarking();
}
}
@@ -846,6 +892,11 @@ Decl *TemplateDeclInstantiator::VisitMSGuidDecl(MSGuidDecl *D) {
llvm_unreachable("GUID declaration cannot be instantiated");
}
+Decl *TemplateDeclInstantiator::VisitUnnamedGlobalConstantDecl(
+ UnnamedGlobalConstantDecl *D) {
+ llvm_unreachable("UnnamedGlobalConstantDecl cannot be instantiated");
+}
+
Decl *TemplateDeclInstantiator::VisitTemplateParamObjectDecl(
TemplateParamObjectDecl *D) {
llvm_unreachable("template parameter objects cannot be instantiated");
@@ -953,6 +1004,7 @@ Decl *TemplateDeclInstantiator::InstantiateTypedefNameDecl(TypedefNameDecl *D,
SemaRef.inferGslPointerAttribute(Typedef);
Typedef->setAccess(D->getAccess());
+ Typedef->setReferenced(D->isReferenced());
return Typedef;
}
@@ -1831,7 +1883,7 @@ Decl *TemplateDeclInstantiator::VisitCXXRecordDecl(CXXRecordDecl *D) {
if (D->isLambda())
Record = CXXRecordDecl::CreateLambda(
SemaRef.Context, Owner, D->getLambdaTypeInfo(), D->getLocation(),
- D->isDependentLambda(), D->isGenericLambda(),
+ D->getLambdaDependencyKind(), D->isGenericLambda(),
D->getLambdaCaptureDefault());
else
Record = CXXRecordDecl::Create(SemaRef.Context, D->getTagKind(), Owner,
@@ -2217,7 +2269,8 @@ Decl *TemplateDeclInstantiator::VisitFunctionDecl(
}
SemaRef.CheckFunctionDeclaration(/*Scope*/ nullptr, Function, Previous,
- IsExplicitSpecialization);
+ IsExplicitSpecialization,
+ Function->isThisDeclarationADefinition());
// Check the template parameter list against the previous declaration. The
// goal here is to pick up default arguments added since the friend was
@@ -2425,6 +2478,7 @@ Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
SemaRef.Context, Record, StartLoc, NameInfo, T, TInfo,
Destructor->UsesFPIntrin(), Destructor->isInlineSpecified(), false,
Destructor->getConstexprKind(), TrailingRequiresClause);
+ Method->setIneligibleOrNotSelected(true);
Method->setRangeEnd(Destructor->getEndLoc());
Method->setDeclName(SemaRef.Context.DeclarationNames.getCXXDestructorName(
SemaRef.Context.getCanonicalType(
@@ -2542,7 +2596,7 @@ Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
IsExplicitSpecialization = true;
} else if (const ASTTemplateArgumentListInfo *Info =
- ClassScopeSpecializationArgs.getValueOr(
+ ClassScopeSpecializationArgs.value_or(
D->getTemplateSpecializationArgsAsWritten())) {
SemaRef.LookupQualifiedName(Previous, DC);
@@ -2578,7 +2632,8 @@ Decl *TemplateDeclInstantiator::VisitCXXMethodDecl(
}
SemaRef.CheckFunctionDeclaration(nullptr, Method, Previous,
- IsExplicitSpecialization);
+ IsExplicitSpecialization,
+ Method->isThisDeclarationADefinition());
if (D->isPure())
SemaRef.CheckPureMethod(Method, SourceRange());
@@ -3747,13 +3802,15 @@ Decl *TemplateDeclInstantiator::VisitVarTemplateSpecializationDecl(
return nullptr;
// Substitute the current template arguments.
- const TemplateArgumentListInfo &TemplateArgsInfo = D->getTemplateArgsInfo();
- VarTemplateArgsInfo.setLAngleLoc(TemplateArgsInfo.getLAngleLoc());
- VarTemplateArgsInfo.setRAngleLoc(TemplateArgsInfo.getRAngleLoc());
+ if (const ASTTemplateArgumentListInfo *TemplateArgsInfo =
+ D->getTemplateArgsInfo()) {
+ VarTemplateArgsInfo.setLAngleLoc(TemplateArgsInfo->getLAngleLoc());
+ VarTemplateArgsInfo.setRAngleLoc(TemplateArgsInfo->getRAngleLoc());
- if (SemaRef.SubstTemplateArguments(TemplateArgsInfo.arguments(), TemplateArgs,
- VarTemplateArgsInfo))
- return nullptr;
+ if (SemaRef.SubstTemplateArguments(TemplateArgsInfo->arguments(),
+ TemplateArgs, VarTemplateArgsInfo))
+ return nullptr;
+ }
// Check that the template argument list is well-formed for this template.
SmallVector<TemplateArgument, 4> Converted;
@@ -4355,10 +4412,10 @@ TemplateDeclInstantiator::SubstFunctionType(FunctionDecl *D,
/// Introduce the instantiated function parameters into the local
/// instantiation scope, and set the parameter names to those used
/// in the template.
-static bool addInstantiatedParametersToScope(Sema &S, FunctionDecl *Function,
- const FunctionDecl *PatternDecl,
- LocalInstantiationScope &Scope,
- const MultiLevelTemplateArgumentList &TemplateArgs) {
+bool Sema::addInstantiatedParametersToScope(
+ FunctionDecl *Function, const FunctionDecl *PatternDecl,
+ LocalInstantiationScope &Scope,
+ const MultiLevelTemplateArgumentList &TemplateArgs) {
unsigned FParamIdx = 0;
for (unsigned I = 0, N = PatternDecl->getNumParams(); I != N; ++I) {
const ParmVarDecl *PatternParam = PatternDecl->getParamDecl(I);
@@ -4374,9 +4431,9 @@ static bool addInstantiatedParametersToScope(Sema &S, FunctionDecl *Function,
// it's instantiation-dependent.
// FIXME: Updating the type to work around this is at best fragile.
if (!PatternDecl->getType()->isDependentType()) {
- QualType T = S.SubstType(PatternParam->getType(), TemplateArgs,
- FunctionParam->getLocation(),
- FunctionParam->getDeclName());
+ QualType T = SubstType(PatternParam->getType(), TemplateArgs,
+ FunctionParam->getLocation(),
+ FunctionParam->getDeclName());
if (T.isNull())
return true;
FunctionParam->setType(T);
@@ -4389,8 +4446,8 @@ static bool addInstantiatedParametersToScope(Sema &S, FunctionDecl *Function,
// Expand the parameter pack.
Scope.MakeInstantiatedLocalArgPack(PatternParam);
- Optional<unsigned> NumArgumentsInExpansion
- = S.getNumArgumentsInExpansion(PatternParam->getType(), TemplateArgs);
+ Optional<unsigned> NumArgumentsInExpansion =
+ getNumArgumentsInExpansion(PatternParam->getType(), TemplateArgs);
if (NumArgumentsInExpansion) {
QualType PatternType =
PatternParam->getType()->castAs<PackExpansionType>()->getPattern();
@@ -4398,10 +4455,10 @@ static bool addInstantiatedParametersToScope(Sema &S, FunctionDecl *Function,
ParmVarDecl *FunctionParam = Function->getParamDecl(FParamIdx);
FunctionParam->setDeclName(PatternParam->getDeclName());
if (!PatternDecl->getType()->isDependentType()) {
- Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(S, Arg);
- QualType T = S.SubstType(PatternType, TemplateArgs,
- FunctionParam->getLocation(),
- FunctionParam->getDeclName());
+ Sema::ArgumentPackSubstitutionIndexRAII SubstIndex(*this, Arg);
+ QualType T =
+ SubstType(PatternType, TemplateArgs, FunctionParam->getLocation(),
+ FunctionParam->getDeclName());
if (T.isNull())
return true;
FunctionParam->setType(T);
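The pack-expansion branch above maps a single pattern parameter onto N instantiated parameters, substituting the pattern type once per pack element. Roughly the shape involved:

    template <typename... Ts>
    void f(Ts... ts); // one ParmVarDecl pattern: 'ts'

    // f<int, char, long> carries three instantiated parameters; each takes
    // the pattern's name, and its type is substituted with the argument pack
    // substitution index set to 0, 1, and 2 in turn.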
@@ -4465,8 +4522,7 @@ bool Sema::InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD,
FunctionDecl *Pattern = FD->getTemplateInstantiationPattern(
/*ForDefinition*/ false);
- if (addInstantiatedParametersToScope(*this, FD, Pattern, Local,
- TemplateArgs))
+ if (addInstantiatedParametersToScope(FD, Pattern, Local, TemplateArgs))
return true;
runWithSufficientStackSpace(CallLoc, [&] {
@@ -4539,8 +4595,7 @@ void Sema::InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
// we don't store enough information to map back to the friend declaration in
// the template.
FunctionDecl *Template = Proto->getExceptionSpecTemplate();
- if (addInstantiatedParametersToScope(*this, Decl, Template, Scope,
- TemplateArgs)) {
+ if (addInstantiatedParametersToScope(Decl, Template, Scope, TemplateArgs)) {
UpdateExceptionSpec(Decl, EST_None);
return;
}
@@ -4549,53 +4604,6 @@ void Sema::InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
TemplateArgs);
}
-bool Sema::CheckInstantiatedFunctionTemplateConstraints(
- SourceLocation PointOfInstantiation, FunctionDecl *Decl,
- ArrayRef<TemplateArgument> TemplateArgs,
- ConstraintSatisfaction &Satisfaction) {
- // In most cases we're not going to have constraints, so check for that first.
- FunctionTemplateDecl *Template = Decl->getPrimaryTemplate();
- // Note - code synthesis context for the constraints check is created
- // inside CheckConstraintsSatisfaction.
- SmallVector<const Expr *, 3> TemplateAC;
- Template->getAssociatedConstraints(TemplateAC);
- if (TemplateAC.empty()) {
- Satisfaction.IsSatisfied = true;
- return false;
- }
-
- // Enter the scope of this instantiation. We don't use
- // PushDeclContext because we don't have a scope.
- Sema::ContextRAII savedContext(*this, Decl);
- LocalInstantiationScope Scope(*this);
-
- // If this is not an explicit specialization - we need to get the instantiated
- // version of the template arguments and add them to scope for the
- // substitution.
- if (Decl->isTemplateInstantiation()) {
- InstantiatingTemplate Inst(*this, Decl->getPointOfInstantiation(),
- InstantiatingTemplate::ConstraintsCheck{}, Decl->getPrimaryTemplate(),
- TemplateArgs, SourceRange());
- if (Inst.isInvalid())
- return true;
- MultiLevelTemplateArgumentList MLTAL(
- *Decl->getTemplateSpecializationArgs());
- if (addInstantiatedParametersToScope(
- *this, Decl, Decl->getPrimaryTemplate()->getTemplatedDecl(),
- Scope, MLTAL))
- return true;
- }
- Qualifiers ThisQuals;
- CXXRecordDecl *Record = nullptr;
- if (auto *Method = dyn_cast<CXXMethodDecl>(Decl)) {
- ThisQuals = Method->getMethodQualifiers();
- Record = Method->getParent();
- }
- CXXThisScopeRAII ThisScope(*this, Record, ThisQuals, Record != nullptr);
- return CheckConstraintSatisfaction(Template, TemplateAC, TemplateArgs,
- PointOfInstantiation, Satisfaction);
-}
-
/// Initializes the common fields of an instantiation function
/// declaration (New) from the corresponding fields of its template (Tmpl).
///
@@ -4791,6 +4799,12 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
if (TSK == TSK_ExplicitSpecialization)
return;
+ // Never implicitly instantiate a builtin; we don't actually need a function
+ // body.
+ if (Function->getBuiltinID() && TSK == TSK_ImplicitInstantiation &&
+ !DefinitionRequired)
+ return;
+
// Don't instantiate a definition if we already have one.
const FunctionDecl *ExistingDefn = nullptr;
if (Function->isDefined(ExistingDefn,
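An illustration of the early return added above (a sketch, assuming the surrounding patch's builtin handling of std::move applies):

    template <typename T> T take(T &&t) { return std::move(t); }
    // Instantiating take<int> lowers std::move through the builtin handling,
    // so no definition of std::move itself needs to be instantiated.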
@@ -5028,7 +5042,7 @@ void Sema::InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
// PushDeclContext because we don't have a scope.
Sema::ContextRAII savedContext(*this, Function);
- if (addInstantiatedParametersToScope(*this, Function, PatternDecl, Scope,
+ if (addInstantiatedParametersToScope(Function, PatternDecl, Scope,
TemplateArgs))
return;
@@ -5543,8 +5557,18 @@ void Sema::InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
// declaration of the definition.
TemplateDeclInstantiator Instantiator(*this, Var->getDeclContext(),
TemplateArgs);
+
+ TemplateArgumentListInfo TemplateArgInfo;
+ if (const ASTTemplateArgumentListInfo *ArgInfo =
+ VarSpec->getTemplateArgsInfo()) {
+ TemplateArgInfo.setLAngleLoc(ArgInfo->getLAngleLoc());
+ TemplateArgInfo.setRAngleLoc(ArgInfo->getRAngleLoc());
+ for (const TemplateArgumentLoc &Arg : ArgInfo->arguments())
+ TemplateArgInfo.addArgument(Arg);
+ }
+
Var = cast_or_null<VarDecl>(Instantiator.VisitVarTemplateSpecializationDecl(
- VarSpec->getSpecializedTemplate(), Def, VarSpec->getTemplateArgsInfo(),
+ VarSpec->getSpecializedTemplate(), Def, TemplateArgInfo,
VarSpec->getTemplateArgs().asArray(), VarSpec));
if (Var) {
llvm::PointerUnion<VarTemplateDecl *,
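getTemplateArgsInfo() now returns a pointer that may be null, since the as-written argument list is not always stored, so the caller above copies it into a local TemplateArgumentListInfo first. For reference, a declaration that does carry as-written arguments:

    template <typename T> T zero = T{};
    template <> int zero<int> = 0; // '<int>' is the as-written argument list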
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp b/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp
index 51c79e93ab0a..790792f77b24 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaTemplateVariadic.cpp
@@ -677,21 +677,19 @@ bool Sema::CheckParameterPacksForExpansion(
Optional<unsigned> NumPartialExpansions;
SourceLocation PartiallySubstitutedPackLoc;
- for (ArrayRef<UnexpandedParameterPack>::iterator i = Unexpanded.begin(),
- end = Unexpanded.end();
- i != end; ++i) {
+ for (UnexpandedParameterPack ParmPack : Unexpanded) {
// Compute the depth and index for this parameter pack.
unsigned Depth = 0, Index = 0;
IdentifierInfo *Name;
bool IsVarDeclPack = false;
- if (const TemplateTypeParmType *TTP
- = i->first.dyn_cast<const TemplateTypeParmType *>()) {
+ if (const TemplateTypeParmType *TTP =
+ ParmPack.first.dyn_cast<const TemplateTypeParmType *>()) {
Depth = TTP->getDepth();
Index = TTP->getIndex();
Name = TTP->getIdentifier();
} else {
- NamedDecl *ND = i->first.get<NamedDecl *>();
+ NamedDecl *ND = ParmPack.first.get<NamedDecl *>();
if (isa<VarDecl>(ND))
IsVarDeclPack = true;
else
@@ -706,9 +704,9 @@ bool Sema::CheckParameterPacksForExpansion(
// Figure out whether we're instantiating to an argument pack or not.
typedef LocalInstantiationScope::DeclArgumentPack DeclArgumentPack;
- llvm::PointerUnion<Decl *, DeclArgumentPack *> *Instantiation
- = CurrentInstantiationScope->findInstantiationOf(
- i->first.get<NamedDecl *>());
+ llvm::PointerUnion<Decl *, DeclArgumentPack *> *Instantiation =
+ CurrentInstantiationScope->findInstantiationOf(
+ ParmPack.first.get<NamedDecl *>());
if (Instantiation->is<DeclArgumentPack *>()) {
// We could expand this function parameter pack.
NewPackSize = Instantiation->get<DeclArgumentPack *>()->size();
@@ -745,7 +743,7 @@ bool Sema::CheckParameterPacksForExpansion(
RetainExpansion = true;
// We don't actually know the new pack size yet.
NumPartialExpansions = NewPackSize;
- PartiallySubstitutedPackLoc = i->second;
+ PartiallySubstitutedPackLoc = ParmPack.second;
continue;
}
}
@@ -756,7 +754,7 @@ bool Sema::CheckParameterPacksForExpansion(
// Record it.
NumExpansions = NewPackSize;
FirstPack.first = Name;
- FirstPack.second = i->second;
+ FirstPack.second = ParmPack.second;
HaveFirstPack = true;
continue;
}
@@ -767,12 +765,12 @@ bool Sema::CheckParameterPacksForExpansion(
// the same number of arguments specified.
if (HaveFirstPack)
Diag(EllipsisLoc, diag::err_pack_expansion_length_conflict)
- << FirstPack.first << Name << *NumExpansions << NewPackSize
- << SourceRange(FirstPack.second) << SourceRange(i->second);
+ << FirstPack.first << Name << *NumExpansions << NewPackSize
+ << SourceRange(FirstPack.second) << SourceRange(ParmPack.second);
else
Diag(EllipsisLoc, diag::err_pack_expansion_length_conflict_multilevel)
- << Name << *NumExpansions << NewPackSize
- << SourceRange(i->second);
+ << Name << *NumExpansions << NewPackSize
+ << SourceRange(ParmPack.second);
return true;
}
}
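The length-conflict diagnostics above fire when two packs expanded in one pattern have different sizes, e.g. (sketch):

    template <typename... A> struct S {
      template <typename... B> static void zip(A... a, B... b) {
        int arr[] = {(a + b)...}; // error if sizeof...(A) != sizeof...(B)
      }
    };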
diff --git a/contrib/llvm-project/clang/lib/Sema/SemaType.cpp b/contrib/llvm-project/clang/lib/Sema/SemaType.cpp
index ab47e9f03eaf..3edce941c381 100644
--- a/contrib/llvm-project/clang/lib/Sema/SemaType.cpp
+++ b/contrib/llvm-project/clang/lib/Sema/SemaType.cpp
@@ -121,6 +121,8 @@ static void diagnoseBadTypeAttribute(Sema &S, const ParsedAttr &attr,
case ParsedAttr::AT_SwiftAsyncCall: \
case ParsedAttr::AT_VectorCall: \
case ParsedAttr::AT_AArch64VectorPcs: \
+ case ParsedAttr::AT_AArch64SVEPcs: \
+ case ParsedAttr::AT_AMDGPUKernelCall: \
case ParsedAttr::AT_MSABI: \
case ParsedAttr::AT_SysVABI: \
case ParsedAttr::AT_Pcs: \
@@ -166,12 +168,6 @@ namespace {
/// DeclSpec.
unsigned chunkIndex;
- /// Whether there are non-trivial modifications to the decl spec.
- bool trivial;
-
- /// Whether we saved the attributes in the decl spec.
- bool hasSavedAttrs;
-
/// The original set of attributes on the DeclSpec.
SmallVector<ParsedAttr *, 2> savedAttrs;
@@ -200,8 +196,7 @@ namespace {
public:
TypeProcessingState(Sema &sema, Declarator &declarator)
: sema(sema), declarator(declarator),
- chunkIndex(declarator.getNumTypeObjects()), trivial(true),
- hasSavedAttrs(false), parsedNoDeref(false) {}
+ chunkIndex(declarator.getNumTypeObjects()), parsedNoDeref(false) {}
Sema &getSema() const {
return sema;
@@ -233,13 +228,12 @@ namespace {
/// Save the current set of attributes on the DeclSpec.
void saveDeclSpecAttrs() {
// Don't try to save them multiple times.
- if (hasSavedAttrs) return;
+ if (!savedAttrs.empty())
+ return;
DeclSpec &spec = getMutableDeclSpec();
- for (ParsedAttr &AL : spec.getAttributes())
- savedAttrs.push_back(&AL);
- trivial &= savedAttrs.empty();
- hasSavedAttrs = true;
+ llvm::append_range(savedAttrs,
+ llvm::make_pointer_range(spec.getAttributes()));
}
/// Record that we had nowhere to put the given type attribute.
@@ -266,6 +260,12 @@ namespace {
return T;
}
+ /// Get a BTFTagAttributed type for the btf_type_tag attribute.
+ QualType getBTFTagAttributedType(const BTFTypeTagAttr *BTFAttr,
+ QualType WrappedType) {
+ return sema.Context.getBTFTagAttributedType(BTFAttr, WrappedType);
+ }
+
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. Also replace \p TypeWithAuto in \c TypeAttrPair if
/// necessary.
@@ -324,23 +324,18 @@ namespace {
bool didParseNoDeref() const { return parsedNoDeref; }
~TypeProcessingState() {
- if (trivial) return;
+ if (savedAttrs.empty())
+ return;
- restoreDeclSpecAttrs();
+ getMutableDeclSpec().getAttributes().clearListOnly();
+ for (ParsedAttr *AL : savedAttrs)
+ getMutableDeclSpec().getAttributes().addAtEnd(AL);
}
private:
DeclSpec &getMutableDeclSpec() const {
return const_cast<DeclSpec&>(declarator.getDeclSpec());
}
-
- void restoreDeclSpecAttrs() {
- assert(hasSavedAttrs);
-
- getMutableDeclSpec().getAttributes().clearListOnly();
- for (ParsedAttr *AL : savedAttrs)
- getMutableDeclSpec().getAttributes().addAtEnd(AL);
- }
};
} // end anonymous namespace
@@ -362,7 +357,8 @@ enum TypeAttrLocation {
};
static void processTypeAttrs(TypeProcessingState &state, QualType &type,
- TypeAttrLocation TAL, ParsedAttributesView &attrs);
+ TypeAttrLocation TAL,
+ const ParsedAttributesView &attrs);
static bool handleFunctionTypeAttr(TypeProcessingState &state, ParsedAttr &attr,
QualType &type);
@@ -633,15 +629,6 @@ static void distributeFunctionTypeAttrFromDeclSpec(TypeProcessingState &state,
QualType &declSpecType) {
state.saveDeclSpecAttrs();
- // C++11 attributes before the decl specifiers actually appertain to
- // the declarators. Move them straight there. We don't support the
- // 'put them wherever you like' semantics we allow for GNU attributes.
- if (attr.isStandardAttributeSyntax()) {
- moveAttrFromListToList(attr, state.getCurrentAttributes(),
- state.getDeclarator().getAttributes());
- return;
- }
-
// Try to distribute to the innermost.
if (distributeFunctionTypeAttrToInnermost(
state, attr, state.getCurrentAttributes(), declSpecType))
@@ -652,8 +639,10 @@ static void distributeFunctionTypeAttrFromDeclSpec(TypeProcessingState &state,
state.addIgnoredTypeAttr(attr);
}
-/// A function type attribute was written on the declarator. Try to
-/// apply it somewhere.
+/// A function type attribute was written on the declarator or declaration.
+/// Try to apply it somewhere.
+/// `Attrs` is the attribute list holding the attribute (it may belong to
+/// either the declarator or the declaration).
static void distributeFunctionTypeAttrFromDeclarator(TypeProcessingState &state,
ParsedAttr &attr,
QualType &declSpecType) {
@@ -670,7 +659,7 @@ static void distributeFunctionTypeAttrFromDeclarator(TypeProcessingState &state,
state.addIgnoredTypeAttr(attr);
}
-/// Given that there are attributes written on the declarator
+/// Given that there are attributes written on the declarator or declaration
/// itself, try to distribute any type attributes to the appropriate
/// declarator chunk.
///
@@ -679,11 +668,11 @@ static void distributeFunctionTypeAttrFromDeclarator(TypeProcessingState &state,
/// int (f ATTR)();
/// but not necessarily this:
/// int f() ATTR;
+///
+/// `Attrs` is the attribute list holding the attribute (it may belong to
+/// either the declarator or the declaration).
static void distributeTypeAttrsFromDeclarator(TypeProcessingState &state,
QualType &declSpecType) {
- // Collect all the type attributes from the declarator itself.
- assert(!state.getDeclarator().getAttributes().empty() &&
- "declarator has no attrs!");
// The called functions in this loop actually remove things from the current
// list, so iterating over the existing list isn't possible. Instead, make a
// non-owning copy and iterate over that.
@@ -1349,35 +1338,34 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// allowed to be completely missing a declspec. This is handled in the
// parser already though by it pretending to have seen an 'int' in this
// case.
- if (S.getLangOpts().ImplicitInt) {
- // In C89 mode, we only warn if there is a completely missing declspec
- // when one is not allowed.
- if (DS.isEmpty()) {
- S.Diag(DeclLoc, diag::ext_missing_declspec)
- << DS.getSourceRange()
- << FixItHint::CreateInsertion(DS.getBeginLoc(), "int");
- }
+ if (S.getLangOpts().isImplicitIntRequired()) {
+ S.Diag(DeclLoc, diag::warn_missing_type_specifier)
+ << DS.getSourceRange()
+ << FixItHint::CreateInsertion(DS.getBeginLoc(), "int");
} else if (!DS.hasTypeSpecifier()) {
// C99 and C++ require a type specifier. For example, C99 6.7.2p2 says:
// "At least one type specifier shall be given in the declaration
// specifiers in each declaration, and in the specifier-qualifier list in
// each struct declaration and type name."
- if (S.getLangOpts().CPlusPlus && !DS.isTypeSpecPipe()) {
+ if (!S.getLangOpts().isImplicitIntAllowed() && !DS.isTypeSpecPipe()) {
S.Diag(DeclLoc, diag::err_missing_type_specifier)
- << DS.getSourceRange();
+ << DS.getSourceRange();
- // When this occurs in C++ code, often something is very broken with the
- // value being declared, poison it as invalid so we don't get chains of
+ // When this occurs, often something is very broken with the value
+ // being declared; poison it as invalid so we don't get chains of
// errors.
declarator.setInvalidType(true);
} else if (S.getLangOpts().getOpenCLCompatibleVersion() >= 200 &&
DS.isTypeSpecPipe()) {
S.Diag(DeclLoc, diag::err_missing_actual_pipe_type)
- << DS.getSourceRange();
+ << DS.getSourceRange();
declarator.setInvalidType(true);
} else {
+ assert(S.getLangOpts().isImplicitIntAllowed() &&
+ "implicit int is disabled?");
S.Diag(DeclLoc, diag::ext_missing_type_specifier)
- << DS.getSourceRange();
+ << DS.getSourceRange()
+ << FixItHint::CreateInsertion(DS.getBeginLoc(), "int");
}
}
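The rewritten logic keys these diagnostics off the language mode's implicit-int rules rather than hard-coding C89 versus C++. For instance (sketch):

    foo(void); /* no type specifier: implicitly 'int' where implicit int is
                  allowed (warned about), and an error where it is not,
                  e.g. in C2x and C++ */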
@@ -1797,8 +1785,42 @@ static QualType ConvertDeclSpecToType(TypeProcessingState &state) {
// list of type attributes to be temporarily saved while the type
// attributes are pushed around.
// pipe attributes will be handled later ( at GetFullTypeForDeclarator )
- if (!DS.isTypeSpecPipe())
+ if (!DS.isTypeSpecPipe()) {
+ // We also apply declaration attributes that "slide" to the decl spec.
+ // Ordering can be important for attributes. The declaration attributes
+ // come syntactically before the decl spec attributes, so we process them
+ // in that order.
+ ParsedAttributesView SlidingAttrs;
+ for (ParsedAttr &AL : declarator.getDeclarationAttributes()) {
+ if (AL.slidesFromDeclToDeclSpecLegacyBehavior()) {
+ SlidingAttrs.addAtEnd(&AL);
+
+ // For standard syntax attributes, which would normally appertain to the
+ // declaration here, suggest moving them to the type instead. But only
+ // do this for our own vendor attributes; moving other vendors'
+ // attributes might hurt portability.
+ // There's one special case that we need to deal with here: The
+ // `MatrixType` attribute may only be used in a typedef declaration. If
+ // it's being used anywhere else, don't output the warning as
+ // ProcessDeclAttributes() will output an error anyway.
+ if (AL.isStandardAttributeSyntax() && AL.isClangScope() &&
+ !(AL.getKind() == ParsedAttr::AT_MatrixType &&
+ DS.getStorageClassSpec() != DeclSpec::SCS_typedef)) {
+ S.Diag(AL.getLoc(), diag::warn_type_attribute_deprecated_on_decl)
+ << AL;
+ }
+ }
+ }
+ // During this call to processTypeAttrs(),
+ // TypeProcessingState::getCurrentAttributes() will erroneously return a
+ // reference to the DeclSpec attributes, rather than the declaration
+ // attributes. However, this doesn't matter, as getCurrentAttributes()
+ // is only called when distributing attributes from one attribute list
+ // to another. Declaration attributes are always C++11 attributes, and these
+ // are never distributed.
+ processTypeAttrs(state, Result, TAL_DeclSpec, SlidingAttrs);
processTypeAttrs(state, Result, TAL_DeclSpec, DS.getAttributes());
+ }
// Apply const/volatile/restrict qualifiers to T.
if (unsigned TypeQuals = DS.getTypeQualifiers()) {
@@ -1874,6 +1896,14 @@ static std::string getPrintableNameForEntity(DeclarationName Entity) {
return "type name";
}
+static bool isDependentOrGNUAutoType(QualType T) {
+ if (T->isDependentType())
+ return true;
+
+ const auto *AT = dyn_cast<AutoType>(T);
+ return AT && AT->isGNUAutoType();
+}
+
QualType Sema::BuildQualifiedType(QualType T, SourceLocation Loc,
Qualifiers Qs, const DeclSpec *DS) {
if (T.isNull())
@@ -1907,7 +1937,10 @@ QualType Sema::BuildQualifiedType(QualType T, SourceLocation Loc,
DiagID = diag::err_typecheck_invalid_restrict_invalid_pointee;
ProblemTy = EltTy;
}
- } else if (!T->isDependentType()) {
+ } else if (!isDependentOrGNUAutoType(T)) {
+ // For an __auto_type variable, we may not have seen the initializer yet
+ // and so have no idea whether the underlying type is a pointer type or
+ // not.
DiagID = diag::err_typecheck_invalid_restrict_not_pointer;
ProblemTy = T;
}
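The comment above refers to GNU C's __auto_type, whose pointer-ness is unknown until the initializer has been seen, so the restrict check is deferred rather than rejected up front. A sketch:

    int *q;
    __auto_type restrict r = q; /* deduced type is 'int *', a pointer, but
                                   that is only known after deduction */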
@@ -2125,6 +2158,11 @@ QualType Sema::BuildPointerType(QualType T,
return QualType();
}
+ if (getLangOpts().HLSL) {
+ Diag(Loc, diag::err_hlsl_pointers_unsupported) << 0;
+ return QualType();
+ }
+
if (checkQualifiedFunction(*this, T, Loc, QFK_Pointer))
return QualType();
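For reference, the new HLSL checks reject pointer and reference types outright; in HLSL source both of these now hit err_hlsl_pointers_unsupported:

    int *p;     // error: pointers are not supported in HLSL
    int &r = x; // error: references are not supported in HLSL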
@@ -2190,6 +2228,11 @@ QualType Sema::BuildReferenceType(QualType T, bool SpelledAsLValue,
return QualType();
}
+ if (getLangOpts().HLSL) {
+ Diag(Loc, diag::err_hlsl_pointers_unsupported) << 1;
+ return QualType();
+ }
+
if (checkQualifiedFunction(*this, T, Loc, QFK_Reference))
return QualType();
@@ -2458,6 +2501,9 @@ QualType Sema::BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
} else if (isSFINAEContext()) {
VLADiag = diag::err_vla_in_sfinae;
VLAIsError = true;
+ } else if (getLangOpts().OpenMP && isInOpenMPTaskUntiedContext()) {
+ VLADiag = diag::err_openmp_vla_in_task_untied;
+ VLAIsError = true;
} else {
VLADiag = diag::ext_vla;
VLAIsError = false;
@@ -2613,7 +2659,7 @@ QualType Sema::BuildVectorType(QualType CurType, Expr *SizeExpr,
return QualType();
}
- if (VectorSizeBits % TypeSize) {
+ if (!TypeSize || VectorSizeBits % TypeSize) {
Diag(AttrLoc, diag::err_attribute_invalid_size)
<< SizeExpr->getSourceRange();
return QualType();
@@ -2641,9 +2687,12 @@ QualType Sema::BuildExtVectorType(QualType T, Expr *ArraySize,
// reserved data type under OpenCL v2.0 s6.1.4), we don't support selects
// on bitvectors, and we have no well-defined ABI for bitvectors, so vectors
// of bool aren't allowed.
+ //
+ // We explicitly allow bool elements in ext_vector_type for C/C++.
+ bool IsNoBoolVecLang = getLangOpts().OpenCL || getLangOpts().OpenCLCPlusPlus;
if ((!T->isDependentType() && !T->isIntegerType() &&
!T->isRealFloatingType()) ||
- T->isBooleanType()) {
+ (IsNoBoolVecLang && T->isBooleanType())) {
Diag(AttrLoc, diag::err_attribute_invalid_vector_type) << T;
return QualType();
}
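With the change above, boolean element types are rejected only for OpenCL; C and C++ now accept them:

    typedef bool bool4 __attribute__((ext_vector_type(4))); // OK in C/C++,
                                                            // still an error
                                                            // in OpenCL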
@@ -2943,6 +2992,11 @@ QualType Sema::BuildMemberPointerType(QualType T, QualType Class,
return QualType();
}
+ if (getLangOpts().HLSL) {
+ Diag(Loc, diag::err_hlsl_pointers_unsupported) << 0;
+ return QualType();
+ }
+
// Adjust the default free function calling convention to the default method
// calling convention.
bool IsCtorOrDtor =
@@ -3373,8 +3427,10 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
break;
}
- if (!D.getAttributes().empty())
- distributeTypeAttrsFromDeclarator(state, T);
+ // Note: We don't need to distribute declaration attributes (i.e.
+ // D.getDeclarationAttributes()) because those are always C++11 attributes,
+ // and those don't get distributed.
+ distributeTypeAttrsFromDeclarator(state, T);
// Find the deduced type in this type. Look in the trailing return type if we
// have one, otherwise in the DeclSpec type.
@@ -3506,8 +3562,12 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
case DeclaratorContext::FunctionalCast:
if (isa<DeducedTemplateSpecializationType>(Deduced))
break;
+ if (SemaRef.getLangOpts().CPlusPlus2b && IsCXXAutoType &&
+ !Auto->isDecltypeAuto())
+ break; // auto(x)
LLVM_FALLTHROUGH;
case DeclaratorContext::TypeName:
+ case DeclaratorContext::Association:
Error = 15; // Generic
break;
case DeclaratorContext::File:
@@ -3618,6 +3678,7 @@ static QualType GetDeclSpecTypeForDeclarator(TypeProcessingState &state,
case DeclaratorContext::ObjCCatch:
case DeclaratorContext::TemplateArg:
case DeclaratorContext::TemplateTypeArg:
+ case DeclaratorContext::Association:
DiagID = diag::err_type_defined_in_type_specifier;
break;
case DeclaratorContext::Prototype:
@@ -4670,7 +4731,8 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
AttrList.hasAttribute(ParsedAttr::AT_CFReturnsNotRetained);
};
if (const auto *InnermostChunk = D.getInnermostNonParenChunk()) {
- if (hasCFReturnsAttr(D.getAttributes()) ||
+ if (hasCFReturnsAttr(D.getDeclarationAttributes()) ||
+ hasCFReturnsAttr(D.getAttributes()) ||
hasCFReturnsAttr(InnermostChunk->getAttrs()) ||
hasCFReturnsAttr(D.getDeclSpec().getAttributes())) {
inferNullability = NullabilityKind::Nullable;
@@ -4705,6 +4767,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
case DeclaratorContext::TypeName:
case DeclaratorContext::FunctionalCast:
case DeclaratorContext::RequiresExpr:
+ case DeclaratorContext::Association:
// Don't infer in these contexts.
break;
}
@@ -5228,8 +5291,11 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
FunctionType::ExtInfo EI(
getCCForDeclaratorChunk(S, D, DeclType.getAttrs(), FTI, chunkIndex));
- if (!FTI.NumParams && !FTI.isVariadic && !LangOpts.CPlusPlus
- && !LangOpts.OpenCL) {
+ // OpenCL disallows functions without a prototype, but it doesn't enforce
+ // strict prototypes as in C2x because it allows a function definition to
+ // have an identifier list. See OpenCL 3.0 6.11/g for more details.
+ if (!FTI.NumParams && !FTI.isVariadic &&
+ !LangOpts.requiresStrictPrototypes() && !LangOpts.OpenCL) {
// Simple void foo(), where the incoming T is the result type.
T = Context.getFunctionNoProtoType(T, EI);
} else {
@@ -5237,7 +5303,11 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
// function is marked with the "overloadable" attribute. Scan
// for this attribute now.
if (!FTI.NumParams && FTI.isVariadic && !LangOpts.CPlusPlus)
- if (!D.getAttributes().hasAttribute(ParsedAttr::AT_Overloadable))
+ if (!D.getDeclarationAttributes().hasAttribute(
+ ParsedAttr::AT_Overloadable) &&
+ !D.getAttributes().hasAttribute(ParsedAttr::AT_Overloadable) &&
+ !D.getDeclSpec().getAttributes().hasAttribute(
+ ParsedAttr::AT_Overloadable))
S.Diag(FTI.getEllipsisLoc(), diag::err_ellipsis_first_param);
if (FTI.NumParams && FTI.Params[0].Param == nullptr) {
@@ -5246,8 +5316,10 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
S.Diag(FTI.Params[0].IdentLoc,
diag::err_ident_list_in_fn_declaration);
D.setInvalidType(true);
- // Recover by creating a K&R-style function type.
- T = Context.getFunctionNoProtoType(T, EI);
+ // Recover by creating a K&R-style function type, if possible.
+ T = (!LangOpts.requiresStrictPrototypes() && !LangOpts.OpenCL)
+ ? Context.getFunctionNoProtoType(T, EI)
+ : Context.IntTy;
break;
}
@@ -5519,15 +5591,16 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
diag::warn_noderef_on_non_pointer_or_array);
// GNU warning -Wstrict-prototypes
- // Warn if a function declaration is without a prototype.
+ // Warn if a function declaration or definition is without a prototype.
// This warning is issued for all kinds of unprototyped function
// declarations (i.e. function type typedef, function pointer etc.)
// C99 6.7.5.3p14:
// The empty list in a function declarator that is not part of a definition
// of that function specifies that no information about the number or types
// of the parameters is supplied.
- if (!LangOpts.CPlusPlus &&
- D.getFunctionDefinitionKind() == FunctionDefinitionKind::Declaration) {
+ // See ActOnFinishFunctionBody() and MergeFunctionDecl() for handling of
+ // function declarations whose behavior changes in C2x.
+ if (!LangOpts.requiresStrictPrototypes()) {
bool IsBlock = false;
for (const DeclaratorChunk &DeclType : D.type_objects()) {
switch (DeclType.Kind) {
@@ -5538,8 +5611,10 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
const DeclaratorChunk::FunctionTypeInfo &FTI = DeclType.Fun;
// We suppress the warning when there's no LParen location, as this
// indicates the declaration was an implicit declaration, which gets
- // warned about separately via -Wimplicit-function-declaration.
- if (FTI.NumParams == 0 && !FTI.isVariadic && FTI.getLParenLoc().isValid())
+ // warned about separately via -Wimplicit-function-declaration. We also
+ // suppress the warning when we know the function has a prototype.
+ if (!FTI.hasPrototype && FTI.NumParams == 0 && !FTI.isVariadic &&
+ FTI.getLParenLoc().isValid())
S.Diag(DeclType.Loc, diag::warn_strict_prototypes)
<< IsBlock
<< FixItHint::CreateInsertion(FTI.getRParenLoc(), "void");
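A quick illustration of what -Wstrict-prototypes flags after this change (C, before C2x):

    int f();     /* warns: no prototype; the fix-it inserts 'void' */
    int g(void); /* prototyped: no warning */
    int (*h)();  /* function pointers and typedefs are flagged too */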
@@ -5646,7 +5721,14 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
}
}
- // Apply any undistributed attributes from the declarator.
+ // Apply any undistributed attributes from the declaration or declarator.
+ ParsedAttributesView NonSlidingAttrs;
+ for (ParsedAttr &AL : D.getDeclarationAttributes()) {
+ if (!AL.slidesFromDeclToDeclSpecLegacyBehavior()) {
+ NonSlidingAttrs.addAtEnd(&AL);
+ }
+ }
+ processTypeAttrs(state, T, TAL_DeclName, NonSlidingAttrs);
processTypeAttrs(state, T, TAL_DeclName, D.getAttributes());
// Diagnose any ignored type attributes.
@@ -5737,6 +5819,7 @@ static TypeSourceInfo *GetFullTypeForDeclarator(TypeProcessingState &state,
case DeclaratorContext::TrailingReturnVar:
case DeclaratorContext::TemplateArg:
case DeclaratorContext::TemplateTypeArg:
+ case DeclaratorContext::Association:
// FIXME: We may want to allow parameter packs in block-literal contexts
// in the future.
S.Diag(D.getEllipsisLoc(),
@@ -5907,6 +5990,9 @@ namespace {
Visit(TL.getModifiedLoc());
fillAttributedTypeLoc(TL, State);
}
+ void VisitBTFTagAttributedTypeLoc(BTFTagAttributedTypeLoc TL) {
+ Visit(TL.getWrappedLoc());
+ }
void VisitMacroQualifiedTypeLoc(MacroQualifiedTypeLoc TL) {
Visit(TL.getInnerLoc());
TL.setExpansionLoc(
@@ -6133,6 +6219,9 @@ namespace {
void VisitAttributedTypeLoc(AttributedTypeLoc TL) {
fillAttributedTypeLoc(TL, State);
}
+ void VisitBTFTagAttributedTypeLoc(BTFTagAttributedTypeLoc TL) {
+ // nothing
+ }
void VisitAdjustedTypeLoc(AdjustedTypeLoc TL) {
// nothing
}
@@ -6548,8 +6637,8 @@ static void HandleBTFTypeTagAttribute(QualType &Type, const ParsedAttr &Attr,
ASTContext &Ctx = S.Context;
StringRef BTFTypeTag = StrLiteral->getString();
- Type = State.getAttributedType(
- ::new (Ctx) BTFTypeTagAttr(Ctx, Attr, BTFTypeTag), Type, Type);
+ Type = State.getBTFTagAttributedType(
+ ::new (Ctx) BTFTypeTagAttr(Ctx, Attr, BTFTypeTag), Type);
}
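btf_type_tag now produces a dedicated BTFTagAttributed node instead of a generic AttributedType. Typical usage, in the BPF/kernel style:

    #define __user __attribute__((btf_type_tag("user")))
    int __user *uptr; /* records a "user" tag on the pointee type in BTF */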
/// HandleAddressSpaceTypeAttribute - Process an address_space attribute on the
@@ -7439,6 +7528,10 @@ static Attr *getCCTypeAttr(ASTContext &Ctx, ParsedAttr &Attr) {
return createSimpleAttr<VectorCallAttr>(Ctx, Attr);
case ParsedAttr::AT_AArch64VectorPcs:
return createSimpleAttr<AArch64VectorPcsAttr>(Ctx, Attr);
+ case ParsedAttr::AT_AArch64SVEPcs:
+ return createSimpleAttr<AArch64SVEPcsAttr>(Ctx, Attr);
+ case ParsedAttr::AT_AMDGPUKernelCall:
+ return createSimpleAttr<AMDGPUKernelCallAttr>(Ctx, Attr);
case ParsedAttr::AT_Pcs: {
// The attribute may have had a fixit applied where we treated an
// identifier as a string literal. The contents of the string are valid,
@@ -8090,6 +8183,34 @@ static void HandleMatrixTypeAttr(QualType &CurType, const ParsedAttr &Attr,
CurType = T;
}
+static void HandleAnnotateTypeAttr(TypeProcessingState &State,
+ QualType &CurType, const ParsedAttr &PA) {
+ Sema &S = State.getSema();
+
+ if (PA.getNumArgs() < 1) {
+ S.Diag(PA.getLoc(), diag::err_attribute_too_few_arguments) << PA << 1;
+ return;
+ }
+
+ // Make sure that there is a string literal as the annotation's first
+ // argument.
+ StringRef Str;
+ if (!S.checkStringLiteralArgumentAttr(PA, 0, Str))
+ return;
+
+ llvm::SmallVector<Expr *, 4> Args;
+ Args.reserve(PA.getNumArgs() - 1);
+ for (unsigned Idx = 1; Idx < PA.getNumArgs(); Idx++) {
+ assert(!PA.isArgIdent(Idx));
+ Args.push_back(PA.getArgAsExpr(Idx));
+ }
+ if (!S.ConstantFoldAttrArgs(PA, Args))
+ return;
+ auto *AnnotateTypeAttr =
+ AnnotateTypeAttr::Create(S.Context, Str, Args.data(), Args.size(), PA);
+ CurType = State.getAttributedType(AnnotateTypeAttr, CurType, CurType);
+}
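A minimal use of the attribute HandleAnnotateTypeAttr implements: the first argument must be a string literal, and any further arguments must constant-fold:

    int *[[clang::annotate_type("owner", 1)]] p; // attaches to the type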
+
static void HandleLifetimeBoundAttr(TypeProcessingState &State,
QualType &CurType,
ParsedAttr &Attr) {
@@ -8100,22 +8221,14 @@ static void HandleLifetimeBoundAttr(TypeProcessingState &State,
}
}
-static bool isAddressSpaceKind(const ParsedAttr &attr) {
- auto attrKind = attr.getKind();
-
- return attrKind == ParsedAttr::AT_AddressSpace ||
- attrKind == ParsedAttr::AT_OpenCLPrivateAddressSpace ||
- attrKind == ParsedAttr::AT_OpenCLGlobalAddressSpace ||
- attrKind == ParsedAttr::AT_OpenCLGlobalDeviceAddressSpace ||
- attrKind == ParsedAttr::AT_OpenCLGlobalHostAddressSpace ||
- attrKind == ParsedAttr::AT_OpenCLLocalAddressSpace ||
- attrKind == ParsedAttr::AT_OpenCLConstantAddressSpace ||
- attrKind == ParsedAttr::AT_OpenCLGenericAddressSpace;
-}
-
static void processTypeAttrs(TypeProcessingState &state, QualType &type,
TypeAttrLocation TAL,
- ParsedAttributesView &attrs) {
+ const ParsedAttributesView &attrs) {
+
+ state.setParsedNoDeref(false);
+ if (attrs.empty())
+ return;
+
// Scan through and apply attributes to this type where it makes sense. Some
// attributes (such as __address_space__, __vector_size__, etc) apply to the
// type, but others can be present in the type specifiers even though they
@@ -8125,9 +8238,6 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
// sure we visit every element once. Copy the attributes list, and iterate
// over that.
ParsedAttributesView AttrsCopy{attrs};
-
- state.setParsedNoDeref(false);
-
for (ParsedAttr &attr : AttrsCopy) {
// Skip attributes that were marked to be invalid.
@@ -8150,11 +8260,14 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
if (!IsTypeAttr)
continue;
}
- } else if (TAL != TAL_DeclChunk && !isAddressSpaceKind(attr)) {
+ } else if (TAL != TAL_DeclSpec && TAL != TAL_DeclChunk &&
+ !attr.isTypeAttr()) {
// Otherwise, only consider type processing for a C++11 attribute if
- // it's actually been applied to a type.
- // We also allow C++11 address_space and
- // OpenCL language address space attributes to pass through.
+ // - it has actually been applied to a type (decl-specifier-seq or
+ // declarator chunk), or
+ // - it is a type attribute, irrespective of where it was applied (so
+ // that we can support the legacy behavior of some type attributes
+ // that can be applied to the declaration name).
continue;
}
}
@@ -8172,10 +8285,14 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
break;
case ParsedAttr::UnknownAttribute:
- if (attr.isStandardAttributeSyntax() && TAL == TAL_DeclChunk)
+ if (attr.isStandardAttributeSyntax()) {
state.getSema().Diag(attr.getLoc(),
diag::warn_unknown_attribute_ignored)
<< attr << attr.getRange();
+ // Mark the attribute as invalid so we don't emit the same diagnostic
+ // multiple times.
+ attr.setInvalid();
+ }
break;
case ParsedAttr::IgnoredAttribute:
@@ -8244,6 +8361,15 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
break;
case ParsedAttr::AT_NoDeref: {
+ // FIXME: `noderef` currently doesn't work correctly in [[]] syntax.
+ // See https://github.com/llvm/llvm-project/issues/55790 for details.
+ // For the time being, we simply emit a warning that the attribute is
+ // ignored.
+ if (attr.isStandardAttributeSyntax()) {
+ state.getSema().Diag(attr.getLoc(), diag::warn_attribute_ignored)
+ << attr;
+ break;
+ }
ASTContext &Ctx = state.getSema().Context;
type = state.getAttributedType(createSimpleAttr<NoDerefAttr>(Ctx, attr),
type, type);
@@ -8321,6 +8447,16 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
FUNCTION_TYPE_ATTRS_CASELIST:
attr.setUsedAsTypeAttr();
+ // Attributes with standard syntax have strict rules for what they
+ // appertain to and hence should not use the "distribution" logic below.
+ if (attr.isStandardAttributeSyntax()) {
+ if (!handleFunctionTypeAttr(state, attr, type)) {
+ diagnoseBadTypeAttribute(state.getSema(), attr, type);
+ attr.setInvalid();
+ }
+ break;
+ }
+
// Never process function type attributes as part of the
// declaration-specifiers.
if (TAL == TAL_DeclSpec)
@@ -8351,6 +8487,11 @@ static void processTypeAttrs(TypeProcessingState &state, QualType &type,
attr.setUsedAsTypeAttr();
break;
}
+ case ParsedAttr::AT_AnnotateType: {
+ HandleAnnotateTypeAttr(state, type, attr);
+ attr.setUsedAsTypeAttr();
+ break;
+ }
}
// Handle attributes that are defined in a macro. We do not want this to be
@@ -8501,17 +8642,8 @@ bool Sema::hasStructuralCompatLayout(Decl *D, Decl *Suggested) {
return Ctx.IsEquivalent(D, Suggested);
}
-/// Determine whether there is any declaration of \p D that was ever a
-/// definition (perhaps before module merging) and is currently visible.
-/// \param D The definition of the entity.
-/// \param Suggested Filled in with the declaration that should be made visible
-/// in order to provide a definition of this entity.
-/// \param OnlyNeedComplete If \c true, we only need the type to be complete,
-/// not defined. This only matters for enums with a fixed underlying
-/// type, since in all other cases, a type is complete if and only if it
-/// is defined.
-bool Sema::hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
- bool OnlyNeedComplete) {
+bool Sema::hasAcceptableDefinition(NamedDecl *D, NamedDecl **Suggested,
+ AcceptableKind Kind, bool OnlyNeedComplete) {
// Easy case: if we don't have modules, all declarations are visible.
if (!getLangOpts().Modules && !getLangOpts().ModulesLocalVisibility)
return true;
@@ -8555,13 +8687,14 @@ bool Sema::hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
VD = Pattern;
D = VD->getDefinition();
}
+
assert(D && "missing definition for pattern of instantiated definition");
*Suggested = D;
- auto DefinitionIsVisible = [&] {
+ auto DefinitionIsAcceptable = [&] {
// The (primary) definition might be in a visible module.
- if (isVisible(D))
+ if (isAcceptable(D, Kind))
return true;
// A visible module might have a merged definition instead.
@@ -8579,19 +8712,51 @@ bool Sema::hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
return false;
};
- if (DefinitionIsVisible())
+ if (DefinitionIsAcceptable())
return true;
// The external source may have additional definitions of this entity that are
// visible, so complete the redeclaration chain now and ask again.
if (auto *Source = Context.getExternalSource()) {
Source->CompleteRedeclChain(D);
- return DefinitionIsVisible();
+ return DefinitionIsAcceptable();
}
return false;
}
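hasAcceptableDefinition generalizes the old visibility-only query so callers can ask for either visibility or mere reachability, mirroring the C++20 modules distinction. Roughly (a sketch of the standard semantics):

    // a.cppm
    export module A;
    export struct S { int x; };
    // b.cppm
    export module B;
    import A;        // A is not re-exported
    export S make(); // in importers of B, S is reachable (make().x works)
                     // but the name S is not visible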
+/// Determine whether there is any declaration of \p D that was ever a
+/// definition (perhaps before module merging) and is currently visible.
+/// \param D The definition of the entity.
+/// \param Suggested Filled in with the declaration that should be made visible
+/// in order to provide a definition of this entity.
+/// \param OnlyNeedComplete If \c true, we only need the type to be complete,
+/// not defined. This only matters for enums with a fixed underlying
+/// type, since in all other cases, a type is complete if and only if it
+/// is defined.
+bool Sema::hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
+ bool OnlyNeedComplete) {
+ return hasAcceptableDefinition(D, Suggested, Sema::AcceptableKind::Visible,
+ OnlyNeedComplete);
+}
+
+/// Determine whether there is any declaration of \p D that was ever a
+/// definition (perhaps before module merging) and is currently
+/// reachable.
+/// \param D The definition of the entity.
+/// \param Suggested Filled in with the declaration that should be made
+/// reachable in order to provide a definition of this entity.
+/// \param OnlyNeedComplete If \c true, we only need the type to be complete,
+/// not defined. This only matters for enums with a fixed underlying
+/// type, since in all other cases, a type is complete if and only if it
+/// is defined.
+bool Sema::hasReachableDefinition(NamedDecl *D, NamedDecl **Suggested,
+ bool OnlyNeedComplete) {
+ return hasAcceptableDefinition(D, Suggested, Sema::AcceptableKind::Reachable,
+ OnlyNeedComplete);
+}
+
/// Locks in the inheritance model for the given class and all of its bases.
static void assignInheritanceModel(Sema &S, CXXRecordDecl *RD) {
RD = RD->getMostRecentNonInjectedDecl();
@@ -8661,20 +8826,19 @@ bool Sema::RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
// Check that any necessary explicit specializations are visible. For an
// enum, we just need the declaration, so don't check this.
if (Def && !isa<EnumDecl>(Def))
- checkSpecializationVisibility(Loc, Def);
+ checkSpecializationReachability(Loc, Def);
// If we have a complete type, we're done.
if (!Incomplete) {
- // If we know about the definition but it is not visible, complain.
- NamedDecl *SuggestedDef = nullptr;
+ NamedDecl *Suggested = nullptr;
if (Def &&
- !hasVisibleDefinition(Def, &SuggestedDef, /*OnlyNeedComplete*/true)) {
+ !hasReachableDefinition(Def, &Suggested, /*OnlyNeedComplete=*/true)) {
// If the user is going to see an error here, recover by making the
// definition visible.
bool TreatAsComplete = Diagnoser && !isSFINAEContext();
- if (Diagnoser && SuggestedDef)
- diagnoseMissingImport(Loc, SuggestedDef, MissingImportKind::Definition,
- /*Recover*/TreatAsComplete);
+ if (Diagnoser && Suggested)
+ diagnoseMissingImport(Loc, Suggested, MissingImportKind::Definition,
+ /*Recover*/ TreatAsComplete);
return !TreatAsComplete;
} else if (Def && !TemplateInstCallbacks.empty()) {
CodeSynthesisContext TempInst;
@@ -9078,7 +9242,7 @@ QualType Sema::BuildUnaryTransformType(QualType BaseType,
}
QualType Sema::BuildAtomicType(QualType T, SourceLocation Loc) {
- if (!T->isDependentType()) {
+ if (!isDependentOrGNUAutoType(T)) {
// FIXME: It isn't entirely clear whether incomplete atomic types
// are allowed or not; for simplicity, ban them for the moment.
if (RequireCompleteType(Loc, T, diag::err_atomic_specifier_bad_type, 0))
diff --git a/contrib/llvm-project/clang/lib/Sema/TreeTransform.h b/contrib/llvm-project/clang/lib/Sema/TreeTransform.h
index 5c37fcaaea13..a8589191fc91 100644
--- a/contrib/llvm-project/clang/lib/Sema/TreeTransform.h
+++ b/contrib/llvm-project/clang/lib/Sema/TreeTransform.h
@@ -1470,9 +1470,28 @@ public:
///
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
- ExprResult RebuildCoawaitExpr(SourceLocation CoawaitLoc, Expr *Result,
+ ExprResult RebuildCoawaitExpr(SourceLocation CoawaitLoc, Expr *Operand,
+ UnresolvedLookupExpr *OpCoawaitLookup,
bool IsImplicit) {
- return getSema().BuildResolvedCoawaitExpr(CoawaitLoc, Result, IsImplicit);
+ // This function rebuilds a coawait-expr given its operand.
+ // For an explicit coawait-expr, the rebuild involves the full set
+ // of transformations performed by BuildUnresolvedCoawaitExpr(),
+ // including calling await_transform().
+ // For an implicit coawait-expr, we need to rebuild the "operator
+ // coawait" but not await_transform(), so use BuildResolvedCoawaitExpr().
+ // This mirrors how the implicit CoawaitExpr is originally created
+ // in Sema::ActOnCoroutineBodyStart().
+ if (IsImplicit) {
+ ExprResult Suspend = getSema().BuildOperatorCoawaitCall(
+ CoawaitLoc, Operand, OpCoawaitLookup);
+ if (Suspend.isInvalid())
+ return ExprError();
+ return getSema().BuildResolvedCoawaitExpr(CoawaitLoc, Operand,
+ Suspend.get(), true);
+ }
+
+ return getSema().BuildUnresolvedCoawaitExpr(CoawaitLoc, Operand,
+ OpCoawaitLookup);
}
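The distinction the comment above draws, in source terms (a sketch; 'task' stands in for some awaitable coroutine type):

    task<int> f() {
      co_await g(); // explicit: the rebuild goes through await_transform()
      co_return 0;  // the implicit awaits on initial/final suspend skip
                    // await_transform() when rebuilt
    }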
/// Build a new co_await expression.
@@ -1900,14 +1919,13 @@ public:
///
/// By default, performs semantic analysis to build the new OpenMP clause.
/// Subclasses may override this routine to provide different behavior.
- OMPClause *
- RebuildOMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
- SourceLocation DepLoc, SourceLocation ColonLoc,
- ArrayRef<Expr *> VarList, SourceLocation StartLoc,
- SourceLocation LParenLoc, SourceLocation EndLoc) {
- return getSema().ActOnOpenMPDependClause(DepModifier, DepKind, DepLoc,
- ColonLoc, VarList, StartLoc,
- LParenLoc, EndLoc);
+ OMPClause *RebuildOMPDependClause(OMPDependClause::DependDataTy Data,
+ Expr *DepModifier, ArrayRef<Expr *> VarList,
+ SourceLocation StartLoc,
+ SourceLocation LParenLoc,
+ SourceLocation EndLoc) {
+ return getSema().ActOnOpenMPDependClause(Data, DepModifier, VarList,
+ StartLoc, LParenLoc, EndLoc);
}
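The new DependDataTy bundles the dependency kind with its source locations, including the location of OpenMP 5.1's omp_all_memory. In directive terms:

    #pragma omp task depend(inout: omp_all_memory) // OpenMP 5.1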
/// Build a new OpenMP 'device' clause.
@@ -2101,6 +2119,15 @@ public:
return getSema().ActOnOpenMPIsDevicePtrClause(VarList, Locs);
}
+ /// Build a new OpenMP 'has_device_addr' clause.
+ ///
+ /// By default, performs semantic analysis to build the new OpenMP clause.
+ /// Subclasses may override this routine to provide different behavior.
+ OMPClause *RebuildOMPHasDeviceAddrClause(ArrayRef<Expr *> VarList,
+ const OMPVarListLocTy &Locs) {
+ return getSema().ActOnOpenMPHasDeviceAddrClause(VarList, Locs);
+ }
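has_device_addr is the OpenMP 5.1 counterpart of is_device_ptr for objects (not just pointers) that are already addressable on the device, typically paired with use_device_addr (sketch):

    #pragma omp target data use_device_addr(x)
    {
    #pragma omp target has_device_addr(x)
      x += 1;
    }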
+
/// Build a new OpenMP 'defaultmap' clause.
///
/// By default, performs semantic analysis to build the new OpenMP clause.
@@ -2629,6 +2656,13 @@ public:
/*Scope=*/nullptr, Callee, LParenLoc, Args, RParenLoc, ExecConfig);
}
+ ExprResult RebuildCxxSubscriptExpr(Expr *Callee, SourceLocation LParenLoc,
+ MultiExprArg Args,
+ SourceLocation RParenLoc) {
+ return getSema().ActOnArraySubscriptExpr(
+ /*Scope=*/nullptr, Callee, LParenLoc, Args, RParenLoc);
+ }
+
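RebuildCxxSubscriptExpr exists for C++23 multidimensional subscripts, where operator[] may take an argument list just like operator():

    struct Matrix {
      int &operator[](int i, int j); // C++23
    };
    // m[1, 2] is a CXXOperatorCallExpr with OO_Subscript and two arguments,
    // rebuilt via ActOnArraySubscriptExpr rather than as a plain call.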
/// Build a new member access expression.
///
/// By default, performs semantic analysis to build the new expression.
@@ -3379,10 +3413,11 @@ public:
/// By default, performs semantic analysis to build the new expression.
/// Subclasses may override this routine to provide different behavior.
ExprResult RebuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
- SourceLocation BuiltinLoc,
+ QualType ResultTy, SourceLocation BuiltinLoc,
SourceLocation RPLoc,
DeclContext *ParentContext) {
- return getSema().BuildSourceLocExpr(Kind, BuiltinLoc, RPLoc, ParentContext);
+ return getSema().BuildSourceLocExpr(Kind, ResultTy, BuiltinLoc, RPLoc,
+ ParentContext);
}
/// Build a new Objective-C boxed expression.
@@ -4263,7 +4298,7 @@ TreeTransform<Derived>::TransformTemplateName(CXXScopeSpec &SS,
NamedDecl *FirstQualifierInScope,
bool AllowInjectedClassName) {
if (QualifiedTemplateName *QTN = Name.getAsQualifiedTemplateName()) {
- TemplateDecl *Template = QTN->getTemplateDecl();
+ TemplateDecl *Template = QTN->getUnderlyingTemplate().getAsTemplateDecl();
assert(Template && "qualified template name must refer to a template");
TemplateDecl *TransTemplate
@@ -6858,6 +6893,13 @@ QualType TreeTransform<Derived>::TransformAttributedType(
return result;
}
+template <typename Derived>
+QualType TreeTransform<Derived>::TransformBTFTagAttributedType(
+ TypeLocBuilder &TLB, BTFTagAttributedTypeLoc TL) {
+ // The BTFTagAttributedType is available for C only.
+ llvm_unreachable("Unexpected TreeTransform for BTFTagAttributedType");
+}
+
template<typename Derived>
QualType
TreeTransform<Derived>::TransformParenType(TypeLocBuilder &TLB,
@@ -7896,12 +7938,6 @@ TreeTransform<Derived>::TransformCoroutineBodyStmt(CoroutineBodyStmt *S) {
return StmtError();
Builder.Deallocate = DeallocRes.get();
- assert(S->getResultDecl() && "ResultDecl must already be built");
- StmtResult ResultDecl = getDerived().TransformStmt(S->getResultDecl());
- if (ResultDecl.isInvalid())
- return StmtError();
- Builder.ResultDecl = ResultDecl.get();
-
if (auto *ReturnStmt = S->getReturnStmt()) {
StmtResult Res = getDerived().TransformStmt(ReturnStmt);
if (Res.isInvalid())
@@ -7927,18 +7963,27 @@ TreeTransform<Derived>::TransformCoreturnStmt(CoreturnStmt *S) {
S->isImplicit());
}
-template<typename Derived>
-ExprResult
-TreeTransform<Derived>::TransformCoawaitExpr(CoawaitExpr *E) {
- ExprResult Result = getDerived().TransformInitializer(E->getOperand(),
- /*NotCopyInit*/false);
- if (Result.isInvalid())
+template <typename Derived>
+ExprResult TreeTransform<Derived>::TransformCoawaitExpr(CoawaitExpr *E) {
+ ExprResult Operand = getDerived().TransformInitializer(E->getOperand(),
+ /*NotCopyInit*/ false);
+ if (Operand.isInvalid())
return ExprError();
+ // Rebuild the common-expr from the operand rather than transforming it
+ // separately.
+
+ // FIXME: getCurScope() should not be used during template instantiation.
+ // We should pick up the set of unqualified lookup results for operator
+ // co_await during the initial parse.
+ ExprResult Lookup = getSema().BuildOperatorCoawaitLookupExpr(
+ getSema().getCurScope(), E->getKeywordLoc());
+
// Always rebuild; we don't know if this needs to be injected into a new
// context or if the promise type has changed.
- return getDerived().RebuildCoawaitExpr(E->getKeywordLoc(), Result.get(),
- E->isImplicit());
+ return getDerived().RebuildCoawaitExpr(
+ E->getKeywordLoc(), Operand.get(),
+ cast<UnresolvedLookupExpr>(Lookup.get()), E->isImplicit());
}
template <typename Derived>
@@ -8728,6 +8773,17 @@ StmtResult TreeTransform<Derived>::TransformOMPParallelMasterDirective(
}
template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPParallelMaskedDirective(
+ OMPParallelMaskedDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel_masked, DirName,
+ nullptr, D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPParallelSectionsDirective(
OMPParallelSectionsDirective *D) {
DeclarationNameInfo DirName;
@@ -8992,6 +9048,17 @@ StmtResult TreeTransform<Derived>::TransformOMPMasterTaskLoopDirective(
}
template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPMaskedTaskLoopDirective(
+ OMPMaskedTaskLoopDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_masked_taskloop, DirName,
+ nullptr, D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPMasterTaskLoopSimdDirective(
OMPMasterTaskLoopSimdDirective *D) {
DeclarationNameInfo DirName;
@@ -9003,6 +9070,17 @@ StmtResult TreeTransform<Derived>::TransformOMPMasterTaskLoopSimdDirective(
}
template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPMaskedTaskLoopSimdDirective(
+ OMPMaskedTaskLoopSimdDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_masked_taskloop_simd, DirName,
+ nullptr, D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPParallelMasterTaskLoopDirective(
OMPParallelMasterTaskLoopDirective *D) {
DeclarationNameInfo DirName;
@@ -9014,6 +9092,17 @@ StmtResult TreeTransform<Derived>::TransformOMPParallelMasterTaskLoopDirective(
}
template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPParallelMaskedTaskLoopDirective(
+ OMPParallelMaskedTaskLoopDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(
+ OMPD_parallel_masked_taskloop, DirName, nullptr, D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
StmtResult
TreeTransform<Derived>::TransformOMPParallelMasterTaskLoopSimdDirective(
OMPParallelMasterTaskLoopSimdDirective *D) {
@@ -9026,6 +9115,18 @@ TreeTransform<Derived>::TransformOMPParallelMasterTaskLoopSimdDirective(
}
template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPParallelMaskedTaskLoopSimdDirective(
+ OMPParallelMaskedTaskLoopSimdDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(
+ OMPD_parallel_masked_taskloop_simd, DirName, nullptr, D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
StmtResult TreeTransform<Derived>::TransformOMPDistributeDirective(
OMPDistributeDirective *D) {
DeclarationNameInfo DirName;
@@ -9241,6 +9342,51 @@ StmtResult TreeTransform<Derived>::TransformOMPGenericLoopDirective(
return Res;
}
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPTeamsGenericLoopDirective(
+ OMPTeamsGenericLoopDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_teams_loop, DirName, nullptr,
+ D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPTargetTeamsGenericLoopDirective(
+ OMPTargetTeamsGenericLoopDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_target_teams_loop, DirName,
+ nullptr, D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult TreeTransform<Derived>::TransformOMPParallelGenericLoopDirective(
+ OMPParallelGenericLoopDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_parallel_loop, DirName,
+ nullptr, D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
+template <typename Derived>
+StmtResult
+TreeTransform<Derived>::TransformOMPTargetParallelGenericLoopDirective(
+ OMPTargetParallelGenericLoopDirective *D) {
+ DeclarationNameInfo DirName;
+ getDerived().getSema().StartOpenMPDSABlock(OMPD_target_parallel_loop, DirName,
+ nullptr, D->getBeginLoc());
+ StmtResult Res = getDerived().TransformOMPExecutableDirective(D);
+ getDerived().getSema().EndOpenMPDSABlock(Res.get());
+ return Res;
+}
+
//===----------------------------------------------------------------------===//
// OpenMP clause transformation
//===----------------------------------------------------------------------===//
@@ -9949,9 +10095,9 @@ TreeTransform<Derived>::TransformOMPDependClause(OMPDependClause *C) {
Vars.push_back(EVar.get());
}
return getDerived().RebuildOMPDependClause(
- DepModifier, C->getDependencyKind(), C->getDependencyLoc(),
- C->getColonLoc(), Vars, C->getBeginLoc(), C->getLParenLoc(),
- C->getEndLoc());
+ {C->getDependencyKind(), C->getDependencyLoc(), C->getColonLoc(),
+ C->getOmpAllMemoryLoc()},
+ DepModifier, Vars, C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
}
template <typename Derived>
@@ -10217,6 +10363,21 @@ TreeTransform<Derived>::TransformOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
}
template <typename Derived>
+OMPClause *TreeTransform<Derived>::TransformOMPHasDeviceAddrClause(
+ OMPHasDeviceAddrClause *C) {
+ llvm::SmallVector<Expr *, 16> Vars;
+ Vars.reserve(C->varlist_size());
+ for (auto *VE : C->varlists()) {
+ ExprResult EVar = getDerived().TransformExpr(cast<Expr>(VE));
+ if (EVar.isInvalid())
+ return nullptr;
+ Vars.push_back(EVar.get());
+ }
+ OMPVarListLocTy Locs(C->getBeginLoc(), C->getLParenLoc(), C->getEndLoc());
+ return getDerived().RebuildOMPHasDeviceAddrClause(Vars, Locs);
+}
+
+template <typename Derived>
OMPClause *
TreeTransform<Derived>::TransformOMPNontemporalClause(OMPNontemporalClause *C) {
llvm::SmallVector<Expr *, 16> Vars;
@@ -10460,9 +10621,7 @@ TreeTransform<Derived>::TransformCharacterLiteral(CharacterLiteral *E) {
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformUserDefinedLiteral(UserDefinedLiteral *E) {
- if (FunctionDecl *FD = E->getDirectCallee())
- SemaRef.MarkFunctionReferenced(E->getBeginLoc(), FD);
- return SemaRef.MaybeBindToTemporary(E);
+ return getDerived().TransformCallExpr(E);
}
template<typename Derived>
@@ -10961,11 +11120,15 @@ TreeTransform<Derived>::TransformMemberExpr(MemberExpr *E) {
FoundDecl == E->getFoundDecl() &&
!E->hasExplicitTemplateArgs()) {
- // Mark it referenced in the new context regardless.
- // FIXME: this is a bit instantiation-specific.
- SemaRef.MarkMemberReferenced(E);
-
- return E;
+ // Do not short-circuit a member expression of the form this->f when
+ // OpenMP requires it to be rebuilt, since the field may need to be
+ // privatized in that case.
+ if (!(isa<CXXThisExpr>(E->getBase()) &&
+ getSema().isOpenMPRebuildMemberExpr(cast<ValueDecl>(Member)))) {
+ // Mark it referenced in the new context regardless.
+ // FIXME: this is a bit instantiation-specific.
+ SemaRef.MarkMemberReferenced(E);
+ return E;
+ }
}
TemplateArgumentListInfo TransArgs;
@@ -11507,6 +11670,7 @@ TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
case OO_Array_Delete:
llvm_unreachable("new and delete operators cannot use CXXOperatorCallExpr");
+ case OO_Subscript:
case OO_Call: {
// This is a call to an object's operator().
assert(E->getNumArgs() >= 1 && "Object call is missing arguments");
@@ -11526,17 +11690,20 @@ TreeTransform<Derived>::TransformCXXOperatorCallExpr(CXXOperatorCallExpr *E) {
Args))
return ExprError();
+ if (E->getOperator() == OO_Subscript)
+ return getDerived().RebuildCxxSubscriptExpr(Object.get(), FakeLParenLoc,
+ Args, E->getEndLoc());
+
return getDerived().RebuildCallExpr(Object.get(), FakeLParenLoc, Args,
E->getEndLoc());
}
-#define OVERLOADED_OPERATOR(Name,Spelling,Token,Unary,Binary,MemberOnly) \
- case OO_##Name:
+#define OVERLOADED_OPERATOR(Name, Spelling, Token, Unary, Binary, MemberOnly) \
+ case OO_##Name: \
+ break;
+
#define OVERLOADED_OPERATOR_MULTI(Name,Spelling,Unary,Binary,MemberOnly)
#include "clang/Basic/OperatorKinds.def"
- case OO_Subscript:
- // Handled below.
- break;
case OO_Conditional:
llvm_unreachable("conditional operator is not actually overloadable");
@@ -11598,8 +11765,8 @@ ExprResult TreeTransform<Derived>::TransformSourceLocExpr(SourceLocExpr *E) {
if (!getDerived().AlwaysRebuild() && !NeedRebuildFunc)
return E;
- return getDerived().RebuildSourceLocExpr(E->getIdentKind(), E->getBeginLoc(),
- E->getEndLoc(),
+ return getDerived().RebuildSourceLocExpr(E->getIdentKind(), E->getType(),
+ E->getBeginLoc(), E->getEndLoc(),
getSema().CurContext);
}
@@ -11907,9 +12074,9 @@ TreeTransform<Derived>::TransformCXXNewExpr(CXXNewExpr *E) {
// Transform the size of the array we're allocating (if any).
Optional<Expr *> ArraySize;
- if (Optional<Expr *> OldArraySize = E->getArraySize()) {
+ if (E->isArray()) {
ExprResult NewArraySize;
- if (*OldArraySize) {
+ if (Optional<Expr *> OldArraySize = E->getArraySize()) {
NewArraySize = getDerived().TransformExpr(*OldArraySize);
if (NewArraySize.isInvalid())
return ExprError();
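The rewritten condition distinguishes array new from non-array new before looking at the bound, presumably because an array new may omit its bound when it is deduced from the initializer (C++20, P1009): isArray() is then true while no size expression exists. For illustration (a sketch):

int *a = new int[3];       // array new, size expression present
int *b = new int[]{1, 2};  // array new, bound deduced: no size expression
int *c = new int;          // not an array new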
@@ -12516,8 +12683,7 @@ TreeTransform<Derived>::TransformExprRequirement(concepts::ExprRequirement *Req)
return nullptr;
TransRetReq.emplace(TPL);
}
- assert(TransRetReq.hasValue() &&
- "All code paths leading here must set TransRetReq");
+ assert(TransRetReq && "All code paths leading here must set TransRetReq");
if (Expr *E = TransExpr.dyn_cast<Expr *>())
return getDerived().RebuildExprRequirement(E, Req->isSimple(),
Req->getNoexceptLoc(),
@@ -12743,6 +12909,9 @@ ExprResult TreeTransform<Derived>::TransformCXXInheritedCtorInitExpr(
template<typename Derived>
ExprResult
TreeTransform<Derived>::TransformCXXBindTemporaryExpr(CXXBindTemporaryExpr *E) {
+ if (auto *Dtor = E->getTemporary()->getDestructor())
+ SemaRef.MarkFunctionReferenced(E->getBeginLoc(),
+ const_cast<CXXDestructorDecl *>(Dtor));
return getDerived().TransformExpr(E->getSubExpr());
}
@@ -12922,14 +13091,24 @@ TreeTransform<Derived>::TransformLambdaExpr(LambdaExpr *E) {
NewTrailingRequiresClause = getDerived().TransformExpr(TRC);
// Create the local class that will describe the lambda.
- // FIXME: KnownDependent below is wrong when substituting inside a templated
- // context that isn't a DeclContext (such as a variable template).
+
+ // FIXME: DependencyKind below is wrong when substituting inside a templated
+ // context that isn't a DeclContext (such as a variable template), or when
+ // substituting an unevaluated lambda inside a function parameter's type,
+ // as parameter types are not instantiated from within a function's DC. We
+ // use isUnevaluatedContext() to distinguish the function parameter case.
+ CXXRecordDecl::LambdaDependencyKind DependencyKind =
+ CXXRecordDecl::LDK_Unknown;
+ if (getSema().isUnevaluatedContext() &&
+ (getSema().CurContext->isFileContext() ||
+ !getSema().CurContext->getParent()->isDependentContext()))
+ DependencyKind = CXXRecordDecl::LDK_NeverDependent;
+
CXXRecordDecl *OldClass = E->getLambdaClass();
- CXXRecordDecl *Class
- = getSema().createLambdaClosureType(E->getIntroducerRange(),
- NewCallOpTSI,
- /*KnownDependent=*/false,
- E->getCaptureDefault());
+ CXXRecordDecl *Class =
+ getSema().createLambdaClosureType(E->getIntroducerRange(), NewCallOpTSI,
+ DependencyKind, E->getCaptureDefault());
+
getDerived().transformedLocalDecl(OldClass, {Class});
Optional<std::tuple<bool, unsigned, unsigned, Decl *>> Mangling;
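The parameter case mentioned in the FIXME arises because C++20 allows lambdas in unevaluated operands, so a closure type can appear inside a function parameter's type, which is not instantiated from within the function's DeclContext. A hypothetical example of such a declaration:

template <typename T>
void f(decltype([] { return sizeof(T); }) *p); // unevaluated lambda in a
                                               // parameter's type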
@@ -14621,7 +14800,7 @@ TreeTransform<Derived>::RebuildTemplateName(CXXScopeSpec &SS,
bool TemplateKW,
TemplateDecl *Template) {
return SemaRef.Context.getQualifiedTemplateName(SS.getScopeRep(), TemplateKW,
- Template);
+ TemplateName(Template));
}
template<typename Derived>
@@ -14697,6 +14876,10 @@ TreeTransform<Derived>::RebuildCXXOperatorCallExpr(OverloadedOperatorKind Op,
return getSema().CreateBuiltinArraySubscriptExpr(
First, Callee->getBeginLoc(), Second, OpLoc);
} else if (Op == OO_Arrow) {
+ // It is possible that the type refers to a RecoveryExpr created earlier
+ // in the tree transformation.
+ if (First->getType()->isDependentType())
+ return ExprError();
// -> is never a builtin operation.
return SemaRef.BuildOverloadedArrowExpr(nullptr, First, OpLoc);
} else if (Second == nullptr || isPostIncDec) {
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTCommon.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTCommon.cpp
index c60f87a23985..26b722b6b14a 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTCommon.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTCommon.cpp
@@ -391,6 +391,7 @@ bool serialization::isRedeclarableDeclKind(unsigned Kind) {
case Decl::Field:
case Decl::MSProperty:
case Decl::MSGuid:
+ case Decl::UnnamedGlobalConstant:
case Decl::TemplateParamObject:
case Decl::ObjCIvar:
case Decl::ObjCAtDefsField:
@@ -477,7 +478,9 @@ bool serialization::needsAnonymousDeclarationNumber(const NamedDecl *D) {
// Otherwise, we only care about anonymous class members / block-scope decls.
// FIXME: We need to handle lambdas and blocks within inline / templated
// variables too.
- if (D->getDeclName() || !isa<RecordDecl>(D->getLexicalDeclContext()))
+ if (D->getDeclName())
+ return false;
+ if (!isa<RecordDecl, ObjCInterfaceDecl>(D->getLexicalDeclContext()))
return false;
return isa<TagDecl>(D) || isa<FieldDecl>(D);
}
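The lexical-context check above uses the variadic form of llvm::isa, which succeeds when the value is any of the listed types; a sketch of the equivalence:

// Sketch: the two-type isa above is equivalent to an explicit disjunction.
const DeclContext *DC = D->getLexicalDeclContext();
bool Multi = isa<RecordDecl, ObjCInterfaceDecl>(DC);
bool Split = isa<RecordDecl>(DC) || isa<ObjCInterfaceDecl>(DC);
assert(Multi == Split);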
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp
index d806fb9e1949..d853805bf97e 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTReader.cpp
@@ -15,6 +15,7 @@
#include "clang/AST/ASTConsumer.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTMutationListener.h"
+#include "clang/AST/ASTStructuralEquivalence.h"
#include "clang/AST/ASTUnresolvedSet.h"
#include "clang/AST/AbstractTypeReader.h"
#include "clang/AST/Decl.h"
@@ -42,6 +43,7 @@
#include "clang/Basic/Diagnostic.h"
#include "clang/Basic/DiagnosticError.h"
#include "clang/Basic/DiagnosticOptions.h"
+#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/FileManager.h"
#include "clang/Basic/FileSystemOptions.h"
@@ -312,7 +314,7 @@ static bool checkLanguageOptions(const LangOptions &LangOpts,
#define BENIGN_LANGOPT(Name, Bits, Default, Description)
#define BENIGN_ENUM_LANGOPT(Name, Type, Bits, Default, Description)
-#define BENIGN_VALUE_LANGOPT(Name, Type, Bits, Default, Description)
+#define BENIGN_VALUE_LANGOPT(Name, Bits, Default, Description)
#include "clang/Basic/LangOptions.def"
if (ExistingLangOpts.ModuleFeatures != LangOpts.ModuleFeatures) {
@@ -1587,14 +1589,13 @@ bool ASTReader::ReadSLocEntry(int ID) {
}
case SM_SLOC_EXPANSION_ENTRY: {
- SourceLocation SpellingLoc = ReadSourceLocation(*F, Record[1]);
- SourceMgr.createExpansionLoc(SpellingLoc,
- ReadSourceLocation(*F, Record[2]),
- ReadSourceLocation(*F, Record[3]),
- Record[5],
- Record[4],
- ID,
- BaseOffset + Record[0]);
+ LocSeq::State Seq;
+ SourceLocation SpellingLoc = ReadSourceLocation(*F, Record[1], Seq);
+ SourceLocation ExpansionBegin = ReadSourceLocation(*F, Record[2], Seq);
+ SourceLocation ExpansionEnd = ReadSourceLocation(*F, Record[3], Seq);
+ SourceMgr.createExpansionLoc(SpellingLoc, ExpansionBegin, ExpansionEnd,
+ Record[5], Record[4], ID,
+ BaseOffset + Record[0]);
break;
}
}
@@ -1694,6 +1695,7 @@ MacroInfo *ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset) {
RecordData Record;
SmallVector<IdentifierInfo*, 16> MacroParams;
MacroInfo *Macro = nullptr;
+ llvm::MutableArrayRef<Token> MacroTokens;
while (true) {
// Advance to the next record, but if we get to the end of the block, don't
@@ -1748,7 +1750,8 @@ MacroInfo *ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset) {
MI->setDefinitionEndLoc(ReadSourceLocation(F, Record, NextIndex));
MI->setIsUsed(Record[NextIndex++]);
MI->setUsedForHeaderGuard(Record[NextIndex++]);
-
+ MacroTokens = MI->allocateTokens(Record[NextIndex++],
+ PP.getPreprocessorAllocator());
if (RecType == PP_MACRO_FUNCTION_LIKE) {
// Decode function-like macro info.
bool isC99VarArgs = Record[NextIndex++];
@@ -1793,10 +1796,14 @@ MacroInfo *ASTReader::ReadMacroRecord(ModuleFile &F, uint64_t Offset) {
// If we see a TOKEN before a PP_MACRO_*, then the file is
// erroneous, just pretend we didn't see this.
if (!Macro) break;
+ if (MacroTokens.empty()) {
+ Error("unexpected number of macro tokens for a macro in AST file");
+ return Macro;
+ }
unsigned Idx = 0;
- Token Tok = ReadToken(F, Record, Idx);
- Macro->AddTokenToBody(Tok);
+ MacroTokens[0] = ReadToken(F, Record, Idx);
+ MacroTokens = MacroTokens.drop_front();
break;
}
}
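With this change the macro record carries its token count up front: allocateTokens reserves the whole body once, and each subsequent PP_TOKEN record fills the next slot instead of growing the body token by token. The fill pattern in isolation (a sketch; readNextToken stands in for the surrounding ReadToken call):

llvm::MutableArrayRef<Token> Toks =
    MI->allocateTokens(NumTokens, PP.getPreprocessorAllocator());
while (!Toks.empty()) {
  Toks[0] = readNextToken(); // one PP_TOKEN record per remaining slot
  Toks = Toks.drop_front();
}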
@@ -2219,7 +2226,7 @@ bool ASTReader::shouldDisableValidationForFile(
// If a PCH is loaded and validation is disabled for PCH then disable
// validation for the PCH and the modules it loads.
- ModuleKind K = CurrentDeserializingModuleKind.getValueOr(M.Kind);
+ ModuleKind K = CurrentDeserializingModuleKind.value_or(M.Kind);
switch (K) {
case MK_MainFile:
@@ -2433,8 +2440,8 @@ InputFile ASTReader::getInputFile(ModuleFile &F, unsigned ID, bool Complain) {
<< Filename << moduleKindForDiagnostic(ImportStack.back()->Kind)
<< TopLevelPCHName << FileChange.Kind
<< (FileChange.Old && FileChange.New)
- << llvm::itostr(FileChange.Old.getValueOr(0))
- << llvm::itostr(FileChange.New.getValueOr(0));
+ << llvm::itostr(FileChange.Old.value_or(0))
+ << llvm::itostr(FileChange.New.value_or(0));
// Print the import stack.
if (ImportStack.size() > 1) {
@@ -2890,6 +2897,7 @@ ASTReader::ReadControlBlock(ModuleFile &F,
case ORIGINAL_PCH_DIR:
F.OriginalDir = std::string(Blob);
+ ResolveImportedPath(F, F.OriginalDir);
break;
case MODULE_NAME:
@@ -3103,6 +3111,7 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
case IDENTIFIER_OFFSET:
case INTERESTING_IDENTIFIERS:
case STATISTICS:
+ case PP_ASSUME_NONNULL_LOC:
case PP_CONDITIONAL_STACK:
case PP_COUNTER_VALUE:
case SOURCE_LOCATION_OFFSETS:
@@ -3301,7 +3310,7 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
break;
case WEAK_UNDECLARED_IDENTIFIERS:
- if (Record.size() % 4 != 0)
+ if (Record.size() % 3 != 0)
return llvm::createStringError(std::errc::illegal_byte_sequence,
"invalid weak identifiers record");
@@ -3316,8 +3325,7 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
WeakUndeclaredIdentifiers.push_back(
getGlobalIdentifierID(F, Record[I++]));
WeakUndeclaredIdentifiers.push_back(
- ReadSourceLocation(F, Record, I).getRawEncoding());
- WeakUndeclaredIdentifiers.push_back(Record[I++]);
+ ReadSourceLocation(F, Record, I).getRawEncoding());
}
break;
@@ -3365,6 +3373,14 @@ llvm::Error ASTReader::ReadASTBlock(ModuleFile &F,
}
break;
+ case PP_ASSUME_NONNULL_LOC: {
+ unsigned Idx = 0;
+ if (!Record.empty())
+ PP.setPreambleRecordedPragmaAssumeNonNullLoc(
+ ReadSourceLocation(F, Record, Idx));
+ break;
+ }
+
case PP_CONDITIONAL_STACK:
if (!Record.empty()) {
unsigned Idx = 0, End = Record.size() - 1;
@@ -5057,7 +5073,8 @@ std::string ASTReader::getOriginalSourceFile(
const std::string &ASTFileName, FileManager &FileMgr,
const PCHContainerReader &PCHContainerRdr, DiagnosticsEngine &Diags) {
// Open the AST file.
- auto Buffer = FileMgr.getBufferForFile(ASTFileName);
+ auto Buffer = FileMgr.getBufferForFile(ASTFileName, /*IsVolatile=*/false,
+ /*RequiresNullTerminator=*/false);
if (!Buffer) {
Diags.Report(diag::err_fe_unable_to_read_pch_file)
<< ASTFileName << Buffer.getError().message();
@@ -5620,9 +5637,12 @@ llvm::Error ASTReader::ReadSubmoduleBlock(ModuleFile &F,
// them here.
break;
- case SUBMODULE_TOPHEADER:
- CurrentModule->addTopHeaderFilename(Blob);
+ case SUBMODULE_TOPHEADER: {
+ std::string HeaderName(Blob);
+ ResolveImportedPath(F, HeaderName);
+ CurrentModule->addTopHeaderFilename(HeaderName);
break;
+ }
case SUBMODULE_UMBRELLA_DIR: {
// See comments in SUBMODULE_UMBRELLA_HEADER
@@ -6029,10 +6049,9 @@ PreprocessedEntity *ASTReader::ReadPreprocessedEntity(unsigned Index) {
case PPD_INCLUSION_DIRECTIVE: {
const char *FullFileNameStart = Blob.data() + Record[0];
StringRef FullFileName(FullFileNameStart, Blob.size() - Record[0]);
- const FileEntry *File = nullptr;
+ Optional<FileEntryRef> File;
if (!FullFileName.empty())
- if (auto FE = PP.getFileManager().getFile(FullFileName))
- File = *FE;
+ File = PP.getFileManager().getOptionalFileRef(FullFileName);
// FIXME: Stable encoding
InclusionDirective::InclusionKind Kind
@@ -6446,11 +6465,13 @@ QualType ASTReader::readTypeRecord(unsigned Index) {
namespace clang {
class TypeLocReader : public TypeLocVisitor<TypeLocReader> {
+ using LocSeq = SourceLocationSequence;
+
ASTRecordReader &Reader;
+ LocSeq *Seq;
- SourceLocation readSourceLocation() {
- return Reader.readSourceLocation();
- }
+ SourceLocation readSourceLocation() { return Reader.readSourceLocation(Seq); }
+ SourceRange readSourceRange() { return Reader.readSourceRange(Seq); }
TypeSourceInfo *GetTypeSourceInfo() {
return Reader.readTypeSourceInfo();
@@ -6465,7 +6486,8 @@ class TypeLocReader : public TypeLocVisitor<TypeLocReader> {
}
public:
- TypeLocReader(ASTRecordReader &Reader) : Reader(Reader) {}
+ TypeLocReader(ASTRecordReader &Reader, LocSeq *Seq)
+ : Reader(Reader), Seq(Seq) {}
// We want compile-time assurance that we've enumerated all of
// these, so unfortunately we have to declare them first, then
@@ -6562,7 +6584,7 @@ void TypeLocReader::VisitDependentAddressSpaceTypeLoc(
DependentAddressSpaceTypeLoc TL) {
TL.setAttrNameLoc(readSourceLocation());
- TL.setAttrOperandParensRange(Reader.readSourceRange());
+ TL.setAttrOperandParensRange(readSourceRange());
TL.setAttrExprOperand(Reader.readExpr());
}
@@ -6586,7 +6608,7 @@ void TypeLocReader::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) {
void TypeLocReader::VisitConstantMatrixTypeLoc(ConstantMatrixTypeLoc TL) {
TL.setAttrNameLoc(readSourceLocation());
- TL.setAttrOperandParensRange(Reader.readSourceRange());
+ TL.setAttrOperandParensRange(readSourceRange());
TL.setAttrRowOperand(Reader.readExpr());
TL.setAttrColumnOperand(Reader.readExpr());
}
@@ -6594,7 +6616,7 @@ void TypeLocReader::VisitConstantMatrixTypeLoc(ConstantMatrixTypeLoc TL) {
void TypeLocReader::VisitDependentSizedMatrixTypeLoc(
DependentSizedMatrixTypeLoc TL) {
TL.setAttrNameLoc(readSourceLocation());
- TL.setAttrOperandParensRange(Reader.readSourceRange());
+ TL.setAttrOperandParensRange(readSourceRange());
TL.setAttrRowOperand(Reader.readExpr());
TL.setAttrColumnOperand(Reader.readExpr());
}
@@ -6603,7 +6625,7 @@ void TypeLocReader::VisitFunctionTypeLoc(FunctionTypeLoc TL) {
TL.setLocalRangeBegin(readSourceLocation());
TL.setLParenLoc(readSourceLocation());
TL.setRParenLoc(readSourceLocation());
- TL.setExceptionSpecRange(Reader.readSourceRange());
+ TL.setExceptionSpecRange(readSourceRange());
TL.setLocalRangeEnd(readSourceLocation());
for (unsigned i = 0, e = TL.getNumParams(); i != e; ++i) {
TL.setParam(i, Reader.readDeclAs<ParmVarDecl>());
@@ -6689,6 +6711,10 @@ void TypeLocReader::VisitAttributedTypeLoc(AttributedTypeLoc TL) {
TL.setAttr(ReadAttr());
}
+void TypeLocReader::VisitBTFTagAttributedTypeLoc(BTFTagAttributedTypeLoc TL) {
+ // Nothing to do.
+}
+
void TypeLocReader::VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) {
TL.setNameLoc(readSourceLocation());
}
@@ -6802,9 +6828,9 @@ void TypeLocReader::VisitDependentBitIntTypeLoc(
TL.setNameLoc(readSourceLocation());
}
-
-void ASTRecordReader::readTypeLoc(TypeLoc TL) {
- TypeLocReader TLR(*this);
+void ASTRecordReader::readTypeLoc(TypeLoc TL, LocSeq *ParentSeq) {
+ LocSeq::State Seq(ParentSeq);
+ TypeLocReader TLR(*this, Seq);
for (; !TL.isNull(); TL = TL.getNextTypeLoc())
TLR.Visit(TL);
}
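readTypeLoc now threads a SourceLocationSequence through every nested location read: a LocSeq::State scopes the sequence (and converts to a LocSeq * where one is expected), and TypeLocReader routes both readSourceLocation and readSourceRange through it so the locations within one type location are decoded as a sequence. The pattern in isolation, under those assumptions:

void readWithSequence(ASTRecordReader &Reader,
                      SourceLocationSequence *ParentSeq) {
  SourceLocationSequence::State Seq(ParentSeq); // usable where LocSeq * is expected
  SourceLocation Loc = Reader.readSourceLocation(Seq);
  SourceRange Range = Reader.readSourceRange(Seq);
  (void)Loc; // placeholders: a real reader stores these into a TypeLoc
  (void)Range;
}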
@@ -8386,11 +8412,9 @@ void ASTReader::ReadWeakUndeclaredIdentifiers(
= DecodeIdentifierInfo(WeakUndeclaredIdentifiers[I++]);
IdentifierInfo *AliasId
= DecodeIdentifierInfo(WeakUndeclaredIdentifiers[I++]);
- SourceLocation Loc
- = SourceLocation::getFromRawEncoding(WeakUndeclaredIdentifiers[I++]);
- bool Used = WeakUndeclaredIdentifiers[I++];
+ SourceLocation Loc =
+ SourceLocation::getFromRawEncoding(WeakUndeclaredIdentifiers[I++]);
WeakInfo WI(AliasId, Loc);
- WI.setUsed(Used);
WeakIDs.push_back(std::make_pair(WeakId, WI));
}
WeakUndeclaredIdentifiers.clear();
@@ -8973,11 +8997,10 @@ ASTRecordReader::readNestedNameSpecifierLoc() {
return Builder.getWithLocInContext(Context);
}
-SourceRange
-ASTReader::ReadSourceRange(ModuleFile &F, const RecordData &Record,
- unsigned &Idx) {
- SourceLocation beg = ReadSourceLocation(F, Record, Idx);
- SourceLocation end = ReadSourceLocation(F, Record, Idx);
+SourceRange ASTReader::ReadSourceRange(ModuleFile &F, const RecordData &Record,
+ unsigned &Idx, LocSeq *Seq) {
+ SourceLocation beg = ReadSourceLocation(F, Record, Idx, Seq);
+ SourceLocation end = ReadSourceLocation(F, Record, Idx, Seq);
return SourceRange(beg, end);
}
@@ -9177,7 +9200,8 @@ void ASTReader::finishPendingActions() {
while (!PendingIdentifierInfos.empty() || !PendingFunctionTypes.empty() ||
!PendingIncompleteDeclChains.empty() || !PendingDeclChains.empty() ||
!PendingMacroIDs.empty() || !PendingDeclContextInfos.empty() ||
- !PendingUpdateRecords.empty()) {
+ !PendingUpdateRecords.empty() ||
+ !PendingObjCExtensionIvarRedeclarations.empty()) {
// If any identifiers with corresponding top-level declarations have
// been loaded, load those declarations now.
using TopLevelDeclsMap =
@@ -9268,6 +9292,43 @@ void ASTReader::finishPendingActions() {
ReadingKindTracker ReadingKind(Read_Decl, *this);
loadDeclUpdateRecords(Update);
}
+
+ while (!PendingObjCExtensionIvarRedeclarations.empty()) {
+ auto ExtensionsPair = PendingObjCExtensionIvarRedeclarations.back().first;
+ auto DuplicateIvars =
+ PendingObjCExtensionIvarRedeclarations.back().second;
+ llvm::DenseSet<std::pair<Decl *, Decl *>> NonEquivalentDecls;
+ StructuralEquivalenceContext Ctx(
+ ExtensionsPair.first->getASTContext(),
+ ExtensionsPair.second->getASTContext(), NonEquivalentDecls,
+ StructuralEquivalenceKind::Default, /*StrictTypeSpelling =*/false,
+ /*Complain =*/false,
+ /*ErrorOnTagTypeMismatch =*/true);
+ if (Ctx.IsEquivalent(ExtensionsPair.first, ExtensionsPair.second)) {
+ // Merge redeclared ivars with their predecessors.
+ for (auto IvarPair : DuplicateIvars) {
+ ObjCIvarDecl *Ivar = IvarPair.first, *PrevIvar = IvarPair.second;
+ // Change semantic DeclContext but keep the lexical one.
+ Ivar->setDeclContextsImpl(PrevIvar->getDeclContext(),
+ Ivar->getLexicalDeclContext(),
+ getContext());
+ getContext().setPrimaryMergedDecl(Ivar, PrevIvar->getCanonicalDecl());
+ }
+ // Invalidate duplicate extension and the cached ivar list.
+ ExtensionsPair.first->setInvalidDecl();
+ ExtensionsPair.second->getClassInterface()
+ ->getDefinition()
+ ->setIvarList(nullptr);
+ } else {
+ for (auto IvarPair : DuplicateIvars) {
+ Diag(IvarPair.first->getLocation(),
+ diag::err_duplicate_ivar_declaration)
+ << IvarPair.first->getIdentifier();
+ Diag(IvarPair.second->getLocation(), diag::note_previous_definition);
+ }
+ }
+ PendingObjCExtensionIvarRedeclarations.pop_back();
+ }
}
// At this point, all update records for loaded decls are in place, so any
@@ -9576,19 +9637,12 @@ void ASTReader::diagnoseOdrViolations() {
Other
};
- // Used with err_module_odr_violation_mismatch_decl_diff and
- // note_module_odr_violation_mismatch_decl_diff
- enum ODRMismatchDeclDifference {
+ // Used with err_module_odr_violation_record and
+ // note_module_odr_violation_record
+ enum ODRCXXRecordDifference {
StaticAssertCondition,
StaticAssertMessage,
StaticAssertOnlyMessage,
- FieldName,
- FieldTypeName,
- FieldSingleBitField,
- FieldDifferentWidthBitField,
- FieldSingleMutable,
- FieldSingleInitializer,
- FieldDifferentInitializers,
MethodName,
MethodDeleted,
MethodDefaulted,
@@ -9607,13 +9661,6 @@ void ASTReader::diagnoseOdrViolations() {
MethodDifferentTemplateArgument,
MethodSingleBody,
MethodDifferentBody,
- TypedefName,
- TypedefType,
- VarName,
- VarType,
- VarSingleInitializer,
- VarDifferentInitializer,
- VarConstexpr,
FriendTypeFunction,
FriendType,
FriendFunction,
@@ -9629,34 +9676,39 @@ void ASTReader::diagnoseOdrViolations() {
// These lambdas have the common portions of the ODR diagnostics. This
// has the same return as Diag(), so additional parameters can be passed
// in with operator<<
- auto ODRDiagDeclError = [this](NamedDecl *FirstRecord, StringRef FirstModule,
- SourceLocation Loc, SourceRange Range,
- ODRMismatchDeclDifference DiffType) {
- return Diag(Loc, diag::err_module_odr_violation_mismatch_decl_diff)
- << FirstRecord << FirstModule.empty() << FirstModule << Range
- << DiffType;
- };
- auto ODRDiagDeclNote = [this](StringRef SecondModule, SourceLocation Loc,
- SourceRange Range, ODRMismatchDeclDifference DiffType) {
- return Diag(Loc, diag::note_module_odr_violation_mismatch_decl_diff)
- << SecondModule << Range << DiffType;
- };
-
- auto ODRDiagField = [this, &ODRDiagDeclError, &ODRDiagDeclNote,
- &ComputeQualTypeODRHash, &ComputeODRHash](
+ auto ODRDiagField = [this, &ComputeQualTypeODRHash, &ComputeODRHash](
NamedDecl *FirstRecord, StringRef FirstModule,
StringRef SecondModule, FieldDecl *FirstField,
FieldDecl *SecondField) {
+ enum ODRFieldDifference {
+ FieldName,
+ FieldTypeName,
+ FieldSingleBitField,
+ FieldDifferentWidthBitField,
+ FieldSingleMutable,
+ FieldSingleInitializer,
+ FieldDifferentInitializers,
+ };
+
+ auto DiagError = [FirstRecord, FirstField, FirstModule,
+ this](ODRFieldDifference DiffType) {
+ return Diag(FirstField->getLocation(),
+ diag::err_module_odr_violation_field)
+ << FirstRecord << FirstModule.empty() << FirstModule
+ << FirstField->getSourceRange() << DiffType;
+ };
+ auto DiagNote = [SecondField, SecondModule,
+ this](ODRFieldDifference DiffType) {
+ return Diag(SecondField->getLocation(),
+ diag::note_module_odr_violation_field)
+ << SecondModule << SecondField->getSourceRange() << DiffType;
+ };
+
IdentifierInfo *FirstII = FirstField->getIdentifier();
IdentifierInfo *SecondII = SecondField->getIdentifier();
if (FirstII->getName() != SecondII->getName()) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
- FirstField->getSourceRange(), FieldName)
- << FirstII;
- ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
- SecondField->getSourceRange(), FieldName)
- << SecondII;
-
+ DiagError(FieldName) << FirstII;
+ DiagNote(FieldName) << SecondII;
return true;
}
@@ -9667,25 +9719,16 @@ void ASTReader::diagnoseOdrViolations() {
QualType SecondType = SecondField->getType();
if (ComputeQualTypeODRHash(FirstType) !=
ComputeQualTypeODRHash(SecondType)) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
- FirstField->getSourceRange(), FieldTypeName)
- << FirstII << FirstType;
- ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
- SecondField->getSourceRange(), FieldTypeName)
- << SecondII << SecondType;
-
+ DiagError(FieldTypeName) << FirstII << FirstType;
+ DiagNote(FieldTypeName) << SecondII << SecondType;
return true;
}
const bool IsFirstBitField = FirstField->isBitField();
const bool IsSecondBitField = SecondField->isBitField();
if (IsFirstBitField != IsSecondBitField) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
- FirstField->getSourceRange(), FieldSingleBitField)
- << FirstII << IsFirstBitField;
- ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
- SecondField->getSourceRange(), FieldSingleBitField)
- << SecondII << IsSecondBitField;
+ DiagError(FieldSingleBitField) << FirstII << IsFirstBitField;
+ DiagNote(FieldSingleBitField) << SecondII << IsSecondBitField;
return true;
}
@@ -9695,13 +9738,9 @@ void ASTReader::diagnoseOdrViolations() {
unsigned SecondBitWidthHash =
ComputeODRHash(SecondField->getBitWidth());
if (FirstBitWidthHash != SecondBitWidthHash) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
- FirstField->getSourceRange(),
- FieldDifferentWidthBitField)
+ DiagError(FieldDifferentWidthBitField)
<< FirstII << FirstField->getBitWidth()->getSourceRange();
- ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
- SecondField->getSourceRange(),
- FieldDifferentWidthBitField)
+ DiagNote(FieldDifferentWidthBitField)
<< SecondII << SecondField->getBitWidth()->getSourceRange();
return true;
}
@@ -9713,12 +9752,8 @@ void ASTReader::diagnoseOdrViolations() {
const bool IsFirstMutable = FirstField->isMutable();
const bool IsSecondMutable = SecondField->isMutable();
if (IsFirstMutable != IsSecondMutable) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
- FirstField->getSourceRange(), FieldSingleMutable)
- << FirstII << IsFirstMutable;
- ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
- SecondField->getSourceRange(), FieldSingleMutable)
- << SecondII << IsSecondMutable;
+ DiagError(FieldSingleMutable) << FirstII << IsFirstMutable;
+ DiagNote(FieldSingleMutable) << SecondII << IsSecondMutable;
return true;
}
@@ -9726,11 +9761,9 @@ void ASTReader::diagnoseOdrViolations() {
const Expr *SecondInitializer = SecondField->getInClassInitializer();
if ((!FirstInitializer && SecondInitializer) ||
(FirstInitializer && !SecondInitializer)) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
- FirstField->getSourceRange(), FieldSingleInitializer)
+ DiagError(FieldSingleInitializer)
<< FirstII << (FirstInitializer != nullptr);
- ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
- SecondField->getSourceRange(), FieldSingleInitializer)
+ DiagNote(FieldSingleInitializer)
<< SecondII << (SecondInitializer != nullptr);
return true;
}
@@ -9739,13 +9772,9 @@ void ASTReader::diagnoseOdrViolations() {
unsigned FirstInitHash = ComputeODRHash(FirstInitializer);
unsigned SecondInitHash = ComputeODRHash(SecondInitializer);
if (FirstInitHash != SecondInitHash) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstField->getLocation(),
- FirstField->getSourceRange(),
- FieldDifferentInitializers)
+ DiagError(FieldDifferentInitializers)
<< FirstII << FirstInitializer->getSourceRange();
- ODRDiagDeclNote(SecondModule, SecondField->getLocation(),
- SecondField->getSourceRange(),
- FieldDifferentInitializers)
+ DiagNote(FieldDifferentInitializers)
<< SecondII << SecondInitializer->getSourceRange();
return true;
}
@@ -9755,19 +9784,34 @@ void ASTReader::diagnoseOdrViolations() {
};
auto ODRDiagTypeDefOrAlias =
- [&ODRDiagDeclError, &ODRDiagDeclNote, &ComputeQualTypeODRHash](
+ [this, &ComputeQualTypeODRHash](
NamedDecl *FirstRecord, StringRef FirstModule, StringRef SecondModule,
TypedefNameDecl *FirstTD, TypedefNameDecl *SecondTD,
bool IsTypeAlias) {
+ enum ODRTypedefDifference {
+ TypedefName,
+ TypedefType,
+ };
+
+ auto DiagError = [FirstRecord, FirstTD, FirstModule,
+ this](ODRTypedefDifference DiffType) {
+ return Diag(FirstTD->getLocation(),
+ diag::err_module_odr_violation_typedef)
+ << FirstRecord << FirstModule.empty() << FirstModule
+ << FirstTD->getSourceRange() << DiffType;
+ };
+ auto DiagNote = [SecondTD, SecondModule,
+ this](ODRTypedefDifference DiffType) {
+ return Diag(SecondTD->getLocation(),
+ diag::note_module_odr_violation_typedef)
+ << SecondModule << SecondTD->getSourceRange() << DiffType;
+ };
+
auto FirstName = FirstTD->getDeclName();
auto SecondName = SecondTD->getDeclName();
if (FirstName != SecondName) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstTD->getLocation(),
- FirstTD->getSourceRange(), TypedefName)
- << IsTypeAlias << FirstName;
- ODRDiagDeclNote(SecondModule, SecondTD->getLocation(),
- SecondTD->getSourceRange(), TypedefName)
- << IsTypeAlias << SecondName;
+ DiagError(TypedefName) << IsTypeAlias << FirstName;
+ DiagNote(TypedefName) << IsTypeAlias << SecondName;
return true;
}
@@ -9775,32 +9819,44 @@ void ASTReader::diagnoseOdrViolations() {
QualType SecondType = SecondTD->getUnderlyingType();
if (ComputeQualTypeODRHash(FirstType) !=
ComputeQualTypeODRHash(SecondType)) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstTD->getLocation(),
- FirstTD->getSourceRange(), TypedefType)
- << IsTypeAlias << FirstName << FirstType;
- ODRDiagDeclNote(SecondModule, SecondTD->getLocation(),
- SecondTD->getSourceRange(), TypedefType)
- << IsTypeAlias << SecondName << SecondType;
+ DiagError(TypedefType) << IsTypeAlias << FirstName << FirstType;
+ DiagNote(TypedefType) << IsTypeAlias << SecondName << SecondType;
return true;
}
return false;
- };
+ };
- auto ODRDiagVar = [&ODRDiagDeclError, &ODRDiagDeclNote,
- &ComputeQualTypeODRHash, &ComputeODRHash,
+ auto ODRDiagVar = [&ComputeQualTypeODRHash, &ComputeODRHash,
this](NamedDecl *FirstRecord, StringRef FirstModule,
StringRef SecondModule, VarDecl *FirstVD,
VarDecl *SecondVD) {
+ enum ODRVarDifference {
+ VarName,
+ VarType,
+ VarSingleInitializer,
+ VarDifferentInitializer,
+ VarConstexpr,
+ };
+
+ auto DiagError = [FirstRecord, FirstVD, FirstModule,
+ this](ODRVarDifference DiffType) {
+ return Diag(FirstVD->getLocation(),
+ diag::err_module_odr_violation_variable)
+ << FirstRecord << FirstModule.empty() << FirstModule
+ << FirstVD->getSourceRange() << DiffType;
+ };
+ auto DiagNote = [SecondVD, SecondModule, this](ODRVarDifference DiffType) {
+ return Diag(SecondVD->getLocation(),
+ diag::note_module_odr_violation_variable)
+ << SecondModule << SecondVD->getSourceRange() << DiffType;
+ };
+
auto FirstName = FirstVD->getDeclName();
auto SecondName = SecondVD->getDeclName();
if (FirstName != SecondName) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstVD->getLocation(),
- FirstVD->getSourceRange(), VarName)
- << FirstName;
- ODRDiagDeclNote(SecondModule, SecondVD->getLocation(),
- SecondVD->getSourceRange(), VarName)
- << SecondName;
+ DiagError(VarName) << FirstName;
+ DiagNote(VarName) << SecondName;
return true;
}
@@ -9808,12 +9864,8 @@ void ASTReader::diagnoseOdrViolations() {
QualType SecondType = SecondVD->getType();
if (ComputeQualTypeODRHash(FirstType) !=
ComputeQualTypeODRHash(SecondType)) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstVD->getLocation(),
- FirstVD->getSourceRange(), VarType)
- << FirstName << FirstType;
- ODRDiagDeclNote(SecondModule, SecondVD->getLocation(),
- SecondVD->getSourceRange(), VarType)
- << SecondName << SecondType;
+ DiagError(VarType) << FirstName << FirstType;
+ DiagNote(VarType) << SecondName << SecondType;
return true;
}
@@ -9823,12 +9875,10 @@ void ASTReader::diagnoseOdrViolations() {
const Expr *FirstInit = FirstVD->getInit();
const Expr *SecondInit = SecondVD->getInit();
if ((FirstInit == nullptr) != (SecondInit == nullptr)) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstVD->getLocation(),
- FirstVD->getSourceRange(), VarSingleInitializer)
+ DiagError(VarSingleInitializer)
<< FirstName << (FirstInit == nullptr)
<< (FirstInit ? FirstInit->getSourceRange() : SourceRange());
- ODRDiagDeclNote(SecondModule, SecondVD->getLocation(),
- SecondVD->getSourceRange(), VarSingleInitializer)
+ DiagNote(VarSingleInitializer)
<< SecondName << (SecondInit == nullptr)
<< (SecondInit ? SecondInit->getSourceRange() : SourceRange());
return true;
@@ -9836,11 +9886,9 @@ void ASTReader::diagnoseOdrViolations() {
if (FirstInit && SecondInit &&
ComputeODRHash(FirstInit) != ComputeODRHash(SecondInit)) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstVD->getLocation(),
- FirstVD->getSourceRange(), VarDifferentInitializer)
+ DiagError(VarDifferentInitializer)
<< FirstName << FirstInit->getSourceRange();
- ODRDiagDeclNote(SecondModule, SecondVD->getLocation(),
- SecondVD->getSourceRange(), VarDifferentInitializer)
+ DiagNote(VarDifferentInitializer)
<< SecondName << SecondInit->getSourceRange();
return true;
}
@@ -9848,12 +9896,8 @@ void ASTReader::diagnoseOdrViolations() {
const bool FirstIsConstexpr = FirstVD->isConstexpr();
const bool SecondIsConstexpr = SecondVD->isConstexpr();
if (FirstIsConstexpr != SecondIsConstexpr) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstVD->getLocation(),
- FirstVD->getSourceRange(), VarConstexpr)
- << FirstName << FirstIsConstexpr;
- ODRDiagDeclNote(SecondModule, SecondVD->getLocation(),
- SecondVD->getSourceRange(), VarConstexpr)
- << SecondName << SecondIsConstexpr;
+ DiagError(VarConstexpr) << FirstName << FirstIsConstexpr;
+ DiagNote(VarConstexpr) << SecondName << SecondIsConstexpr;
return true;
}
return false;
@@ -9968,34 +10012,35 @@ void ASTReader::diagnoseOdrViolations() {
}
};
- auto DiagnoseODRMismatch =
- [this](DiffResult &DR, NamedDecl *FirstRecord, StringRef FirstModule,
- NamedDecl *SecondRecord, StringRef SecondModule) {
- SourceLocation FirstLoc;
- SourceRange FirstRange;
- auto *FirstTag = dyn_cast<TagDecl>(FirstRecord);
- if (DR.FirstDiffType == EndOfClass && FirstTag) {
- FirstLoc = FirstTag->getBraceRange().getEnd();
- } else {
- FirstLoc = DR.FirstDecl->getLocation();
- FirstRange = DR.FirstDecl->getSourceRange();
- }
- Diag(FirstLoc, diag::err_module_odr_violation_mismatch_decl)
- << FirstRecord << FirstModule.empty() << FirstModule << FirstRange
- << DR.FirstDiffType;
-
- SourceLocation SecondLoc;
- SourceRange SecondRange;
- auto *SecondTag = dyn_cast<TagDecl>(SecondRecord);
- if (DR.SecondDiffType == EndOfClass && SecondTag) {
- SecondLoc = SecondTag->getBraceRange().getEnd();
- } else {
- SecondLoc = DR.SecondDecl->getLocation();
- SecondRange = DR.SecondDecl->getSourceRange();
- }
- Diag(SecondLoc, diag::note_module_odr_violation_mismatch_decl)
- << SecondModule << SecondRange << DR.SecondDiffType;
- };
+ auto DiagnoseODRMismatch = [this](DiffResult &DR, NamedDecl *FirstRecord,
+ StringRef FirstModule,
+ NamedDecl *SecondRecord,
+ StringRef SecondModule) {
+ auto GetMismatchedDeclLoc = [](const NamedDecl *Container,
+ ODRMismatchDecl DiffType, const Decl *D) {
+ SourceLocation Loc;
+ SourceRange Range;
+ auto *Tag = dyn_cast<TagDecl>(Container);
+ if (DiffType == EndOfClass && Tag) {
+ Loc = Tag->getBraceRange().getEnd();
+ } else {
+ Loc = D->getLocation();
+ Range = D->getSourceRange();
+ }
+ return std::make_pair(Loc, Range);
+ };
+
+ auto FirstDiagInfo =
+ GetMismatchedDeclLoc(FirstRecord, DR.FirstDiffType, DR.FirstDecl);
+ Diag(FirstDiagInfo.first, diag::err_module_odr_violation_mismatch_decl)
+ << FirstRecord << FirstModule.empty() << FirstModule
+ << FirstDiagInfo.second << DR.FirstDiffType;
+
+ auto SecondDiagInfo =
+ GetMismatchedDeclLoc(SecondRecord, DR.SecondDiffType, DR.SecondDecl);
+ Diag(SecondDiagInfo.first, diag::note_module_odr_violation_mismatch_decl)
+ << SecondModule << SecondDiagInfo.second << DR.SecondDiffType;
+ };
// Issue any pending ODR-failure diagnostics.
for (auto &Merge : OdrMergeFailures) {
@@ -10015,6 +10060,19 @@ void ASTReader::diagnoseOdrViolations() {
continue;
std::string SecondModule = getOwningModuleNameForDiagnostic(SecondRecord);
+ auto ODRDiagDeclError = [FirstRecord, &FirstModule,
+ this](SourceLocation Loc, SourceRange Range,
+ ODRCXXRecordDifference DiffType) {
+ return Diag(Loc, diag::err_module_odr_violation_record)
+ << FirstRecord << FirstModule.empty() << FirstModule << Range
+ << DiffType;
+ };
+ auto ODRDiagDeclNote = [&SecondModule,
+ this](SourceLocation Loc, SourceRange Range,
+ ODRCXXRecordDifference DiffType) {
+ return Diag(Loc, diag::note_module_odr_violation_record)
+ << SecondModule << Range << DiffType;
+ };
auto *FirstDD = FirstRecord->DefinitionData;
auto *SecondDD = RecordPair.second;
@@ -10134,13 +10192,6 @@ void ASTReader::diagnoseOdrViolations() {
assert(!FirstTemplate == !SecondTemplate &&
"Both pointers should be null or non-null");
- enum ODRTemplateDifference {
- ParamEmptyName,
- ParamName,
- ParamSingleDefaultArgument,
- ParamDifferentDefaultArgument,
- };
-
if (FirstTemplate && SecondTemplate) {
DeclHashes FirstTemplateHashes;
DeclHashes SecondTemplateHashes;
@@ -10166,155 +10217,60 @@ void ASTReader::diagnoseOdrViolations() {
if (FirstIt->second == SecondIt->second)
continue;
- auto ODRDiagTemplateError = [FirstRecord, &FirstModule, this](
- SourceLocation Loc, SourceRange Range,
- ODRTemplateDifference DiffType) {
- return Diag(Loc, diag::err_module_odr_violation_template_parameter)
- << FirstRecord << FirstModule.empty() << FirstModule << Range
- << DiffType;
- };
- auto ODRDiagTemplateNote = [&SecondModule, this](
- SourceLocation Loc, SourceRange Range,
- ODRTemplateDifference DiffType) {
- return Diag(Loc, diag::note_module_odr_violation_template_parameter)
- << SecondModule << Range << DiffType;
- };
-
const NamedDecl* FirstDecl = cast<NamedDecl>(FirstIt->first);
const NamedDecl* SecondDecl = cast<NamedDecl>(SecondIt->first);
assert(FirstDecl->getKind() == SecondDecl->getKind() &&
"Parameter Decl's should be the same kind.");
+ enum ODRTemplateDifference {
+ ParamEmptyName,
+ ParamName,
+ ParamSingleDefaultArgument,
+ ParamDifferentDefaultArgument,
+ };
+
+ auto hasDefaultArg = [](const NamedDecl *D) {
+ if (auto *TTP = dyn_cast<TemplateTypeParmDecl>(D))
+ return TTP->hasDefaultArgument() &&
+ !TTP->defaultArgumentWasInherited();
+ if (auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(D))
+ return NTTP->hasDefaultArgument() &&
+ !NTTP->defaultArgumentWasInherited();
+ auto *TTP = cast<TemplateTemplateParmDecl>(D);
+ return TTP->hasDefaultArgument() &&
+ !TTP->defaultArgumentWasInherited();
+ };
+ bool hasFirstArg = hasDefaultArg(FirstDecl);
+ bool hasSecondArg = hasDefaultArg(SecondDecl);
+
+ ODRTemplateDifference ErrDiffType;
+ ODRTemplateDifference NoteDiffType;
+
DeclarationName FirstName = FirstDecl->getDeclName();
DeclarationName SecondName = SecondDecl->getDeclName();
if (FirstName != SecondName) {
- const bool FirstNameEmpty =
+ bool FirstNameEmpty =
FirstName.isIdentifier() && !FirstName.getAsIdentifierInfo();
- const bool SecondNameEmpty =
- SecondName.isIdentifier() && !SecondName.getAsIdentifierInfo();
- assert((!FirstNameEmpty || !SecondNameEmpty) &&
- "Both template parameters cannot be unnamed.");
- ODRDiagTemplateError(FirstDecl->getLocation(),
- FirstDecl->getSourceRange(),
- FirstNameEmpty ? ParamEmptyName : ParamName)
- << FirstName;
- ODRDiagTemplateNote(SecondDecl->getLocation(),
- SecondDecl->getSourceRange(),
- SecondNameEmpty ? ParamEmptyName : ParamName)
- << SecondName;
- break;
- }
-
- switch (FirstDecl->getKind()) {
- default:
- llvm_unreachable("Invalid template parameter type.");
- case Decl::TemplateTypeParm: {
- const auto *FirstParam = cast<TemplateTypeParmDecl>(FirstDecl);
- const auto *SecondParam = cast<TemplateTypeParmDecl>(SecondDecl);
- const bool HasFirstDefaultArgument =
- FirstParam->hasDefaultArgument() &&
- !FirstParam->defaultArgumentWasInherited();
- const bool HasSecondDefaultArgument =
- SecondParam->hasDefaultArgument() &&
- !SecondParam->defaultArgumentWasInherited();
-
- if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
- ODRDiagTemplateError(FirstDecl->getLocation(),
- FirstDecl->getSourceRange(),
- ParamSingleDefaultArgument)
- << HasFirstDefaultArgument;
- ODRDiagTemplateNote(SecondDecl->getLocation(),
- SecondDecl->getSourceRange(),
- ParamSingleDefaultArgument)
- << HasSecondDefaultArgument;
- break;
- }
-
- assert(HasFirstDefaultArgument && HasSecondDefaultArgument &&
- "Expecting default arguments.");
-
- ODRDiagTemplateError(FirstDecl->getLocation(),
- FirstDecl->getSourceRange(),
- ParamDifferentDefaultArgument);
- ODRDiagTemplateNote(SecondDecl->getLocation(),
- SecondDecl->getSourceRange(),
- ParamDifferentDefaultArgument);
-
- break;
- }
- case Decl::NonTypeTemplateParm: {
- const auto *FirstParam = cast<NonTypeTemplateParmDecl>(FirstDecl);
- const auto *SecondParam = cast<NonTypeTemplateParmDecl>(SecondDecl);
- const bool HasFirstDefaultArgument =
- FirstParam->hasDefaultArgument() &&
- !FirstParam->defaultArgumentWasInherited();
- const bool HasSecondDefaultArgument =
- SecondParam->hasDefaultArgument() &&
- !SecondParam->defaultArgumentWasInherited();
-
- if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
- ODRDiagTemplateError(FirstDecl->getLocation(),
- FirstDecl->getSourceRange(),
- ParamSingleDefaultArgument)
- << HasFirstDefaultArgument;
- ODRDiagTemplateNote(SecondDecl->getLocation(),
- SecondDecl->getSourceRange(),
- ParamSingleDefaultArgument)
- << HasSecondDefaultArgument;
- break;
- }
-
- assert(HasFirstDefaultArgument && HasSecondDefaultArgument &&
- "Expecting default arguments.");
-
- ODRDiagTemplateError(FirstDecl->getLocation(),
- FirstDecl->getSourceRange(),
- ParamDifferentDefaultArgument);
- ODRDiagTemplateNote(SecondDecl->getLocation(),
- SecondDecl->getSourceRange(),
- ParamDifferentDefaultArgument);
-
- break;
- }
- case Decl::TemplateTemplateParm: {
- const auto *FirstParam = cast<TemplateTemplateParmDecl>(FirstDecl);
- const auto *SecondParam =
- cast<TemplateTemplateParmDecl>(SecondDecl);
- const bool HasFirstDefaultArgument =
- FirstParam->hasDefaultArgument() &&
- !FirstParam->defaultArgumentWasInherited();
- const bool HasSecondDefaultArgument =
- SecondParam->hasDefaultArgument() &&
- !SecondParam->defaultArgumentWasInherited();
-
- if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
- ODRDiagTemplateError(FirstDecl->getLocation(),
- FirstDecl->getSourceRange(),
- ParamSingleDefaultArgument)
- << HasFirstDefaultArgument;
- ODRDiagTemplateNote(SecondDecl->getLocation(),
- SecondDecl->getSourceRange(),
- ParamSingleDefaultArgument)
- << HasSecondDefaultArgument;
- break;
- }
-
- assert(HasFirstDefaultArgument && HasSecondDefaultArgument &&
- "Expecting default arguments.");
-
- ODRDiagTemplateError(FirstDecl->getLocation(),
- FirstDecl->getSourceRange(),
- ParamDifferentDefaultArgument);
- ODRDiagTemplateNote(SecondDecl->getLocation(),
- SecondDecl->getSourceRange(),
- ParamDifferentDefaultArgument);
-
- break;
- }
- }
-
+ bool SecondNameEmpty = SecondName.isIdentifier() &&
+ !SecondName.getAsIdentifierInfo();
+ ErrDiffType = FirstNameEmpty ? ParamEmptyName : ParamName;
+ NoteDiffType = SecondNameEmpty ? ParamEmptyName : ParamName;
+ } else if (hasFirstArg == hasSecondArg)
+ ErrDiffType = NoteDiffType = ParamDifferentDefaultArgument;
+ else
+ ErrDiffType = NoteDiffType = ParamSingleDefaultArgument;
+
+ Diag(FirstDecl->getLocation(),
+ diag::err_module_odr_violation_template_parameter)
+ << FirstRecord << FirstModule.empty() << FirstModule
+ << FirstDecl->getSourceRange() << ErrDiffType << hasFirstArg
+ << FirstName;
+ Diag(SecondDecl->getLocation(),
+ diag::note_module_odr_violation_template_parameter)
+ << SecondModule << SecondDecl->getSourceRange() << NoteDiffType
+ << hasSecondArg << SecondName;
break;
}
@@ -10369,9 +10325,9 @@ void ASTReader::diagnoseOdrViolations() {
unsigned FirstODRHash = ComputeODRHash(FirstExpr);
unsigned SecondODRHash = ComputeODRHash(SecondExpr);
if (FirstODRHash != SecondODRHash) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstExpr->getBeginLoc(),
+ ODRDiagDeclError(FirstExpr->getBeginLoc(),
FirstExpr->getSourceRange(), StaticAssertCondition);
- ODRDiagDeclNote(SecondModule, SecondExpr->getBeginLoc(),
+ ODRDiagDeclNote(SecondExpr->getBeginLoc(),
SecondExpr->getSourceRange(), StaticAssertCondition);
Diagnosed = true;
break;
@@ -10397,11 +10353,9 @@ void ASTReader::diagnoseOdrViolations() {
SecondLoc = SecondSA->getBeginLoc();
SecondRange = SecondSA->getSourceRange();
}
- ODRDiagDeclError(FirstRecord, FirstModule, FirstLoc, FirstRange,
- StaticAssertOnlyMessage)
+ ODRDiagDeclError(FirstLoc, FirstRange, StaticAssertOnlyMessage)
<< (FirstStr == nullptr);
- ODRDiagDeclNote(SecondModule, SecondLoc, SecondRange,
- StaticAssertOnlyMessage)
+ ODRDiagDeclNote(SecondLoc, SecondRange, StaticAssertOnlyMessage)
<< (SecondStr == nullptr);
Diagnosed = true;
break;
@@ -10409,10 +10363,10 @@ void ASTReader::diagnoseOdrViolations() {
if (FirstStr && SecondStr &&
FirstStr->getString() != SecondStr->getString()) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstStr->getBeginLoc(),
- FirstStr->getSourceRange(), StaticAssertMessage);
- ODRDiagDeclNote(SecondModule, SecondStr->getBeginLoc(),
- SecondStr->getSourceRange(), StaticAssertMessage);
+ ODRDiagDeclError(FirstStr->getBeginLoc(), FirstStr->getSourceRange(),
+ StaticAssertMessage);
+ ODRDiagDeclNote(SecondStr->getBeginLoc(), SecondStr->getSourceRange(),
+ StaticAssertMessage);
Diagnosed = true;
break;
}
@@ -10440,16 +10394,24 @@ void ASTReader::diagnoseOdrViolations() {
const CXXMethodDecl *SecondMethod = cast<CXXMethodDecl>(SecondDecl);
FirstMethodType = GetMethodTypeForDiagnostics(FirstMethod);
SecondMethodType = GetMethodTypeForDiagnostics(SecondMethod);
- auto FirstName = FirstMethod->getDeclName();
- auto SecondName = SecondMethod->getDeclName();
- if (FirstMethodType != SecondMethodType || FirstName != SecondName) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodName)
- << FirstMethodType << FirstName;
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodName)
- << SecondMethodType << SecondName;
+ DeclarationName FirstName = FirstMethod->getDeclName();
+ DeclarationName SecondName = SecondMethod->getDeclName();
+ auto DiagMethodError = [&ODRDiagDeclError, FirstMethod, FirstMethodType,
+ FirstName](ODRCXXRecordDifference DiffType) {
+ return ODRDiagDeclError(FirstMethod->getLocation(),
+ FirstMethod->getSourceRange(), DiffType)
+ << FirstMethodType << FirstName;
+ };
+ auto DiagMethodNote = [&ODRDiagDeclNote, SecondMethod, SecondMethodType,
+ SecondName](ODRCXXRecordDifference DiffType) {
+ return ODRDiagDeclNote(SecondMethod->getLocation(),
+ SecondMethod->getSourceRange(), DiffType)
+ << SecondMethodType << SecondName;
+ };
+ if (FirstMethodType != SecondMethodType || FirstName != SecondName) {
+ DiagMethodError(MethodName);
+ DiagMethodNote(MethodName);
Diagnosed = true;
break;
}
@@ -10457,13 +10419,8 @@ void ASTReader::diagnoseOdrViolations() {
const bool FirstDeleted = FirstMethod->isDeletedAsWritten();
const bool SecondDeleted = SecondMethod->isDeletedAsWritten();
if (FirstDeleted != SecondDeleted) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodDeleted)
- << FirstMethodType << FirstName << FirstDeleted;
-
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodDeleted)
- << SecondMethodType << SecondName << SecondDeleted;
+ DiagMethodError(MethodDeleted) << FirstDeleted;
+ DiagMethodNote(MethodDeleted) << SecondDeleted;
Diagnosed = true;
break;
}
@@ -10471,13 +10428,8 @@ void ASTReader::diagnoseOdrViolations() {
const bool FirstDefaulted = FirstMethod->isExplicitlyDefaulted();
const bool SecondDefaulted = SecondMethod->isExplicitlyDefaulted();
if (FirstDefaulted != SecondDefaulted) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodDefaulted)
- << FirstMethodType << FirstName << FirstDefaulted;
-
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodDefaulted)
- << SecondMethodType << SecondName << SecondDefaulted;
+ DiagMethodError(MethodDefaulted) << FirstDefaulted;
+ DiagMethodNote(MethodDefaulted) << SecondDefaulted;
Diagnosed = true;
break;
}
@@ -10488,12 +10440,8 @@ void ASTReader::diagnoseOdrViolations() {
const bool SecondPure = SecondMethod->isPure();
if ((FirstVirtual || SecondVirtual) &&
(FirstVirtual != SecondVirtual || FirstPure != SecondPure)) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodVirtual)
- << FirstMethodType << FirstName << FirstPure << FirstVirtual;
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodVirtual)
- << SecondMethodType << SecondName << SecondPure << SecondVirtual;
+ DiagMethodError(MethodVirtual) << FirstPure << FirstVirtual;
+ DiagMethodNote(MethodVirtual) << SecondPure << SecondVirtual;
Diagnosed = true;
break;
}
@@ -10506,12 +10454,8 @@ void ASTReader::diagnoseOdrViolations() {
const bool FirstStatic = FirstStorage == SC_Static;
const bool SecondStatic = SecondStorage == SC_Static;
if (FirstStatic != SecondStatic) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodStatic)
- << FirstMethodType << FirstName << FirstStatic;
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodStatic)
- << SecondMethodType << SecondName << SecondStatic;
+ DiagMethodError(MethodStatic) << FirstStatic;
+ DiagMethodNote(MethodStatic) << SecondStatic;
Diagnosed = true;
break;
}
@@ -10519,12 +10463,8 @@ void ASTReader::diagnoseOdrViolations() {
const bool FirstVolatile = FirstMethod->isVolatile();
const bool SecondVolatile = SecondMethod->isVolatile();
if (FirstVolatile != SecondVolatile) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodVolatile)
- << FirstMethodType << FirstName << FirstVolatile;
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodVolatile)
- << SecondMethodType << SecondName << SecondVolatile;
+ DiagMethodError(MethodVolatile) << FirstVolatile;
+ DiagMethodNote(MethodVolatile) << SecondVolatile;
Diagnosed = true;
break;
}
@@ -10532,12 +10472,8 @@ void ASTReader::diagnoseOdrViolations() {
const bool FirstConst = FirstMethod->isConst();
const bool SecondConst = SecondMethod->isConst();
if (FirstConst != SecondConst) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodConst)
- << FirstMethodType << FirstName << FirstConst;
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodConst)
- << SecondMethodType << SecondName << SecondConst;
+ DiagMethodError(MethodConst) << FirstConst;
+ DiagMethodNote(MethodConst) << SecondConst;
Diagnosed = true;
break;
}
@@ -10545,12 +10481,8 @@ void ASTReader::diagnoseOdrViolations() {
const bool FirstInline = FirstMethod->isInlineSpecified();
const bool SecondInline = SecondMethod->isInlineSpecified();
if (FirstInline != SecondInline) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodInline)
- << FirstMethodType << FirstName << FirstInline;
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodInline)
- << SecondMethodType << SecondName << SecondInline;
+ DiagMethodError(MethodInline) << FirstInline;
+ DiagMethodNote(MethodInline) << SecondInline;
Diagnosed = true;
break;
}
@@ -10558,14 +10490,8 @@ void ASTReader::diagnoseOdrViolations() {
const unsigned FirstNumParameters = FirstMethod->param_size();
const unsigned SecondNumParameters = SecondMethod->param_size();
if (FirstNumParameters != SecondNumParameters) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(),
- MethodNumberParameters)
- << FirstMethodType << FirstName << FirstNumParameters;
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(),
- MethodNumberParameters)
- << SecondMethodType << SecondName << SecondNumParameters;
+ DiagMethodError(MethodNumberParameters) << FirstNumParameters;
+ DiagMethodNote(MethodNumberParameters) << SecondNumParameters;
Diagnosed = true;
break;
}
@@ -10583,33 +10509,22 @@ void ASTReader::diagnoseOdrViolations() {
ComputeQualTypeODRHash(SecondParamType)) {
if (const DecayedType *ParamDecayedType =
FirstParamType->getAs<DecayedType>()) {
- ODRDiagDeclError(
- FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodParameterType)
- << FirstMethodType << FirstName << (I + 1) << FirstParamType
- << true << ParamDecayedType->getOriginalType();
+ DiagMethodError(MethodParameterType)
+ << (I + 1) << FirstParamType << true
+ << ParamDecayedType->getOriginalType();
} else {
- ODRDiagDeclError(
- FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodParameterType)
- << FirstMethodType << FirstName << (I + 1) << FirstParamType
- << false;
+ DiagMethodError(MethodParameterType)
+ << (I + 1) << FirstParamType << false;
}
if (const DecayedType *ParamDecayedType =
SecondParamType->getAs<DecayedType>()) {
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(),
- MethodParameterType)
- << SecondMethodType << SecondName << (I + 1)
- << SecondParamType << true
+ DiagMethodNote(MethodParameterType)
+ << (I + 1) << SecondParamType << true
<< ParamDecayedType->getOriginalType();
} else {
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(),
- MethodParameterType)
- << SecondMethodType << SecondName << (I + 1)
- << SecondParamType << false;
+ DiagMethodNote(MethodParameterType)
+ << (I + 1) << SecondParamType << false;
}
ParameterMismatch = true;
break;
@@ -10618,13 +10533,8 @@ void ASTReader::diagnoseOdrViolations() {
DeclarationName FirstParamName = FirstParam->getDeclName();
DeclarationName SecondParamName = SecondParam->getDeclName();
if (FirstParamName != SecondParamName) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodParameterName)
- << FirstMethodType << FirstName << (I + 1) << FirstParamName;
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodParameterName)
- << SecondMethodType << SecondName << (I + 1) << SecondParamName;
+ DiagMethodError(MethodParameterName) << (I + 1) << FirstParamName;
+ DiagMethodNote(MethodParameterName) << (I + 1) << SecondParamName;
ParameterMismatch = true;
break;
}
@@ -10632,18 +10542,11 @@ void ASTReader::diagnoseOdrViolations() {
const Expr *FirstInit = FirstParam->getInit();
const Expr *SecondInit = SecondParam->getInit();
if ((FirstInit == nullptr) != (SecondInit == nullptr)) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstMethod->getLocation(),
- FirstMethod->getSourceRange(),
- MethodParameterSingleDefaultArgument)
- << FirstMethodType << FirstName << (I + 1)
- << (FirstInit == nullptr)
+ DiagMethodError(MethodParameterSingleDefaultArgument)
+ << (I + 1) << (FirstInit == nullptr)
<< (FirstInit ? FirstInit->getSourceRange() : SourceRange());
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(),
- MethodParameterSingleDefaultArgument)
- << SecondMethodType << SecondName << (I + 1)
- << (SecondInit == nullptr)
+ DiagMethodNote(MethodParameterSingleDefaultArgument)
+ << (I + 1) << (SecondInit == nullptr)
<< (SecondInit ? SecondInit->getSourceRange() : SourceRange());
ParameterMismatch = true;
break;
@@ -10651,17 +10554,10 @@ void ASTReader::diagnoseOdrViolations() {
if (FirstInit && SecondInit &&
ComputeODRHash(FirstInit) != ComputeODRHash(SecondInit)) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstMethod->getLocation(),
- FirstMethod->getSourceRange(),
- MethodParameterDifferentDefaultArgument)
- << FirstMethodType << FirstName << (I + 1)
- << FirstInit->getSourceRange();
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(),
- MethodParameterDifferentDefaultArgument)
- << SecondMethodType << SecondName << (I + 1)
- << SecondInit->getSourceRange();
+ DiagMethodError(MethodParameterDifferentDefaultArgument)
+ << (I + 1) << FirstInit->getSourceRange();
+ DiagMethodNote(MethodParameterDifferentDefaultArgument)
+ << (I + 1) << SecondInit->getSourceRange();
ParameterMismatch = true;
break;
@@ -10680,16 +10576,10 @@ void ASTReader::diagnoseOdrViolations() {
if ((FirstTemplateArgs && !SecondTemplateArgs) ||
(!FirstTemplateArgs && SecondTemplateArgs)) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(),
- MethodNoTemplateArguments)
- << FirstMethodType << FirstName << (FirstTemplateArgs != nullptr);
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(),
- MethodNoTemplateArguments)
- << SecondMethodType << SecondName
+ DiagMethodError(MethodNoTemplateArguments)
+ << (FirstTemplateArgs != nullptr);
+ DiagMethodNote(MethodNoTemplateArguments)
<< (SecondTemplateArgs != nullptr);
-
Diagnosed = true;
break;
}
@@ -10704,9 +10594,8 @@ void ASTReader::diagnoseOdrViolations() {
ExpandedList.push_back(&TA);
continue;
}
- for (const TemplateArgument &PackTA : TA.getPackAsArray()) {
- ExpandedList.push_back(&PackTA);
- }
+ llvm::append_range(ExpandedList, llvm::make_pointer_range(
+ TA.getPackAsArray()));
}
return ExpandedList;
};
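// Editor's sketch (plain C++, no LLVM ADT) of the append_range /
// make_pointer_range rewrite above: the explicit push_back loop over a
// template-argument pack becomes "append the address of every element of
// this range".
#include <vector>

template <typename Range, typename PtrList>
void appendPointerRange(const Range &R, PtrList &Out) {
  for (const auto &Elt : R)
    Out.push_back(&Elt); // one pointer per pack element
}

int main() {
  std::vector<int> Pack = {1, 2, 3};
  std::vector<const int *> Expanded;
  appendPointerRange(Pack, Expanded); // same effect as the removed loop
}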
@@ -10716,18 +10605,10 @@ void ASTReader::diagnoseOdrViolations() {
ExpandTemplateArgumentList(SecondTemplateArgs);
if (FirstExpandedList.size() != SecondExpandedList.size()) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstMethod->getLocation(),
- FirstMethod->getSourceRange(),
- MethodDifferentNumberTemplateArguments)
- << FirstMethodType << FirstName
+ DiagMethodError(MethodDifferentNumberTemplateArguments)
<< (unsigned)FirstExpandedList.size();
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(),
- MethodDifferentNumberTemplateArguments)
- << SecondMethodType << SecondName
+ DiagMethodNote(MethodDifferentNumberTemplateArguments)
<< (unsigned)SecondExpandedList.size();
-
Diagnosed = true;
break;
}
@@ -10741,15 +10622,10 @@ void ASTReader::diagnoseOdrViolations() {
continue;
}
- ODRDiagDeclError(
- FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodDifferentTemplateArgument)
- << FirstMethodType << FirstName << FirstTA << i + 1;
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(),
- MethodDifferentTemplateArgument)
- << SecondMethodType << SecondName << SecondTA << i + 1;
-
+ DiagMethodError(MethodDifferentTemplateArgument)
+ << FirstTA << i + 1;
+ DiagMethodNote(MethodDifferentTemplateArgument)
+ << SecondTA << i + 1;
TemplateArgumentMismatch = true;
break;
}
@@ -10769,30 +10645,22 @@ void ASTReader::diagnoseOdrViolations() {
// Compare the hash generated to the hash stored. A difference means
// that a body was present in the original source. Due to merging,
- // the stardard way of detecting a body will not work.
+ // the standard way of detecting a body will not work.
const bool HasFirstBody =
ComputeCXXMethodODRHash(FirstMethod) != FirstMethod->getODRHash();
const bool HasSecondBody =
ComputeCXXMethodODRHash(SecondMethod) != SecondMethod->getODRHash();
if (HasFirstBody != HasSecondBody) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodSingleBody)
- << FirstMethodType << FirstName << HasFirstBody;
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodSingleBody)
- << SecondMethodType << SecondName << HasSecondBody;
+ DiagMethodError(MethodSingleBody) << HasFirstBody;
+ DiagMethodNote(MethodSingleBody) << HasSecondBody;
Diagnosed = true;
break;
}
if (HasFirstBody && HasSecondBody) {
- ODRDiagDeclError(FirstRecord, FirstModule, FirstMethod->getLocation(),
- FirstMethod->getSourceRange(), MethodDifferentBody)
- << FirstMethodType << FirstName;
- ODRDiagDeclNote(SecondModule, SecondMethod->getLocation(),
- SecondMethod->getSourceRange(), MethodDifferentBody)
- << SecondMethodType << SecondName;
+ DiagMethodError(MethodDifferentBody);
+ DiagMethodNote(MethodDifferentBody);
Diagnosed = true;
break;
}
@@ -10824,14 +10692,12 @@ void ASTReader::diagnoseOdrViolations() {
TypeSourceInfo *SecondTSI = SecondFriend->getFriendType();
if (FirstND && SecondND) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstFriend->getFriendLoc(),
+ ODRDiagDeclError(FirstFriend->getFriendLoc(),
FirstFriend->getSourceRange(), FriendFunction)
<< FirstND;
- ODRDiagDeclNote(SecondModule, SecondFriend->getFriendLoc(),
+ ODRDiagDeclNote(SecondFriend->getFriendLoc(),
SecondFriend->getSourceRange(), FriendFunction)
<< SecondND;
-
Diagnosed = true;
break;
}
@@ -10841,24 +10707,22 @@ void ASTReader::diagnoseOdrViolations() {
QualType SecondFriendType = SecondTSI->getType();
assert(ComputeQualTypeODRHash(FirstFriendType) !=
ComputeQualTypeODRHash(SecondFriendType));
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstFriend->getFriendLoc(),
+ ODRDiagDeclError(FirstFriend->getFriendLoc(),
FirstFriend->getSourceRange(), FriendType)
<< FirstFriendType;
- ODRDiagDeclNote(SecondModule, SecondFriend->getFriendLoc(),
+ ODRDiagDeclNote(SecondFriend->getFriendLoc(),
SecondFriend->getSourceRange(), FriendType)
<< SecondFriendType;
Diagnosed = true;
break;
}
- ODRDiagDeclError(FirstRecord, FirstModule, FirstFriend->getFriendLoc(),
+ ODRDiagDeclError(FirstFriend->getFriendLoc(),
FirstFriend->getSourceRange(), FriendTypeFunction)
<< (FirstTSI == nullptr);
- ODRDiagDeclNote(SecondModule, SecondFriend->getFriendLoc(),
+ ODRDiagDeclNote(SecondFriend->getFriendLoc(),
SecondFriend->getSourceRange(), FriendTypeFunction)
<< (SecondTSI == nullptr);
-
Diagnosed = true;
break;
}
@@ -10873,17 +10737,24 @@ void ASTReader::diagnoseOdrViolations() {
TemplateParameterList *SecondTPL =
SecondTemplate->getTemplateParameters();
- if (FirstTPL->size() != SecondTPL->size()) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateDifferentNumberParameters)
- << FirstTemplate << FirstTPL->size();
- ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateDifferentNumberParameters)
- << SecondTemplate << SecondTPL->size();
+ auto DiagTemplateError = [&ODRDiagDeclError, FirstTemplate](
+ ODRCXXRecordDifference DiffType) {
+ return ODRDiagDeclError(FirstTemplate->getLocation(),
+ FirstTemplate->getSourceRange(), DiffType)
+ << FirstTemplate;
+ };
+ auto DiagTemplateNote = [&ODRDiagDeclNote, SecondTemplate](
+ ODRCXXRecordDifference DiffType) {
+ return ODRDiagDeclNote(SecondTemplate->getLocation(),
+ SecondTemplate->getSourceRange(), DiffType)
+ << SecondTemplate;
+ };
+ if (FirstTPL->size() != SecondTPL->size()) {
+ DiagTemplateError(FunctionTemplateDifferentNumberParameters)
+ << FirstTPL->size();
+ DiagTemplateNote(FunctionTemplateDifferentNumberParameters)
+ << SecondTPL->size();
Diagnosed = true;
break;
}
@@ -10912,31 +10783,19 @@ void ASTReader::diagnoseOdrViolations() {
}
};
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentKind)
- << FirstTemplate << (i + 1) << GetParamType(FirstParam);
- ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentKind)
- << SecondTemplate << (i + 1) << GetParamType(SecondParam);
-
+ DiagTemplateError(FunctionTemplateParameterDifferentKind)
+ << (i + 1) << GetParamType(FirstParam);
+ DiagTemplateNote(FunctionTemplateParameterDifferentKind)
+ << (i + 1) << GetParamType(SecondParam);
ParameterMismatch = true;
break;
}
if (FirstParam->getName() != SecondParam->getName()) {
- ODRDiagDeclError(
- FirstRecord, FirstModule, FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(), FunctionTemplateParameterName)
- << FirstTemplate << (i + 1) << (bool)FirstParam->getIdentifier()
- << FirstParam;
- ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterName)
- << SecondTemplate << (i + 1)
- << (bool)SecondParam->getIdentifier() << SecondParam;
+ DiagTemplateError(FunctionTemplateParameterName)
+ << (i + 1) << (bool)FirstParam->getIdentifier() << FirstParam;
+ DiagTemplateNote(FunctionTemplateParameterName)
+ << (i + 1) << (bool)SecondParam->getIdentifier() << SecondParam;
ParameterMismatch = true;
break;
}
@@ -10954,15 +10813,10 @@ void ASTReader::diagnoseOdrViolations() {
SecondTTPD->hasDefaultArgument() &&
!SecondTTPD->defaultArgumentWasInherited();
if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterSingleDefaultArgument)
- << FirstTemplate << (i + 1) << HasFirstDefaultArgument;
- ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterSingleDefaultArgument)
- << SecondTemplate << (i + 1) << HasSecondDefaultArgument;
+ DiagTemplateError(FunctionTemplateParameterSingleDefaultArgument)
+ << (i + 1) << HasFirstDefaultArgument;
+ DiagTemplateNote(FunctionTemplateParameterSingleDefaultArgument)
+ << (i + 1) << HasSecondDefaultArgument;
ParameterMismatch = true;
break;
}
@@ -10972,16 +10826,12 @@ void ASTReader::diagnoseOdrViolations() {
QualType SecondType = SecondTTPD->getDefaultArgument();
if (ComputeQualTypeODRHash(FirstType) !=
ComputeQualTypeODRHash(SecondType)) {
- ODRDiagDeclError(
- FirstRecord, FirstModule, FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
+ DiagTemplateError(
FunctionTemplateParameterDifferentDefaultArgument)
- << FirstTemplate << (i + 1) << FirstType;
- ODRDiagDeclNote(
- SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
+ << (i + 1) << FirstType;
+ DiagTemplateNote(
FunctionTemplateParameterDifferentDefaultArgument)
- << SecondTemplate << (i + 1) << SecondType;
+ << (i + 1) << SecondType;
ParameterMismatch = true;
break;
}
@@ -10989,15 +10839,10 @@ void ASTReader::diagnoseOdrViolations() {
if (FirstTTPD->isParameterPack() !=
SecondTTPD->isParameterPack()) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplatePackParameter)
- << FirstTemplate << (i + 1) << FirstTTPD->isParameterPack();
- ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplatePackParameter)
- << SecondTemplate << (i + 1) << SecondTTPD->isParameterPack();
+ DiagTemplateError(FunctionTemplatePackParameter)
+ << (i + 1) << FirstTTPD->isParameterPack();
+ DiagTemplateNote(FunctionTemplatePackParameter)
+ << (i + 1) << SecondTTPD->isParameterPack();
ParameterMismatch = true;
break;
}
@@ -11017,15 +10862,10 @@ void ASTReader::diagnoseOdrViolations() {
if (ComputeTemplateParameterListODRHash(FirstTPL) !=
ComputeTemplateParameterListODRHash(SecondTPL)) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentType)
- << FirstTemplate << (i + 1);
- ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentType)
- << SecondTemplate << (i + 1);
+ DiagTemplateError(FunctionTemplateParameterDifferentType)
+ << (i + 1);
+ DiagTemplateNote(FunctionTemplateParameterDifferentType)
+ << (i + 1);
ParameterMismatch = true;
break;
}
@@ -11037,15 +10877,10 @@ void ASTReader::diagnoseOdrViolations() {
SecondTTPD->hasDefaultArgument() &&
!SecondTTPD->defaultArgumentWasInherited();
if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterSingleDefaultArgument)
- << FirstTemplate << (i + 1) << HasFirstDefaultArgument;
- ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterSingleDefaultArgument)
- << SecondTemplate << (i + 1) << HasSecondDefaultArgument;
+ DiagTemplateError(FunctionTemplateParameterSingleDefaultArgument)
+ << (i + 1) << HasFirstDefaultArgument;
+ DiagTemplateNote(FunctionTemplateParameterSingleDefaultArgument)
+ << (i + 1) << HasSecondDefaultArgument;
ParameterMismatch = true;
break;
}
@@ -11057,16 +10892,12 @@ void ASTReader::diagnoseOdrViolations() {
SecondTTPD->getDefaultArgument().getArgument();
if (ComputeTemplateArgumentODRHash(FirstTA) !=
ComputeTemplateArgumentODRHash(SecondTA)) {
- ODRDiagDeclError(
- FirstRecord, FirstModule, FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
+ DiagTemplateError(
FunctionTemplateParameterDifferentDefaultArgument)
- << FirstTemplate << (i + 1) << FirstTA;
- ODRDiagDeclNote(
- SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
+ << (i + 1) << FirstTA;
+ DiagTemplateNote(
FunctionTemplateParameterDifferentDefaultArgument)
- << SecondTemplate << (i + 1) << SecondTA;
+ << (i + 1) << SecondTA;
ParameterMismatch = true;
break;
}
@@ -11074,15 +10905,10 @@ void ASTReader::diagnoseOdrViolations() {
if (FirstTTPD->isParameterPack() !=
SecondTTPD->isParameterPack()) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplatePackParameter)
- << FirstTemplate << (i + 1) << FirstTTPD->isParameterPack();
- ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplatePackParameter)
- << SecondTemplate << (i + 1) << SecondTTPD->isParameterPack();
+ DiagTemplateError(FunctionTemplatePackParameter)
+ << (i + 1) << FirstTTPD->isParameterPack();
+ DiagTemplateNote(FunctionTemplatePackParameter)
+ << (i + 1) << SecondTTPD->isParameterPack();
ParameterMismatch = true;
break;
}
@@ -11099,15 +10925,10 @@ void ASTReader::diagnoseOdrViolations() {
QualType SecondType = SecondNTTPD->getType();
if (ComputeQualTypeODRHash(FirstType) !=
ComputeQualTypeODRHash(SecondType)) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentType)
- << FirstTemplate << (i + 1);
- ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterDifferentType)
- << SecondTemplate << (i + 1);
+ DiagTemplateError(FunctionTemplateParameterDifferentType)
+ << (i + 1);
+ DiagTemplateNote(FunctionTemplateParameterDifferentType)
+ << (i + 1);
ParameterMismatch = true;
break;
}
@@ -11119,15 +10940,10 @@ void ASTReader::diagnoseOdrViolations() {
SecondNTTPD->hasDefaultArgument() &&
!SecondNTTPD->defaultArgumentWasInherited();
if (HasFirstDefaultArgument != HasSecondDefaultArgument) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplateParameterSingleDefaultArgument)
- << FirstTemplate << (i + 1) << HasFirstDefaultArgument;
- ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplateParameterSingleDefaultArgument)
- << SecondTemplate << (i + 1) << HasSecondDefaultArgument;
+ DiagTemplateError(FunctionTemplateParameterSingleDefaultArgument)
+ << (i + 1) << HasFirstDefaultArgument;
+ DiagTemplateNote(FunctionTemplateParameterSingleDefaultArgument)
+ << (i + 1) << HasSecondDefaultArgument;
ParameterMismatch = true;
break;
}
@@ -11137,16 +10953,12 @@ void ASTReader::diagnoseOdrViolations() {
Expr *SecondDefaultArgument = SecondNTTPD->getDefaultArgument();
if (ComputeODRHash(FirstDefaultArgument) !=
ComputeODRHash(SecondDefaultArgument)) {
- ODRDiagDeclError(
- FirstRecord, FirstModule, FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
+ DiagTemplateError(
FunctionTemplateParameterDifferentDefaultArgument)
- << FirstTemplate << (i + 1) << FirstDefaultArgument;
- ODRDiagDeclNote(
- SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
+ << (i + 1) << FirstDefaultArgument;
+ DiagTemplateNote(
FunctionTemplateParameterDifferentDefaultArgument)
- << SecondTemplate << (i + 1) << SecondDefaultArgument;
+ << (i + 1) << SecondDefaultArgument;
ParameterMismatch = true;
break;
}
@@ -11154,16 +10966,10 @@ void ASTReader::diagnoseOdrViolations() {
if (FirstNTTPD->isParameterPack() !=
SecondNTTPD->isParameterPack()) {
- ODRDiagDeclError(FirstRecord, FirstModule,
- FirstTemplate->getLocation(),
- FirstTemplate->getSourceRange(),
- FunctionTemplatePackParameter)
- << FirstTemplate << (i + 1) << FirstNTTPD->isParameterPack();
- ODRDiagDeclNote(SecondModule, SecondTemplate->getLocation(),
- SecondTemplate->getSourceRange(),
- FunctionTemplatePackParameter)
- << SecondTemplate << (i + 1)
- << SecondNTTPD->isParameterPack();
+ DiagTemplateError(FunctionTemplatePackParameter)
+ << (i + 1) << FirstNTTPD->isParameterPack();
+ DiagTemplateNote(FunctionTemplatePackParameter)
+ << (i + 1) << SecondNTTPD->isParameterPack();
ParameterMismatch = true;
break;
}
@@ -11369,8 +11175,8 @@ void ASTReader::diagnoseOdrViolations() {
DifferentSpecifiedTypes,
DifferentNumberEnumConstants,
EnumConstantName,
- EnumConstantSingleInitilizer,
- EnumConstantDifferentInitilizer,
+ EnumConstantSingleInitializer,
+ EnumConstantDifferentInitializer,
};
// If we've already pointed out a specific problem with this enum, don't
@@ -11407,26 +11213,23 @@ void ASTReader::diagnoseOdrViolations() {
getOwningModuleNameForDiagnostic(SecondEnum);
auto ODRDiagError = [FirstEnum, &FirstModule,
- this](SourceLocation Loc, SourceRange Range,
+ this](const auto *DiagAnchor,
ODREnumDifference DiffType) {
- return Diag(Loc, diag::err_module_odr_violation_enum)
- << FirstEnum << FirstModule.empty() << FirstModule << Range
- << DiffType;
+ return Diag(DiagAnchor->getLocation(),
+ diag::err_module_odr_violation_enum)
+ << FirstEnum << FirstModule.empty() << FirstModule
+ << DiagAnchor->getSourceRange() << DiffType;
};
- auto ODRDiagNote = [&SecondModule, this](SourceLocation Loc,
- SourceRange Range,
+ auto ODRDiagNote = [&SecondModule, this](const auto *DiagAnchor,
ODREnumDifference DiffType) {
- return Diag(Loc, diag::note_module_odr_violation_enum)
- << SecondModule << Range << DiffType;
+ return Diag(DiagAnchor->getLocation(),
+ diag::note_module_odr_violation_enum)
+ << SecondModule << DiagAnchor->getSourceRange() << DiffType;
};
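// Editor's sketch of the "anchor" signature change above: the lambdas now
// take any node pointer exposing getLocation()/getSourceRange() (via a
// generic `const auto *` parameter) instead of a separate location and
// range, so every call site passes a single argument. Types below are
// illustrative, not clang's.
#include <iostream>

struct FakeDecl {
  int getLocation() const { return 10; }
  const char *getSourceRange() const { return "[10,12)"; }
};

int main() {
  auto DiagError = [](const auto *DiagAnchor, const char *DiffType) {
    std::cout << DiagAnchor->getLocation() << ' '
              << DiagAnchor->getSourceRange() << ' ' << DiffType << '\n';
  };
  FakeDecl E;
  DiagError(&E, "scoped-enum-mismatch"); // loc/range derived from the node
}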
if (FirstEnum->isScoped() != SecondEnum->isScoped()) {
- ODRDiagError(FirstEnum->getLocation(), FirstEnum->getSourceRange(),
- SingleScopedEnum)
- << FirstEnum->isScoped();
- ODRDiagNote(SecondEnum->getLocation(), SecondEnum->getSourceRange(),
- SingleScopedEnum)
- << SecondEnum->isScoped();
+ ODRDiagError(FirstEnum, SingleScopedEnum) << FirstEnum->isScoped();
+ ODRDiagNote(SecondEnum, SingleScopedEnum) << SecondEnum->isScoped();
Diagnosed = true;
continue;
}
@@ -11434,11 +11237,9 @@ void ASTReader::diagnoseOdrViolations() {
if (FirstEnum->isScoped() && SecondEnum->isScoped()) {
if (FirstEnum->isScopedUsingClassTag() !=
SecondEnum->isScopedUsingClassTag()) {
- ODRDiagError(FirstEnum->getLocation(), FirstEnum->getSourceRange(),
- EnumTagKeywordMismatch)
+ ODRDiagError(FirstEnum, EnumTagKeywordMismatch)
<< FirstEnum->isScopedUsingClassTag();
- ODRDiagNote(SecondEnum->getLocation(), SecondEnum->getSourceRange(),
- EnumTagKeywordMismatch)
+ ODRDiagNote(SecondEnum, EnumTagKeywordMismatch)
<< SecondEnum->isScopedUsingClassTag();
Diagnosed = true;
continue;
@@ -11454,24 +11255,20 @@ void ASTReader::diagnoseOdrViolations() {
? SecondEnum->getIntegerTypeSourceInfo()->getType()
: QualType();
if (FirstUnderlyingType.isNull() != SecondUnderlyingType.isNull()) {
- ODRDiagError(FirstEnum->getLocation(), FirstEnum->getSourceRange(),
- SingleSpecifiedType)
- << !FirstUnderlyingType.isNull();
- ODRDiagNote(SecondEnum->getLocation(), SecondEnum->getSourceRange(),
- SingleSpecifiedType)
- << !SecondUnderlyingType.isNull();
- Diagnosed = true;
- continue;
+ ODRDiagError(FirstEnum, SingleSpecifiedType)
+ << !FirstUnderlyingType.isNull();
+ ODRDiagNote(SecondEnum, SingleSpecifiedType)
+ << !SecondUnderlyingType.isNull();
+ Diagnosed = true;
+ continue;
}
if (!FirstUnderlyingType.isNull() && !SecondUnderlyingType.isNull()) {
if (ComputeQualTypeODRHash(FirstUnderlyingType) !=
ComputeQualTypeODRHash(SecondUnderlyingType)) {
- ODRDiagError(FirstEnum->getLocation(), FirstEnum->getSourceRange(),
- DifferentSpecifiedTypes)
+ ODRDiagError(FirstEnum, DifferentSpecifiedTypes)
<< FirstUnderlyingType;
- ODRDiagNote(SecondEnum->getLocation(), SecondEnum->getSourceRange(),
- DifferentSpecifiedTypes)
+ ODRDiagNote(SecondEnum, DifferentSpecifiedTypes)
<< SecondUnderlyingType;
Diagnosed = true;
continue;
@@ -11482,11 +11279,9 @@ void ASTReader::diagnoseOdrViolations() {
PopulateHashes(SecondHashes, SecondEnum);
if (FirstHashes.size() != SecondHashes.size()) {
- ODRDiagError(FirstEnum->getLocation(), FirstEnum->getSourceRange(),
- DifferentNumberEnumConstants)
+ ODRDiagError(FirstEnum, DifferentNumberEnumConstants)
<< (int)FirstHashes.size();
- ODRDiagNote(SecondEnum->getLocation(), SecondEnum->getSourceRange(),
- DifferentNumberEnumConstants)
+ ODRDiagNote(SecondEnum, DifferentNumberEnumConstants)
<< (int)SecondHashes.size();
Diagnosed = true;
continue;
@@ -11501,11 +11296,9 @@ void ASTReader::diagnoseOdrViolations() {
if (FirstEnumConstant->getDeclName() !=
SecondEnumConstant->getDeclName()) {
- ODRDiagError(FirstEnumConstant->getLocation(),
- FirstEnumConstant->getSourceRange(), EnumConstantName)
+ ODRDiagError(FirstEnumConstant, EnumConstantName)
<< I + 1 << FirstEnumConstant;
- ODRDiagNote(SecondEnumConstant->getLocation(),
- SecondEnumConstant->getSourceRange(), EnumConstantName)
+ ODRDiagNote(SecondEnumConstant, EnumConstantName)
<< I + 1 << SecondEnumConstant;
Diagnosed = true;
break;
@@ -11517,26 +11310,18 @@ void ASTReader::diagnoseOdrViolations() {
continue;
if (!FirstInit || !SecondInit) {
- ODRDiagError(FirstEnumConstant->getLocation(),
- FirstEnumConstant->getSourceRange(),
- EnumConstantSingleInitilizer)
+ ODRDiagError(FirstEnumConstant, EnumConstantSingleInitializer)
<< I + 1 << FirstEnumConstant << (FirstInit != nullptr);
- ODRDiagNote(SecondEnumConstant->getLocation(),
- SecondEnumConstant->getSourceRange(),
- EnumConstantSingleInitilizer)
+ ODRDiagNote(SecondEnumConstant, EnumConstantSingleInitializer)
<< I + 1 << SecondEnumConstant << (SecondInit != nullptr);
Diagnosed = true;
break;
}
if (ComputeODRHash(FirstInit) != ComputeODRHash(SecondInit)) {
- ODRDiagError(FirstEnumConstant->getLocation(),
- FirstEnumConstant->getSourceRange(),
- EnumConstantDifferentInitilizer)
+ ODRDiagError(FirstEnumConstant, EnumConstantDifferentInitializer)
<< I + 1 << FirstEnumConstant;
- ODRDiagNote(SecondEnumConstant->getLocation(),
- SecondEnumConstant->getSourceRange(),
- EnumConstantDifferentInitilizer)
+ ODRDiagNote(SecondEnumConstant, EnumConstantDifferentInitializer)
<< I + 1 << SecondEnumConstant;
Diagnosed = true;
break;
@@ -11954,6 +11739,15 @@ OMPClause *OMPClauseReader::readClause() {
C = OMPIsDevicePtrClause::CreateEmpty(Context, Sizes);
break;
}
+ case llvm::omp::OMPC_has_device_addr: {
+ OMPMappableExprListSizeTy Sizes;
+ Sizes.NumVars = Record.readInt();
+ Sizes.NumUniqueDeclarations = Record.readInt();
+ Sizes.NumComponentLists = Record.readInt();
+ Sizes.NumComponents = Record.readInt();
+ C = OMPHasDeviceAddrClause::CreateEmpty(Context, Sizes);
+ break;
+ }
case llvm::omp::OMPC_allocate:
C = OMPAllocateClause::CreateEmpty(Context, Record.readInt());
break;
@@ -12523,6 +12317,7 @@ void OMPClauseReader::VisitOMPDependClause(OMPDependClause *C) {
static_cast<OpenMPDependClauseKind>(Record.readInt()));
C->setDependencyLoc(Record.readSourceLocation());
C->setColonLoc(Record.readSourceLocation());
+ C->setOmpAllMemoryLoc(Record.readSourceLocation());
unsigned NumVars = C->varlist_size();
SmallVector<Expr *, 16> Vars;
Vars.reserve(NumVars);
@@ -12914,6 +12709,49 @@ void OMPClauseReader::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
C->setComponents(Components, ListSizes);
}
+void OMPClauseReader::VisitOMPHasDeviceAddrClause(OMPHasDeviceAddrClause *C) {
+ C->setLParenLoc(Record.readSourceLocation());
+ auto NumVars = C->varlist_size();
+ auto UniqueDecls = C->getUniqueDeclarationsNum();
+ auto TotalLists = C->getTotalComponentListNum();
+ auto TotalComponents = C->getTotalComponentsNum();
+
+ SmallVector<Expr *, 16> Vars;
+ Vars.reserve(NumVars);
+ for (unsigned I = 0; I != NumVars; ++I)
+ Vars.push_back(Record.readSubExpr());
+ C->setVarRefs(Vars);
+ Vars.clear();
+
+ SmallVector<ValueDecl *, 16> Decls;
+ Decls.reserve(UniqueDecls);
+ for (unsigned I = 0; I < UniqueDecls; ++I)
+ Decls.push_back(Record.readDeclAs<ValueDecl>());
+ C->setUniqueDecls(Decls);
+
+ SmallVector<unsigned, 16> ListsPerDecl;
+ ListsPerDecl.reserve(UniqueDecls);
+ for (unsigned I = 0; I < UniqueDecls; ++I)
+ ListsPerDecl.push_back(Record.readInt());
+ C->setDeclNumLists(ListsPerDecl);
+
+ SmallVector<unsigned, 32> ListSizes;
+ ListSizes.reserve(TotalLists);
+ for (unsigned i = 0; i < TotalLists; ++i)
+ ListSizes.push_back(Record.readInt());
+ C->setComponentListSizes(ListSizes);
+
+ SmallVector<OMPClauseMappableExprCommon::MappableComponent, 32> Components;
+ Components.reserve(TotalComponents);
+ for (unsigned I = 0; I < TotalComponents; ++I) {
+ Expr *AssociatedExpr = Record.readSubExpr();
+ auto *AssociatedDecl = Record.readDeclAs<ValueDecl>();
+ Components.emplace_back(AssociatedExpr, AssociatedDecl,
+ /*IsNonContiguous=*/false);
+ }
+ C->setComponents(Components, ListSizes);
+}
+
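// Editor's sketch of the length-prefixed layout VisitOMPHasDeviceAddrClause
// consumes: the counts (vars, unique decls, component lists, components)
// come first, then each list is read with a reserve-and-loop. Cursor is a
// hypothetical stand-in for ASTRecordReader.
#include <cstdint>
#include <vector>

struct Cursor {
  const uint64_t *P;
  uint64_t readInt() { return *P++; }
};

std::vector<uint64_t> readList(Cursor &C, uint64_t N) {
  std::vector<uint64_t> Vals;
  Vals.reserve(N); // count is known up front, so no reallocation
  for (uint64_t I = 0; I != N; ++I)
    Vals.push_back(C.readInt());
  return Vals;
}

int main() {
  const uint64_t Data[] = {2, 7, 9}; // [count][payload...]
  Cursor C{Data};
  uint64_t N = C.readInt();
  auto Vars = readList(C, N); // {7, 9}
}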
void OMPClauseReader::VisitOMPNontemporalClause(OMPNontemporalClause *C) {
C->setLParenLoc(Record.readSourceLocation());
unsigned NumVars = C->varlist_size();
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
index 5d63a26132b7..b4506da2bb2b 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -36,6 +36,7 @@
#include "clang/AST/Type.h"
#include "clang/AST/UnresolvedSet.h"
#include "clang/Basic/AttrKinds.h"
+#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/ExceptionSpecificationType.h"
#include "clang/Basic/IdentifierTable.h"
#include "clang/Basic/LLVM.h"
@@ -370,6 +371,7 @@ namespace clang {
void VisitFieldDecl(FieldDecl *FD);
void VisitMSPropertyDecl(MSPropertyDecl *FD);
void VisitMSGuidDecl(MSGuidDecl *D);
+ void VisitUnnamedGlobalConstantDecl(UnnamedGlobalConstantDecl *D);
void VisitTemplateParamObjectDecl(TemplateParamObjectDecl *D);
void VisitIndirectFieldDecl(IndirectFieldDecl *FD);
RedeclarableResult VisitVarDeclImpl(VarDecl *D);
@@ -603,15 +605,27 @@ void ASTDeclReader::VisitDecl(Decl *D) {
D->setTopLevelDeclInObjCContainer(Record.readInt());
D->setAccess((AccessSpecifier)Record.readInt());
D->FromASTFile = true;
- bool ModulePrivate = Record.readInt();
+ auto ModuleOwnership = (Decl::ModuleOwnershipKind)Record.readInt();
+ bool ModulePrivate =
+ (ModuleOwnership == Decl::ModuleOwnershipKind::ModulePrivate);
// Determine whether this declaration is part of a (sub)module. If so, it
// may not yet be visible.
if (unsigned SubmoduleID = readSubmoduleID()) {
+
+ switch (ModuleOwnership) {
+ case Decl::ModuleOwnershipKind::Visible:
+ ModuleOwnership = Decl::ModuleOwnershipKind::VisibleWhenImported;
+ break;
+ case Decl::ModuleOwnershipKind::Unowned:
+ case Decl::ModuleOwnershipKind::VisibleWhenImported:
+ case Decl::ModuleOwnershipKind::ReachableWhenImported:
+ case Decl::ModuleOwnershipKind::ModulePrivate:
+ break;
+ }
+
+ D->setModuleOwnershipKind(ModuleOwnership);
// Store the owning submodule ID in the declaration.
- D->setModuleOwnershipKind(
- ModulePrivate ? Decl::ModuleOwnershipKind::ModulePrivate
- : Decl::ModuleOwnershipKind::VisibleWhenImported);
D->setOwningModuleID(SubmoduleID);
if (ModulePrivate) {
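// Editor's sketch of the ownership upgrade in the hunk above: the full
// ModuleOwnershipKind is now serialized, and a stored kind of "Visible" is
// promoted to "VisibleWhenImported" for decls owned by a submodule, since
// plain visibility is only meaningful inside the compilation that wrote
// the AST file. Enum values mirror the names used in the patch.
enum class Ownership {
  Unowned, Visible, VisibleWhenImported, ReachableWhenImported, ModulePrivate
};

Ownership upgradeForSubmodule(Ownership K) {
  switch (K) {
  case Ownership::Visible:
    return Ownership::VisibleWhenImported; // re-gate on import after reload
  default:
    return K; // every other kind round-trips unchanged
  }
}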
@@ -773,7 +787,7 @@ void ASTDeclReader::VisitEnumDecl(EnumDecl *ED) {
}
if (OldDef) {
Reader.MergedDeclContexts.insert(std::make_pair(ED, OldDef));
- ED->setCompleteDefinition(false);
+ ED->demoteThisDefinitionToDeclaration();
Reader.mergeDefinitionVisibility(OldDef, ED);
if (OldDef->getODRHash() != ED->getODRHash())
Reader.PendingEnumOdrMergeFailures[OldDef].push_back(ED);
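// Editor's sketch of the demote-and-defer pattern above: a second
// definition read from another module is demoted to a declaration, and if
// its ODR hash disagrees with the kept definition the pair is queued for a
// later diagnostic pass. Types are simplified stand-ins.
#include <map>
#include <vector>

struct TagDef {
  unsigned ODRHash = 0;
  bool CompleteDefinition = true;
};

std::map<TagDef *, std::vector<TagDef *>> PendingOdrMergeFailures;

void mergeDefinitions(TagDef *OldDef, TagDef *NewDef) {
  NewDef->CompleteDefinition = false; // demote the duplicate
  if (OldDef->ODRHash != NewDef->ODRHash)
    PendingOdrMergeFailures[OldDef].push_back(NewDef); // diagnose later
}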
@@ -828,7 +842,7 @@ void ASTDeclReader::VisitRecordDecl(RecordDecl *RD) {
}
if (OldDef) {
Reader.MergedDeclContexts.insert(std::make_pair(RD, OldDef));
- RD->setCompleteDefinition(false);
+ RD->demoteThisDefinitionToDeclaration();
Reader.mergeDefinitionVisibility(OldDef, RD);
} else {
OldDef = RD;
@@ -1231,6 +1245,39 @@ void ASTDeclReader::VisitObjCIvarDecl(ObjCIvarDecl *IVD) {
IVD->setNextIvar(nullptr);
bool synth = Record.readInt();
IVD->setSynthesize(synth);
+
+ // Check ivar redeclaration.
+ if (IVD->isInvalidDecl())
+ return;
+ // Don't check ObjCInterfaceDecl as interfaces are named and mismatches can be
+ // detected in VisitObjCInterfaceDecl. Here we are looking for redeclarations
+ // in extensions.
+ if (isa<ObjCInterfaceDecl>(IVD->getDeclContext()))
+ return;
+ ObjCInterfaceDecl *CanonIntf =
+ IVD->getContainingInterface()->getCanonicalDecl();
+ IdentifierInfo *II = IVD->getIdentifier();
+ ObjCIvarDecl *PrevIvar = CanonIntf->lookupInstanceVariable(II);
+ if (PrevIvar && PrevIvar != IVD) {
+ auto *ParentExt = dyn_cast<ObjCCategoryDecl>(IVD->getDeclContext());
+ auto *PrevParentExt =
+ dyn_cast<ObjCCategoryDecl>(PrevIvar->getDeclContext());
+ if (ParentExt && PrevParentExt) {
+ // Postpone diagnostic as we should merge identical extensions from
+ // different modules.
+ Reader
+ .PendingObjCExtensionIvarRedeclarations[std::make_pair(ParentExt,
+ PrevParentExt)]
+ .push_back(std::make_pair(IVD, PrevIvar));
+ } else if (ParentExt || PrevParentExt) {
+ // Duplicate ivars in extension + implementation are never compatible.
+ // Compatibility of implementation + implementation should be handled in
+ // VisitObjCImplementationDecl.
+ Reader.Diag(IVD->getLocation(), diag::err_duplicate_ivar_declaration)
+ << II;
+ Reader.Diag(PrevIvar->getLocation(), diag::note_previous_definition);
+ }
+ }
}
void ASTDeclReader::ReadObjCDefinitionData(
@@ -1428,6 +1475,17 @@ void ASTDeclReader::VisitMSGuidDecl(MSGuidDecl *D) {
Reader.getContext().setPrimaryMergedDecl(D, Existing->getCanonicalDecl());
}
+void ASTDeclReader::VisitUnnamedGlobalConstantDecl(
+ UnnamedGlobalConstantDecl *D) {
+ VisitValueDecl(D);
+ D->Value = Record.readAPValue();
+
+ // Add this to the AST context's lookup structure, and merge if needed.
+ if (UnnamedGlobalConstantDecl *Existing =
+ Reader.getContext().UnnamedGlobalConstantDecls.GetOrInsertNode(D))
+ Reader.getContext().setPrimaryMergedDecl(D, Existing->getCanonicalDecl());
+}
+
void ASTDeclReader::VisitTemplateParamObjectDecl(TemplateParamObjectDecl *D) {
VisitValueDecl(D);
D->Value = Record.readAPValue();
@@ -1801,7 +1859,7 @@ void ASTDeclReader::ReadCXXDefinitionData(
using Capture = LambdaCapture;
auto &Lambda = static_cast<CXXRecordDecl::LambdaDefinitionData &>(Data);
- Lambda.Dependent = Record.readInt();
+ Lambda.DependencyKind = Record.readInt();
Lambda.IsGenericLambda = Record.readInt();
Lambda.CaptureDefault = Record.readInt();
Lambda.NumCaptures = Record.readInt();
@@ -1917,8 +1975,8 @@ void ASTDeclReader::ReadCXXRecordDefinition(CXXRecordDecl *D, bool Update) {
// allocate the appropriate DefinitionData structure.
bool IsLambda = Record.readInt();
if (IsLambda)
- DD = new (C) CXXRecordDecl::LambdaDefinitionData(D, nullptr, false, false,
- LCD_None);
+ DD = new (C) CXXRecordDecl::LambdaDefinitionData(
+ D, nullptr, CXXRecordDecl::LDK_Unknown, false, LCD_None);
else
DD = new (C) struct CXXRecordDecl::DefinitionData(D);
@@ -2370,13 +2428,17 @@ ASTDeclReader::VisitVarTemplateSpecializationDeclImpl(
if (writtenAsCanonicalDecl) {
auto *CanonPattern = readDeclAs<VarTemplateDecl>();
if (D->isCanonicalDecl()) { // It's kept in the folding set.
- // FIXME: If it's already present, merge it.
+ VarTemplateSpecializationDecl *CanonSpec;
if (auto *Partial = dyn_cast<VarTemplatePartialSpecializationDecl>(D)) {
- CanonPattern->getCommonPtr()->PartialSpecializations
- .GetOrInsertNode(Partial);
+ CanonSpec = CanonPattern->getCommonPtr()
+ ->PartialSpecializations.GetOrInsertNode(Partial);
} else {
- CanonPattern->getCommonPtr()->Specializations.GetOrInsertNode(D);
+ CanonSpec =
+ CanonPattern->getCommonPtr()->Specializations.GetOrInsertNode(D);
}
+ // If we already have a matching specialization, merge it.
+ if (CanonSpec != D)
+ mergeRedeclarable<VarDecl>(D, CanonSpec, Redecl);
}
}
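// Editor's sketch of the GetOrInsertNode idiom used above: insertion into
// the folding set returns the node already registered under the same
// profile, so `CanonSpec != D` means an equivalent specialization exists
// and D should be merged into it. The map stands in for llvm::FoldingSet.
#include <string>
#include <unordered_map>

struct Spec { std::string Profile; };

std::unordered_map<std::string, Spec *> Specializations;

Spec *getOrInsertNode(Spec *S) {
  return Specializations.try_emplace(S->Profile, S).first->second;
}

void registerSpec(Spec *D) {
  if (Spec *Canon = getOrInsertNode(D); Canon != D) {
    // merge D into Canon (mergeRedeclarable in the patch above)
  }
}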
@@ -3053,6 +3115,8 @@ ASTDeclReader::getPrimaryDCForAnonymousDecl(DeclContext *LexicalDC) {
if (auto *RD = dyn_cast<CXXRecordDecl>(LexicalDC)) {
auto *DD = RD->getCanonicalDecl()->DefinitionData;
return DD ? DD->Definition : nullptr;
+ } else if (auto *OID = dyn_cast<ObjCInterfaceDecl>(LexicalDC)) {
+ return OID->getCanonicalDecl()->getDefinition();
}
// For anything else, walk its merged redeclarations looking for a definition.
@@ -3244,6 +3308,13 @@ void ASTDeclReader::mergeInheritableAttributes(ASTReader &Reader, Decl *D,
NewAttr->setInherited(true);
D->addAttr(NewAttr);
}
+
+ const auto *AA = Previous->getAttr<AvailabilityAttr>();
+ if (AA && !D->hasAttr<AvailabilityAttr>()) {
+ NewAttr = AA->clone(Context);
+ NewAttr->setInherited(true);
+ D->addAttr(NewAttr);
+ }
}
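// Editor's sketch of the inherit-on-merge step above: when the previous
// declaration carries an attribute the new one lacks, a clone is attached
// and flagged as inherited so it can be distinguished from an attribute
// the user wrote. Minimal stand-in types, not clang's Attr hierarchy.
struct Attr {
  bool Inherited = false;
  Attr *clone() const { return new Attr(*this); }
};

void inheritIfMissing(const Attr *PrevAttr, Attr *&NewDeclSlot) {
  if (PrevAttr && !NewDeclSlot) {
    NewDeclSlot = PrevAttr->clone();
    NewDeclSlot->Inherited = true; // marks the copy as propagated
  }
}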
template<typename DeclT>
@@ -3709,6 +3780,9 @@ Decl *ASTReader::ReadDeclRecord(DeclID ID) {
case DECL_MS_GUID:
D = MSGuidDecl::CreateDeserialized(Context, ID);
break;
+ case DECL_UNNAMED_GLOBAL_CONSTANT:
+ D = UnnamedGlobalConstantDecl::CreateDeserialized(Context, ID);
+ break;
case DECL_TEMPLATE_PARAM_OBJECT:
D = TemplateParamObjectDecl::CreateDeserialized(Context, ID);
break;
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTReaderStmt.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTReaderStmt.cpp
index b82a334b763c..e0ae019bf803 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTReaderStmt.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTReaderStmt.cpp
@@ -107,8 +107,7 @@ namespace clang {
/// The number of record fields required for the Expr class
/// itself.
- static const unsigned NumExprFields =
- NumStmtFields + llvm::BitWidth<ExprDependence> + 3;
+ static const unsigned NumExprFields = NumStmtFields + 4;
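// Editor's sketch of why the field count shrinks here: the five dependence
// booleans that VisitExpr used to read one-by-one (see the VisitExpr hunk
// below) are now serialized as one packed bitmask and read back with a
// single cast. Enum values are illustrative, mirroring ExprDependence.
#include <cstdint>

enum class Deps : uint8_t {
  None = 0, Type = 1, Value = 2, Instantiation = 4, UnexpandedPack = 8,
  Error = 16,
};

uint64_t serialize(Deps D) { return static_cast<uint64_t>(D); }
Deps deserialize(uint64_t Raw) { return static_cast<Deps>(Raw); }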
/// Read and initialize a ExplicitTemplateArgumentList structure.
void ReadTemplateKWAndArgsInfo(ASTTemplateKWAndArgsInfo &Args,
@@ -153,10 +152,15 @@ void ASTStmtReader::VisitCompoundStmt(CompoundStmt *S) {
VisitStmt(S);
SmallVector<Stmt *, 16> Stmts;
unsigned NumStmts = Record.readInt();
+ unsigned HasFPFeatures = Record.readInt();
+ assert(S->hasStoredFPFeatures() == HasFPFeatures);
while (NumStmts--)
Stmts.push_back(Record.readSubStmt());
S->setStmts(Stmts);
- S->CompoundStmtBits.LBraceLoc = readSourceLocation();
+ if (HasFPFeatures)
+ S->setStoredFPFeatures(
+ FPOptionsOverride::getFromOpaqueInt(Record.readInt()));
+ S->LBraceLoc = readSourceLocation();
S->RBraceLoc = readSourceLocation();
}
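// Editor's sketch of the optional-trailing-payload read above: the record
// stores [NumStmts][HasFPFeatures][stmts...][fpOverride?], and the flag
// decides whether the final word is consumed. Rec is a hypothetical
// stand-in for the record reader.
#include <cstdint>
#include <optional>
#include <vector>

struct Rec {
  std::vector<uint64_t> Data;
  size_t Idx = 0;
  uint64_t readInt() { return Data[Idx++]; }
};

std::optional<uint64_t> readCompound(Rec &R, std::vector<uint64_t> &Stmts) {
  uint64_t NumStmts = R.readInt();
  bool HasFPFeatures = R.readInt(); // presence flag mirrors the assert above
  for (uint64_t I = 0; I != NumStmts; ++I)
    Stmts.push_back(R.readInt()); // stand-in for readSubStmt()
  if (HasFPFeatures)
    return R.readInt(); // opaque FPOptionsOverride analogue
  return std::nullopt;
}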
@@ -521,26 +525,7 @@ void ASTStmtReader::VisitCapturedStmt(CapturedStmt *S) {
void ASTStmtReader::VisitExpr(Expr *E) {
VisitStmt(E);
E->setType(Record.readType());
-
- // FIXME: write and read all DependentFlags with a single call.
- bool TypeDependent = Record.readInt();
- bool ValueDependent = Record.readInt();
- bool InstantiationDependent = Record.readInt();
- bool ContainsUnexpandedTemplateParameters = Record.readInt();
- bool ContainsErrors = Record.readInt();
- auto Deps = ExprDependence::None;
- if (TypeDependent)
- Deps |= ExprDependence::Type;
- if (ValueDependent)
- Deps |= ExprDependence::Value;
- if (InstantiationDependent)
- Deps |= ExprDependence::Instantiation;
- if (ContainsUnexpandedTemplateParameters)
- Deps |= ExprDependence::UnexpandedPack;
- if (ContainsErrors)
- Deps |= ExprDependence::Error;
- E->setDependence(Deps);
-
+ E->setDependence(static_cast<ExprDependence>(Record.readInt()));
E->setValueKind(static_cast<ExprValueKind>(Record.readInt()));
E->setObjectKind(static_cast<ExprObjectKind>(Record.readInt()));
assert(Record.getIdx() == NumExprFields &&
@@ -2391,6 +2376,12 @@ void ASTStmtReader::VisitOMPParallelMasterDirective(
VisitOMPExecutableDirective(D);
}
+void ASTStmtReader::VisitOMPParallelMaskedDirective(
+ OMPParallelMaskedDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+}
+
void ASTStmtReader::VisitOMPParallelSectionsDirective(
OMPParallelSectionsDirective *D) {
VisitStmt(D);
@@ -2449,8 +2440,9 @@ void ASTStmtReader::VisitOMPOrderedDirective(OMPOrderedDirective *D) {
void ASTStmtReader::VisitOMPAtomicDirective(OMPAtomicDirective *D) {
VisitStmt(D);
VisitOMPExecutableDirective(D);
- D->IsXLHSInRHSPart = Record.readBool();
- D->IsPostfixUpdate = Record.readBool();
+ D->Flags.IsXLHSInRHSPart = Record.readBool() ? 1 : 0;
+ D->Flags.IsPostfixUpdate = Record.readBool() ? 1 : 0;
+ D->Flags.IsFailOnly = Record.readBool() ? 1 : 0;
}
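// Editor's sketch of the flag-struct change above: the directive's loose
// booleans move into 1-bit fields of a Flags struct, which is why each
// readBool() result is converted with `? 1 : 0` before assignment.
struct AtomicFlags {
  unsigned IsXLHSInRHSPart : 1;
  unsigned IsPostfixUpdate : 1;
  unsigned IsFailOnly : 1;
};

AtomicFlags readFlags(bool XLHS, bool Postfix, bool FailOnly) {
  AtomicFlags F{};
  F.IsXLHSInRHSPart = XLHS ? 1 : 0;
  F.IsPostfixUpdate = Postfix ? 1 : 0;
  F.IsFailOnly = FailOnly ? 1 : 0;
  return F;
}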
void ASTStmtReader::VisitOMPTargetDirective(OMPTargetDirective *D) {
@@ -2521,22 +2513,44 @@ void ASTStmtReader::VisitOMPMasterTaskLoopDirective(
D->setHasCancel(Record.readBool());
}
+void ASTStmtReader::VisitOMPMaskedTaskLoopDirective(
+ OMPMaskedTaskLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+ D->setHasCancel(Record.readBool());
+}
+
void ASTStmtReader::VisitOMPMasterTaskLoopSimdDirective(
OMPMasterTaskLoopSimdDirective *D) {
VisitOMPLoopDirective(D);
}
+void ASTStmtReader::VisitOMPMaskedTaskLoopSimdDirective(
+ OMPMaskedTaskLoopSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
void ASTStmtReader::VisitOMPParallelMasterTaskLoopDirective(
OMPParallelMasterTaskLoopDirective *D) {
VisitOMPLoopDirective(D);
D->setHasCancel(Record.readBool());
}
+void ASTStmtReader::VisitOMPParallelMaskedTaskLoopDirective(
+ OMPParallelMaskedTaskLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+ D->setHasCancel(Record.readBool());
+}
+
void ASTStmtReader::VisitOMPParallelMasterTaskLoopSimdDirective(
OMPParallelMasterTaskLoopSimdDirective *D) {
VisitOMPLoopDirective(D);
}
+void ASTStmtReader::VisitOMPParallelMaskedTaskLoopSimdDirective(
+ OMPParallelMaskedTaskLoopSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
void ASTStmtReader::VisitOMPDistributeDirective(OMPDistributeDirective *D) {
VisitOMPLoopDirective(D);
}
@@ -2638,6 +2652,26 @@ void ASTStmtReader::VisitOMPGenericLoopDirective(OMPGenericLoopDirective *D) {
VisitOMPLoopDirective(D);
}
+void ASTStmtReader::VisitOMPTeamsGenericLoopDirective(
+ OMPTeamsGenericLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
+void ASTStmtReader::VisitOMPTargetTeamsGenericLoopDirective(
+ OMPTargetTeamsGenericLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
+void ASTStmtReader::VisitOMPParallelGenericLoopDirective(
+ OMPParallelGenericLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
+void ASTStmtReader::VisitOMPTargetParallelGenericLoopDirective(
+ OMPTargetParallelGenericLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+}
+
//===----------------------------------------------------------------------===//
// ASTReader Implementation
//===----------------------------------------------------------------------===//
@@ -2739,7 +2773,8 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
case STMT_COMPOUND:
S = CompoundStmt::CreateEmpty(
- Context, /*NumStmts=*/Record[ASTStmtReader::NumStmtFields]);
+ Context, /*NumStmts=*/Record[ASTStmtReader::NumStmtFields],
+ /*HasFPFeatures=*/Record[ASTStmtReader::NumStmtFields + 1]);
break;
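// Editor's sketch of the two-phase deserialization this switch performs:
// size fields sit at fixed record offsets, the empty node is allocated
// from them, and ASTStmtReader's Visit* methods fill it in afterwards.
// The offsets mirror the NumStmtFields indexing used in each case.
#include <cstdint>
#include <vector>

struct SizeFields { uint64_t NumStmts; bool HasFPFeatures; };

SizeFields peekSizes(const std::vector<uint64_t> &Record,
                     size_t NumStmtFields) {
  return {Record[NumStmtFields],           // first size slot
          Record[NumStmtFields + 1] != 0}; // second slot: presence flag
}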
case STMT_CASE:
@@ -3296,6 +3331,11 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
Context, Record[ASTStmtReader::NumStmtFields], Empty);
break;
+ case STMT_OMP_PARALLEL_MASKED_DIRECTIVE:
+ S = OMPParallelMaskedDirective::CreateEmpty(
+ Context, Record[ASTStmtReader::NumStmtFields], Empty);
+ break;
+
case STMT_OMP_PARALLEL_SECTIONS_DIRECTIVE:
S = OMPParallelSectionsDirective::CreateEmpty(
Context, Record[ASTStmtReader::NumStmtFields], Empty);
@@ -3427,6 +3467,14 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
CollapsedNum, Empty);
break;
}
+
+ case STMT_OMP_MASKED_TASKLOOP_DIRECTIVE: {
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPMaskedTaskLoopDirective::CreateEmpty(Context, NumClauses,
+ CollapsedNum, Empty);
+ break;
+ }
case STMT_OMP_MASTER_TASKLOOP_SIMD_DIRECTIVE: {
unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
@@ -3436,6 +3484,14 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
}
+ case STMT_OMP_MASKED_TASKLOOP_SIMD_DIRECTIVE: {
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPMaskedTaskLoopSimdDirective::CreateEmpty(Context, NumClauses,
+ CollapsedNum, Empty);
+ break;
+ }
+
case STMT_OMP_PARALLEL_MASTER_TASKLOOP_DIRECTIVE: {
unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
@@ -3444,6 +3500,14 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
}
+ case STMT_OMP_PARALLEL_MASKED_TASKLOOP_DIRECTIVE: {
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPParallelMaskedTaskLoopDirective::CreateEmpty(Context, NumClauses,
+ CollapsedNum, Empty);
+ break;
+ }
+
case STMT_OMP_PARALLEL_MASTER_TASKLOOP_SIMD_DIRECTIVE: {
unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
@@ -3452,6 +3516,14 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
}
+ case STMT_OMP_PARALLEL_MASKED_TASKLOOP_SIMD_DIRECTIVE: {
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPParallelMaskedTaskLoopSimdDirective::CreateEmpty(
+ Context, NumClauses, CollapsedNum, Empty);
+ break;
+ }
+
case STMT_OMP_DISTRIBUTE_DIRECTIVE: {
unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
@@ -3593,6 +3665,38 @@ Stmt *ASTReader::ReadStmtFromStream(ModuleFile &F) {
break;
}
+ case STMT_OMP_TEAMS_GENERIC_LOOP_DIRECTIVE: {
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPTeamsGenericLoopDirective::CreateEmpty(Context, NumClauses,
+ CollapsedNum, Empty);
+ break;
+ }
+
+ case STMT_OMP_TARGET_TEAMS_GENERIC_LOOP_DIRECTIVE: {
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPTargetTeamsGenericLoopDirective::CreateEmpty(Context, NumClauses,
+ CollapsedNum, Empty);
+ break;
+ }
+
+ case STMT_OMP_PARALLEL_GENERIC_LOOP_DIRECTIVE: {
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPParallelGenericLoopDirective::CreateEmpty(Context, NumClauses,
+ CollapsedNum, Empty);
+ break;
+ }
+
+ case STMT_OMP_TARGET_PARALLEL_GENERIC_LOOP_DIRECTIVE: {
+ unsigned CollapsedNum = Record[ASTStmtReader::NumStmtFields];
+ unsigned NumClauses = Record[ASTStmtReader::NumStmtFields + 1];
+ S = OMPTargetParallelGenericLoopDirective::CreateEmpty(
+ Context, NumClauses, CollapsedNum, Empty);
+ break;
+ }
+
case EXPR_CXX_OPERATOR_CALL:
S = CXXOperatorCallExpr::CreateEmpty(
Context, /*NumArgs=*/Record[ASTStmtReader::NumExprFields],
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
index 763fc9537c04..1787909bb6f7 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTWriter.cpp
@@ -164,8 +164,8 @@ namespace {
std::set<const FileEntry *> GetAllModuleMaps(const HeaderSearch &HS,
Module *RootModule) {
std::set<const FileEntry *> ModuleMaps{};
- std::set<Module *> ProcessedModules;
- SmallVector<Module *> ModulesToProcess{RootModule};
+ std::set<const Module *> ProcessedModules;
+ SmallVector<const Module *> ModulesToProcess{RootModule};
SmallVector<const FileEntry *, 16> FilesByUID;
HS.getFileMgr().GetUniqueIDMapping(FilesByUID);
@@ -209,6 +209,11 @@ std::set<const FileEntry *> GetAllModuleMaps(const HeaderSearch &HS,
}
ModulesToProcess.push_back(ImportedModule);
}
+
+ for (const Module *UndeclaredModule : CurrentModule->UndeclaredUses)
+ if (UndeclaredModule &&
+ ProcessedModules.find(UndeclaredModule) == ProcessedModules.end())
+ ModulesToProcess.push_back(UndeclaredModule);
}
return ModuleMaps;
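// Editor's sketch of the worklist walk above: modules are popped from a
// stack, skipped if already processed, and both imports and (with this
// patch) undeclared uses are pushed, so the visited set, not the edge
// kind, bounds the traversal. Simplified Module stand-in.
#include <set>
#include <vector>

struct Mod {
  std::vector<Mod *> Imports;
  std::vector<Mod *> UndeclaredUses;
};

void walkModules(Mod *Root) {
  std::set<const Mod *> Processed;
  std::vector<const Mod *> Worklist{Root};
  while (!Worklist.empty()) {
    const Mod *M = Worklist.back();
    Worklist.pop_back();
    if (!Processed.insert(M).second)
      continue; // reached earlier through another edge
    for (const Mod *I : M->Imports)
      Worklist.push_back(I);
    for (const Mod *U : M->UndeclaredUses)
      if (U && Processed.find(U) == Processed.end())
        Worklist.push_back(U);
  }
}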
@@ -240,10 +245,19 @@ public:
};
class TypeLocWriter : public TypeLocVisitor<TypeLocWriter> {
+ using LocSeq = SourceLocationSequence;
+
ASTRecordWriter &Record;
+ LocSeq *Seq;
+
+ void addSourceLocation(SourceLocation Loc) {
+ Record.AddSourceLocation(Loc, Seq);
+ }
+ void addSourceRange(SourceRange Range) { Record.AddSourceRange(Range, Seq); }
public:
- TypeLocWriter(ASTRecordWriter &Record) : Record(Record) {}
+ TypeLocWriter(ASTRecordWriter &Record, LocSeq *Seq)
+ : Record(Record), Seq(Seq) {}
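// Editor's sketch of the thin-wrapper refactor in this class: the visitor
// stores the location-sequence pointer once and funnels every write
// through addSourceLocation(), so threading the Seq argument touches one
// helper instead of every Visit* call site below. Stand-in types only.
#include <iostream>

struct Sink {
  void AddSourceLocation(int Loc, int *Seq) { std::cout << Loc << '\n'; }
};

class LocWriter {
  Sink &Record;
  int *Seq; // shared compression state, captured once

  void addSourceLocation(int Loc) { Record.AddSourceLocation(Loc, Seq); }

public:
  LocWriter(Sink &Record, int *Seq) : Record(Record), Seq(Seq) {}
  void visitExample() { addSourceLocation(1); addSourceLocation(2); }
};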
#define ABSTRACT_TYPELOC(CLASS, PARENT)
#define TYPELOC(CLASS, PARENT) \
@@ -261,7 +275,7 @@ void TypeLocWriter::VisitQualifiedTypeLoc(QualifiedTypeLoc TL) {
}
void TypeLocWriter::VisitBuiltinTypeLoc(BuiltinTypeLoc TL) {
- Record.AddSourceLocation(TL.getBuiltinLoc());
+ addSourceLocation(TL.getBuiltinLoc());
if (TL.needsExtraLocalData()) {
Record.push_back(TL.getWrittenTypeSpec());
Record.push_back(static_cast<uint64_t>(TL.getWrittenSignSpec()));
@@ -271,11 +285,11 @@ void TypeLocWriter::VisitBuiltinTypeLoc(BuiltinTypeLoc TL) {
}
void TypeLocWriter::VisitComplexTypeLoc(ComplexTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitPointerTypeLoc(PointerTypeLoc TL) {
- Record.AddSourceLocation(TL.getStarLoc());
+ addSourceLocation(TL.getStarLoc());
}
void TypeLocWriter::VisitDecayedTypeLoc(DecayedTypeLoc TL) {
@@ -287,25 +301,25 @@ void TypeLocWriter::VisitAdjustedTypeLoc(AdjustedTypeLoc TL) {
}
void TypeLocWriter::VisitBlockPointerTypeLoc(BlockPointerTypeLoc TL) {
- Record.AddSourceLocation(TL.getCaretLoc());
+ addSourceLocation(TL.getCaretLoc());
}
void TypeLocWriter::VisitLValueReferenceTypeLoc(LValueReferenceTypeLoc TL) {
- Record.AddSourceLocation(TL.getAmpLoc());
+ addSourceLocation(TL.getAmpLoc());
}
void TypeLocWriter::VisitRValueReferenceTypeLoc(RValueReferenceTypeLoc TL) {
- Record.AddSourceLocation(TL.getAmpAmpLoc());
+ addSourceLocation(TL.getAmpAmpLoc());
}
void TypeLocWriter::VisitMemberPointerTypeLoc(MemberPointerTypeLoc TL) {
- Record.AddSourceLocation(TL.getStarLoc());
+ addSourceLocation(TL.getStarLoc());
Record.AddTypeSourceInfo(TL.getClassTInfo());
}
void TypeLocWriter::VisitArrayTypeLoc(ArrayTypeLoc TL) {
- Record.AddSourceLocation(TL.getLBracketLoc());
- Record.AddSourceLocation(TL.getRBracketLoc());
+ addSourceLocation(TL.getLBracketLoc());
+ addSourceLocation(TL.getRBracketLoc());
Record.push_back(TL.getSizeExpr() ? 1 : 0);
if (TL.getSizeExpr())
Record.AddStmt(TL.getSizeExpr());
@@ -330,56 +344,56 @@ void TypeLocWriter::VisitDependentSizedArrayTypeLoc(
void TypeLocWriter::VisitDependentAddressSpaceTypeLoc(
DependentAddressSpaceTypeLoc TL) {
- Record.AddSourceLocation(TL.getAttrNameLoc());
+ addSourceLocation(TL.getAttrNameLoc());
SourceRange range = TL.getAttrOperandParensRange();
- Record.AddSourceLocation(range.getBegin());
- Record.AddSourceLocation(range.getEnd());
+ addSourceLocation(range.getBegin());
+ addSourceLocation(range.getEnd());
Record.AddStmt(TL.getAttrExprOperand());
}
void TypeLocWriter::VisitDependentSizedExtVectorTypeLoc(
DependentSizedExtVectorTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitVectorTypeLoc(VectorTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitDependentVectorTypeLoc(
DependentVectorTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitExtVectorTypeLoc(ExtVectorTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitConstantMatrixTypeLoc(ConstantMatrixTypeLoc TL) {
- Record.AddSourceLocation(TL.getAttrNameLoc());
+ addSourceLocation(TL.getAttrNameLoc());
SourceRange range = TL.getAttrOperandParensRange();
- Record.AddSourceLocation(range.getBegin());
- Record.AddSourceLocation(range.getEnd());
+ addSourceLocation(range.getBegin());
+ addSourceLocation(range.getEnd());
Record.AddStmt(TL.getAttrRowOperand());
Record.AddStmt(TL.getAttrColumnOperand());
}
void TypeLocWriter::VisitDependentSizedMatrixTypeLoc(
DependentSizedMatrixTypeLoc TL) {
- Record.AddSourceLocation(TL.getAttrNameLoc());
+ addSourceLocation(TL.getAttrNameLoc());
SourceRange range = TL.getAttrOperandParensRange();
- Record.AddSourceLocation(range.getBegin());
- Record.AddSourceLocation(range.getEnd());
+ addSourceLocation(range.getBegin());
+ addSourceLocation(range.getEnd());
Record.AddStmt(TL.getAttrRowOperand());
Record.AddStmt(TL.getAttrColumnOperand());
}
void TypeLocWriter::VisitFunctionTypeLoc(FunctionTypeLoc TL) {
- Record.AddSourceLocation(TL.getLocalRangeBegin());
- Record.AddSourceLocation(TL.getLParenLoc());
- Record.AddSourceLocation(TL.getRParenLoc());
- Record.AddSourceRange(TL.getExceptionSpecRange());
- Record.AddSourceLocation(TL.getLocalRangeEnd());
+ addSourceLocation(TL.getLocalRangeBegin());
+ addSourceLocation(TL.getLParenLoc());
+ addSourceLocation(TL.getRParenLoc());
+ addSourceRange(TL.getExceptionSpecRange());
+ addSourceLocation(TL.getLocalRangeEnd());
for (unsigned i = 0, e = TL.getNumParams(); i != e; ++i)
Record.AddDeclRef(TL.getParam(i));
}
@@ -393,189 +407,193 @@ void TypeLocWriter::VisitFunctionNoProtoTypeLoc(FunctionNoProtoTypeLoc TL) {
}
void TypeLocWriter::VisitUnresolvedUsingTypeLoc(UnresolvedUsingTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitUsingTypeLoc(UsingTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitTypedefTypeLoc(TypedefTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitObjCTypeParamTypeLoc(ObjCTypeParamTypeLoc TL) {
if (TL.getNumProtocols()) {
- Record.AddSourceLocation(TL.getProtocolLAngleLoc());
- Record.AddSourceLocation(TL.getProtocolRAngleLoc());
+ addSourceLocation(TL.getProtocolLAngleLoc());
+ addSourceLocation(TL.getProtocolRAngleLoc());
}
for (unsigned i = 0, e = TL.getNumProtocols(); i != e; ++i)
- Record.AddSourceLocation(TL.getProtocolLoc(i));
+ addSourceLocation(TL.getProtocolLoc(i));
}
void TypeLocWriter::VisitTypeOfExprTypeLoc(TypeOfExprTypeLoc TL) {
- Record.AddSourceLocation(TL.getTypeofLoc());
- Record.AddSourceLocation(TL.getLParenLoc());
- Record.AddSourceLocation(TL.getRParenLoc());
+ addSourceLocation(TL.getTypeofLoc());
+ addSourceLocation(TL.getLParenLoc());
+ addSourceLocation(TL.getRParenLoc());
}
void TypeLocWriter::VisitTypeOfTypeLoc(TypeOfTypeLoc TL) {
- Record.AddSourceLocation(TL.getTypeofLoc());
- Record.AddSourceLocation(TL.getLParenLoc());
- Record.AddSourceLocation(TL.getRParenLoc());
+ addSourceLocation(TL.getTypeofLoc());
+ addSourceLocation(TL.getLParenLoc());
+ addSourceLocation(TL.getRParenLoc());
Record.AddTypeSourceInfo(TL.getUnderlyingTInfo());
}
void TypeLocWriter::VisitDecltypeTypeLoc(DecltypeTypeLoc TL) {
- Record.AddSourceLocation(TL.getDecltypeLoc());
- Record.AddSourceLocation(TL.getRParenLoc());
+ addSourceLocation(TL.getDecltypeLoc());
+ addSourceLocation(TL.getRParenLoc());
}
void TypeLocWriter::VisitUnaryTransformTypeLoc(UnaryTransformTypeLoc TL) {
- Record.AddSourceLocation(TL.getKWLoc());
- Record.AddSourceLocation(TL.getLParenLoc());
- Record.AddSourceLocation(TL.getRParenLoc());
+ addSourceLocation(TL.getKWLoc());
+ addSourceLocation(TL.getLParenLoc());
+ addSourceLocation(TL.getRParenLoc());
Record.AddTypeSourceInfo(TL.getUnderlyingTInfo());
}
void TypeLocWriter::VisitAutoTypeLoc(AutoTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
Record.push_back(TL.isConstrained());
if (TL.isConstrained()) {
Record.AddNestedNameSpecifierLoc(TL.getNestedNameSpecifierLoc());
- Record.AddSourceLocation(TL.getTemplateKWLoc());
- Record.AddSourceLocation(TL.getConceptNameLoc());
+ addSourceLocation(TL.getTemplateKWLoc());
+ addSourceLocation(TL.getConceptNameLoc());
Record.AddDeclRef(TL.getFoundDecl());
- Record.AddSourceLocation(TL.getLAngleLoc());
- Record.AddSourceLocation(TL.getRAngleLoc());
+ addSourceLocation(TL.getLAngleLoc());
+ addSourceLocation(TL.getRAngleLoc());
for (unsigned I = 0; I < TL.getNumArgs(); ++I)
Record.AddTemplateArgumentLocInfo(TL.getTypePtr()->getArg(I).getKind(),
TL.getArgLocInfo(I));
}
Record.push_back(TL.isDecltypeAuto());
if (TL.isDecltypeAuto())
- Record.AddSourceLocation(TL.getRParenLoc());
+ addSourceLocation(TL.getRParenLoc());
}
void TypeLocWriter::VisitDeducedTemplateSpecializationTypeLoc(
DeducedTemplateSpecializationTypeLoc TL) {
- Record.AddSourceLocation(TL.getTemplateNameLoc());
+ addSourceLocation(TL.getTemplateNameLoc());
}
void TypeLocWriter::VisitRecordTypeLoc(RecordTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitEnumTypeLoc(EnumTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitAttributedTypeLoc(AttributedTypeLoc TL) {
Record.AddAttr(TL.getAttr());
}
+void TypeLocWriter::VisitBTFTagAttributedTypeLoc(BTFTagAttributedTypeLoc TL) {
+ // Nothing to do.
+}
+
void TypeLocWriter::VisitTemplateTypeParmTypeLoc(TemplateTypeParmTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitSubstTemplateTypeParmTypeLoc(
SubstTemplateTypeParmTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitSubstTemplateTypeParmPackTypeLoc(
SubstTemplateTypeParmPackTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitTemplateSpecializationTypeLoc(
TemplateSpecializationTypeLoc TL) {
- Record.AddSourceLocation(TL.getTemplateKeywordLoc());
- Record.AddSourceLocation(TL.getTemplateNameLoc());
- Record.AddSourceLocation(TL.getLAngleLoc());
- Record.AddSourceLocation(TL.getRAngleLoc());
+ addSourceLocation(TL.getTemplateKeywordLoc());
+ addSourceLocation(TL.getTemplateNameLoc());
+ addSourceLocation(TL.getLAngleLoc());
+ addSourceLocation(TL.getRAngleLoc());
for (unsigned i = 0, e = TL.getNumArgs(); i != e; ++i)
Record.AddTemplateArgumentLocInfo(TL.getArgLoc(i).getArgument().getKind(),
TL.getArgLoc(i).getLocInfo());
}
void TypeLocWriter::VisitParenTypeLoc(ParenTypeLoc TL) {
- Record.AddSourceLocation(TL.getLParenLoc());
- Record.AddSourceLocation(TL.getRParenLoc());
+ addSourceLocation(TL.getLParenLoc());
+ addSourceLocation(TL.getRParenLoc());
}
void TypeLocWriter::VisitMacroQualifiedTypeLoc(MacroQualifiedTypeLoc TL) {
- Record.AddSourceLocation(TL.getExpansionLoc());
+ addSourceLocation(TL.getExpansionLoc());
}
void TypeLocWriter::VisitElaboratedTypeLoc(ElaboratedTypeLoc TL) {
- Record.AddSourceLocation(TL.getElaboratedKeywordLoc());
+ addSourceLocation(TL.getElaboratedKeywordLoc());
Record.AddNestedNameSpecifierLoc(TL.getQualifierLoc());
}
void TypeLocWriter::VisitInjectedClassNameTypeLoc(InjectedClassNameTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitDependentNameTypeLoc(DependentNameTypeLoc TL) {
- Record.AddSourceLocation(TL.getElaboratedKeywordLoc());
+ addSourceLocation(TL.getElaboratedKeywordLoc());
Record.AddNestedNameSpecifierLoc(TL.getQualifierLoc());
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitDependentTemplateSpecializationTypeLoc(
DependentTemplateSpecializationTypeLoc TL) {
- Record.AddSourceLocation(TL.getElaboratedKeywordLoc());
+ addSourceLocation(TL.getElaboratedKeywordLoc());
Record.AddNestedNameSpecifierLoc(TL.getQualifierLoc());
- Record.AddSourceLocation(TL.getTemplateKeywordLoc());
- Record.AddSourceLocation(TL.getTemplateNameLoc());
- Record.AddSourceLocation(TL.getLAngleLoc());
- Record.AddSourceLocation(TL.getRAngleLoc());
+ addSourceLocation(TL.getTemplateKeywordLoc());
+ addSourceLocation(TL.getTemplateNameLoc());
+ addSourceLocation(TL.getLAngleLoc());
+ addSourceLocation(TL.getRAngleLoc());
for (unsigned I = 0, E = TL.getNumArgs(); I != E; ++I)
Record.AddTemplateArgumentLocInfo(TL.getArgLoc(I).getArgument().getKind(),
TL.getArgLoc(I).getLocInfo());
}
void TypeLocWriter::VisitPackExpansionTypeLoc(PackExpansionTypeLoc TL) {
- Record.AddSourceLocation(TL.getEllipsisLoc());
+ addSourceLocation(TL.getEllipsisLoc());
}
void TypeLocWriter::VisitObjCInterfaceTypeLoc(ObjCInterfaceTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitObjCObjectTypeLoc(ObjCObjectTypeLoc TL) {
Record.push_back(TL.hasBaseTypeAsWritten());
- Record.AddSourceLocation(TL.getTypeArgsLAngleLoc());
- Record.AddSourceLocation(TL.getTypeArgsRAngleLoc());
+ addSourceLocation(TL.getTypeArgsLAngleLoc());
+ addSourceLocation(TL.getTypeArgsRAngleLoc());
for (unsigned i = 0, e = TL.getNumTypeArgs(); i != e; ++i)
Record.AddTypeSourceInfo(TL.getTypeArgTInfo(i));
- Record.AddSourceLocation(TL.getProtocolLAngleLoc());
- Record.AddSourceLocation(TL.getProtocolRAngleLoc());
+ addSourceLocation(TL.getProtocolLAngleLoc());
+ addSourceLocation(TL.getProtocolRAngleLoc());
for (unsigned i = 0, e = TL.getNumProtocols(); i != e; ++i)
- Record.AddSourceLocation(TL.getProtocolLoc(i));
+ addSourceLocation(TL.getProtocolLoc(i));
}
void TypeLocWriter::VisitObjCObjectPointerTypeLoc(ObjCObjectPointerTypeLoc TL) {
- Record.AddSourceLocation(TL.getStarLoc());
+ addSourceLocation(TL.getStarLoc());
}
void TypeLocWriter::VisitAtomicTypeLoc(AtomicTypeLoc TL) {
- Record.AddSourceLocation(TL.getKWLoc());
- Record.AddSourceLocation(TL.getLParenLoc());
- Record.AddSourceLocation(TL.getRParenLoc());
+ addSourceLocation(TL.getKWLoc());
+ addSourceLocation(TL.getLParenLoc());
+ addSourceLocation(TL.getRParenLoc());
}
void TypeLocWriter::VisitPipeTypeLoc(PipeTypeLoc TL) {
- Record.AddSourceLocation(TL.getKWLoc());
+ addSourceLocation(TL.getKWLoc());
}
void TypeLocWriter::VisitBitIntTypeLoc(clang::BitIntTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void TypeLocWriter::VisitDependentBitIntTypeLoc(
clang::DependentBitIntTypeLoc TL) {
- Record.AddSourceLocation(TL.getNameLoc());
+ addSourceLocation(TL.getNameLoc());
}
void ASTWriter::WriteTypeAbbrevs() {
@@ -589,30 +607,6 @@ void ASTWriter::WriteTypeAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 3)); // Quals
TypeExtQualAbbrev = Stream.EmitAbbrev(std::move(Abv));
-
- // Abbreviation for TYPE_FUNCTION_PROTO
- Abv = std::make_shared<BitCodeAbbrev>();
- Abv->Add(BitCodeAbbrevOp(serialization::TYPE_FUNCTION_PROTO));
- // FunctionType
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ReturnType
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // NoReturn
- Abv->Add(BitCodeAbbrevOp(0)); // HasRegParm
- Abv->Add(BitCodeAbbrevOp(0)); // RegParm
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 4)); // CC
- Abv->Add(BitCodeAbbrevOp(0)); // ProducesResult
- Abv->Add(BitCodeAbbrevOp(0)); // NoCallerSavedRegs
- Abv->Add(BitCodeAbbrevOp(0)); // NoCfCheck
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // CmseNSCall
- // FunctionProtoType
- Abv->Add(BitCodeAbbrevOp(0)); // IsVariadic
- Abv->Add(BitCodeAbbrevOp(0)); // HasTrailingReturn
- Abv->Add(BitCodeAbbrevOp(0)); // TypeQuals
- Abv->Add(BitCodeAbbrevOp(0)); // RefQualifier
- Abv->Add(BitCodeAbbrevOp(EST_None)); // ExceptionSpec
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // NumParams
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Array));
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Params
- TypeFunctionProtoAbbrev = Stream.EmitAbbrev(std::move(Abv));
}
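// [Editor's sketch] The hunk above deletes the dedicated TYPE_FUNCTION_PROTO
// abbreviation. For context, an LLVM bitstream abbreviation pre-declares a
// record's field layout so later emissions write only the values. A minimal
// illustration of the pattern used throughout this file; `MyRecordCode` and
// `Stream` are illustrative stand-ins, not names from this patch, and header
// paths follow recent LLVM layouts:
#include "llvm/Bitstream/BitCodes.h"
#include "llvm/Bitstream/BitstreamWriter.h"
#include <memory>

unsigned defineAbbrev(llvm::BitstreamWriter &Stream, unsigned MyRecordCode) {
  auto Abv = std::make_shared<llvm::BitCodeAbbrev>();
  Abv->Add(llvm::BitCodeAbbrevOp(MyRecordCode));                    // literal code
  Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::VBR, 6));   // varint field
  Abv->Add(llvm::BitCodeAbbrevOp(llvm::BitCodeAbbrevOp::Fixed, 1)); // 1-bit flag
  // The returned ID is later passed to EmitRecordWithAbbrev.
  return Stream.EmitAbbrev(std::move(Abv));
}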
//===----------------------------------------------------------------------===//
@@ -863,6 +857,7 @@ void ASTWriter::WriteBlockInfoBlock() {
RECORD(PP_CONDITIONAL_STACK);
RECORD(DECLS_TO_CHECK_FOR_DEFERRED_DIAGS);
RECORD(PP_INCLUDED_FILES);
+ RECORD(PP_ASSUME_NONNULL_LOC);
// SourceManager Block.
BLOCK(SOURCE_MANAGER_BLOCK);
@@ -1107,8 +1102,7 @@ std::pair<ASTFileSignature, ASTFileSignature>
ASTWriter::createSignature(StringRef AllBytes, StringRef ASTBlockBytes) {
llvm::SHA1 Hasher;
Hasher.update(ASTBlockBytes);
- auto Hash = Hasher.result();
- ASTFileSignature ASTBlockHash = ASTFileSignature::create(Hash);
+ ASTFileSignature ASTBlockHash = ASTFileSignature::create(Hasher.result());
// Add the remaining bytes (i.e. bytes before the unhashed control block that
// are not part of the AST block).
@@ -1116,8 +1110,7 @@ ASTWriter::createSignature(StringRef AllBytes, StringRef ASTBlockBytes) {
AllBytes.take_front(ASTBlockBytes.bytes_end() - AllBytes.bytes_begin()));
Hasher.update(
AllBytes.take_back(AllBytes.bytes_end() - ASTBlockBytes.bytes_end()));
- Hash = Hasher.result();
- ASTFileSignature Signature = ASTFileSignature::create(Hash);
+ ASTFileSignature Signature = ASTFileSignature::create(Hasher.result());
return std::make_pair(ASTBlockHash, Signature);
}
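// [Editor's sketch] The two hunks above only inline a temporary; the
// incremental-hash pattern itself is unchanged. Illustration of that pattern
// with llvm::SHA1 (llvm/Support/SHA1.h); the exact return type of result()
// has varied across LLVM versions, so `auto` is used here:
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/SHA1.h"

auto hashTwoSpans(llvm::StringRef A, llvm::StringRef B) {
  llvm::SHA1 Hasher;
  Hasher.update(A);       // digest state accumulates across updates
  Hasher.update(B);
  return Hasher.result(); // finalizes and returns the digest
}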
@@ -1240,15 +1233,24 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
}
if (WritingModule && WritingModule->Directory) {
- SmallString<128> BaseDir(WritingModule->Directory->getName());
+ SmallString<128> BaseDir;
+ if (PP.getHeaderSearchInfo().getHeaderSearchOpts().ModuleFileHomeIsCwd) {
+ // Use the current working directory as the base path for all inputs.
+ auto *CWD =
+ Context.getSourceManager().getFileManager().getDirectory(".").get();
+ BaseDir.assign(CWD->getName());
+ } else {
+ BaseDir.assign(WritingModule->Directory->getName());
+ }
cleanPathForOutput(Context.getSourceManager().getFileManager(), BaseDir);
// If the home of the module is the current working directory, then we
// want to pick up the cwd of the build process loading the module, not
// our cwd, when we load this module.
- if (!PP.getHeaderSearchInfo()
- .getHeaderSearchOpts()
- .ModuleMapFileHomeIsCwd ||
+ if (!(PP.getHeaderSearchInfo()
+ .getHeaderSearchOpts()
+ .ModuleMapFileHomeIsCwd ||
+ PP.getHeaderSearchInfo().getHeaderSearchOpts().ModuleFileHomeIsCwd) ||
WritingModule->Directory->getName() != StringRef(".")) {
// Module directory.
auto Abbrev = std::make_shared<BitCodeAbbrev>();
@@ -1308,8 +1310,7 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
Record.push_back(M.Signature ? 0 : M.File->getSize());
Record.push_back(M.Signature ? 0 : getTimestampForOutput(M.File));
- for (auto I : M.Signature)
- Record.push_back(I);
+ llvm::append_range(Record, M.Signature);
AddString(M.ModuleName, Record);
AddPath(M.FileName, Record);
@@ -1478,8 +1479,7 @@ void ASTWriter::WriteControlBlock(Preprocessor &PP, ASTContext &Context,
unsigned AbbrevCode = Stream.EmitAbbrev(std::move(Abbrev));
SmallString<128> OutputPath(OutputFile);
-
- SM.getFileManager().makeAbsolutePath(OutputPath);
+ PreparePathForOutput(OutputPath);
StringRef origDir = llvm::sys::path::parent_path(OutputPath);
RecordData::value_type Record[] = {ORIGINAL_PCH_DIR};
@@ -1715,8 +1715,8 @@ static unsigned CreateSLocExpansionAbbrev(llvm::BitstreamWriter &Stream) {
Abbrev->Add(BitCodeAbbrevOp(SM_SLOC_EXPANSION_ENTRY));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Offset
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Spelling location
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // Start location
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 8)); // End location
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Start location
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // End location
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Is token range
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Token length
return Stream.EmitAbbrev(std::move(Abbrev));
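// [Editor's sketch] VBR-N writes a value in N-bit chunks: the low N-1 bits
// carry payload and the top bit says "another chunk follows". Shrinking the
// Start/End fields from VBR 8 to VBR 6 pays off once typical values become
// small (see the sequence-based location encoding later in this patch). The
// toy encoder below is illustrative only, not LLVM's bitstream writer:
#include <cstdint>
#include <vector>

void emitVBR(uint64_t Value, unsigned N, std::vector<bool> &Bits) {
  const uint64_t PayloadMask = (1u << (N - 1)) - 1;
  do {
    uint64_t Chunk = Value & PayloadMask;
    Value >>= (N - 1);
    if (Value)
      Chunk |= (1u << (N - 1)); // continuation bit
    for (unsigned I = 0; I < N; ++I) // low bit first
      Bits.push_back((Chunk >> I) & 1);
  } while (Value);
}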
@@ -1879,7 +1879,7 @@ void ASTWriter::WriteHeaderSearch(const HeaderSearch &HS) {
// headers list when emitting resolved headers in the first loop below.
// FIXME: It'd be preferable to avoid doing this if we were given
// sufficient stat information in the module map.
- HS.getModuleMap().resolveHeaderDirectives(M);
+ HS.getModuleMap().resolveHeaderDirectives(M, /*File=*/llvm::None);
// If the file didn't exist, we can still create a module if we were given
// enough information in the module map.
@@ -2002,15 +2002,11 @@ static void emitBlob(llvm::BitstreamWriter &Stream, StringRef Blob,
// consumers will not want its contents.
SmallString<0> CompressedBuffer;
if (llvm::zlib::isAvailable()) {
- llvm::Error E = llvm::zlib::compress(Blob.drop_back(1), CompressedBuffer);
- if (!E) {
- RecordDataType Record[] = {SM_SLOC_BUFFER_BLOB_COMPRESSED,
- Blob.size() - 1};
- Stream.EmitRecordWithBlob(SLocBufferBlobCompressedAbbrv, Record,
- CompressedBuffer);
- return;
- }
- llvm::consumeError(std::move(E));
+ llvm::zlib::compress(Blob.drop_back(1), CompressedBuffer);
+ RecordDataType Record[] = {SM_SLOC_BUFFER_BLOB_COMPRESSED, Blob.size() - 1};
+ Stream.EmitRecordWithBlob(SLocBufferBlobCompressedAbbrv, Record,
+ CompressedBuffer);
+ return;
}
RecordDataType Record[] = {SM_SLOC_BUFFER_BLOB};
@@ -2143,12 +2139,13 @@ void ASTWriter::WriteSourceManagerBlock(SourceManager &SourceMgr,
} else {
// The source location entry is a macro expansion.
const SrcMgr::ExpansionInfo &Expansion = SLoc->getExpansion();
- AddSourceLocation(Expansion.getSpellingLoc(), Record);
- AddSourceLocation(Expansion.getExpansionLocStart(), Record);
+ LocSeq::State Seq;
+ AddSourceLocation(Expansion.getSpellingLoc(), Record, Seq);
+ AddSourceLocation(Expansion.getExpansionLocStart(), Record, Seq);
AddSourceLocation(Expansion.isMacroArgExpansion()
? SourceLocation()
: Expansion.getExpansionLocEnd(),
- Record);
+ Record, Seq);
Record.push_back(Expansion.isExpansionTokenRange());
// Compute the token length for this macro expansion.
@@ -2295,10 +2292,21 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
Stream.EmitRecord(PP_COUNTER_VALUE, Record);
}
+ // If we have a recorded #pragma assume_nonnull, remember it so it can be
+ // replayed when the preamble terminates into the main file.
+ SourceLocation AssumeNonNullLoc =
+ PP.getPreambleRecordedPragmaAssumeNonNullLoc();
+ if (AssumeNonNullLoc.isValid()) {
+ assert(PP.isRecordingPreamble());
+ AddSourceLocation(AssumeNonNullLoc, Record);
+ Stream.EmitRecord(PP_ASSUME_NONNULL_LOC, Record);
+ Record.clear();
+ }
+
if (PP.isRecordingPreamble() && PP.hasRecordedPreamble()) {
assert(!IsModule);
auto SkipInfo = PP.getPreambleSkipInfo();
- if (SkipInfo.hasValue()) {
+ if (SkipInfo) {
Record.push_back(true);
AddSourceLocation(SkipInfo->HashTokenLoc, Record);
AddSourceLocation(SkipInfo->IfTokenLoc, Record);
@@ -2348,13 +2356,22 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
uint64_t StartOffset = Stream.GetCurrentBitNo() - MacroOffsetsBase;
assert((StartOffset >> 32) == 0 && "Macro identifiers offset too large");
- // Emit the macro directives in reverse source order.
- for (; MD; MD = MD->getPrevious()) {
- // Once we hit an ignored macro, we're done: the rest of the chain
- // will all be ignored macros.
- if (shouldIgnoreMacro(MD, IsModule, PP))
- break;
-
+ // Write out any exported module macros.
+ bool EmittedModuleMacros = false;
+ // C++20 Header Units are compiled module interfaces, but they preserve
+ // macros that are live (i.e. have a defined value) at the end of the
+ // compilation. So when writing a header unit, we preserve only the final
+ // value of each macro (and discard any that are undefined). Header units
+ // do not have sub-modules (although they might import other header units).
+ // PCH files, conversely, retain the history of each macro's define/undef
+ // and of leaf macros in sub modules.
+ if (IsModule && WritingModule->isHeaderUnit()) {
+ // This is for the main TU when it is a C++20 header unit.
+ // We preserve the final state of defined macros, and we do not emit ones
+ // that are undefined.
+ if (!MD || shouldIgnoreMacro(MD, IsModule, PP) ||
+ MD->getKind() == MacroDirective::MD_Undefine)
+ continue;
AddSourceLocation(MD->getLocation(), Record);
Record.push_back(MD->getKind());
if (auto *DefMD = dyn_cast<DefMacroDirective>(MD)) {
@@ -2362,35 +2379,51 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
} else if (auto *VisMD = dyn_cast<VisibilityMacroDirective>(MD)) {
Record.push_back(VisMD->isPublic());
}
- }
+ ModuleMacroRecord.push_back(getSubmoduleID(WritingModule));
+ ModuleMacroRecord.push_back(getMacroRef(MD->getMacroInfo(), Name));
+ Stream.EmitRecord(PP_MODULE_MACRO, ModuleMacroRecord);
+ ModuleMacroRecord.clear();
+ EmittedModuleMacros = true;
+ } else {
+ // Emit the macro directives in reverse source order.
+ for (; MD; MD = MD->getPrevious()) {
+ // Once we hit an ignored macro, we're done: the rest of the chain
+ // will all be ignored macros.
+ if (shouldIgnoreMacro(MD, IsModule, PP))
+ break;
+ AddSourceLocation(MD->getLocation(), Record);
+ Record.push_back(MD->getKind());
+ if (auto *DefMD = dyn_cast<DefMacroDirective>(MD)) {
+ Record.push_back(getMacroRef(DefMD->getInfo(), Name));
+ } else if (auto *VisMD = dyn_cast<VisibilityMacroDirective>(MD)) {
+ Record.push_back(VisMD->isPublic());
+ }
+ }
- // Write out any exported module macros.
- bool EmittedModuleMacros = false;
- // We write out exported module macros for PCH as well.
- auto Leafs = PP.getLeafModuleMacros(Name);
- SmallVector<ModuleMacro*, 8> Worklist(Leafs.begin(), Leafs.end());
- llvm::DenseMap<ModuleMacro*, unsigned> Visits;
- while (!Worklist.empty()) {
- auto *Macro = Worklist.pop_back_val();
+ // We write out exported module macros for PCH as well.
+ auto Leafs = PP.getLeafModuleMacros(Name);
+ SmallVector<ModuleMacro *, 8> Worklist(Leafs.begin(), Leafs.end());
+ llvm::DenseMap<ModuleMacro *, unsigned> Visits;
+ while (!Worklist.empty()) {
+ auto *Macro = Worklist.pop_back_val();
- // Emit a record indicating this submodule exports this macro.
- ModuleMacroRecord.push_back(
- getSubmoduleID(Macro->getOwningModule()));
- ModuleMacroRecord.push_back(getMacroRef(Macro->getMacroInfo(), Name));
- for (auto *M : Macro->overrides())
- ModuleMacroRecord.push_back(getSubmoduleID(M->getOwningModule()));
+ // Emit a record indicating this submodule exports this macro.
+ ModuleMacroRecord.push_back(getSubmoduleID(Macro->getOwningModule()));
+ ModuleMacroRecord.push_back(getMacroRef(Macro->getMacroInfo(), Name));
+ for (auto *M : Macro->overrides())
+ ModuleMacroRecord.push_back(getSubmoduleID(M->getOwningModule()));
- Stream.EmitRecord(PP_MODULE_MACRO, ModuleMacroRecord);
- ModuleMacroRecord.clear();
+ Stream.EmitRecord(PP_MODULE_MACRO, ModuleMacroRecord);
+ ModuleMacroRecord.clear();
- // Enqueue overridden macros once we've visited all their ancestors.
- for (auto *M : Macro->overrides())
- if (++Visits[M] == M->getNumOverridingMacros())
- Worklist.push_back(M);
+ // Enqueue overridden macros once we've visited all their ancestors.
+ for (auto *M : Macro->overrides())
+ if (++Visits[M] == M->getNumOverridingMacros())
+ Worklist.push_back(M);
- EmittedModuleMacros = true;
+ EmittedModuleMacros = true;
+ }
}
-
if (Record.empty() && !EmittedModuleMacros)
continue;
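// [Editor's sketch] A toy model of the two macro-emission policies described
// in the comments above: a header unit serializes only each macro's final
// definition (dropping macros that end undefined), while a PCH keeps the
// whole directive chain. Types here are illustrative, not clang's:
#include <optional>
#include <string>
#include <vector>

struct ToyDirective { bool IsDefine; std::string Body; };

// Header-unit policy: final state only.
std::optional<std::string> finalValue(const std::vector<ToyDirective> &Chain) {
  if (Chain.empty() || !Chain.back().IsDefine)
    return std::nullopt; // ends undefined: not emitted
  return Chain.back().Body;
}

// PCH policy: the full define/undef history is preserved as-is.
const std::vector<ToyDirective> &history(const std::vector<ToyDirective> &C) {
  return C;
}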
@@ -2431,6 +2464,7 @@ void ASTWriter::WritePreprocessor(const Preprocessor &PP, bool IsModule) {
AddSourceLocation(MI->getDefinitionEndLoc(), Record);
Record.push_back(MI->isUsed());
Record.push_back(MI->isUsedForHeaderGuard());
+ Record.push_back(MI->getNumTokens());
unsigned Code;
if (MI->isObjectLike()) {
Code = PP_MACRO_OBJECT_LIKE;
@@ -2673,7 +2707,7 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
Abbrev->Add(BitCodeAbbrevOp(SUBMODULE_DEFINITION));
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // ID
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Parent
- Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // Kind
+ Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // Kind
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsFramework
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsExplicit
Abbrev->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // IsSystem
@@ -2832,8 +2866,11 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
{
auto TopHeaders = Mod->getTopHeaders(PP->getFileManager());
RecordData::value_type Record[] = {SUBMODULE_TOPHEADER};
- for (auto *H : TopHeaders)
- Stream.EmitRecordWithBlob(TopHeaderAbbrev, Record, H->getName());
+ for (auto *H : TopHeaders) {
+ SmallString<128> HeaderName(H->getName());
+ PreparePathForOutput(HeaderName);
+ Stream.EmitRecordWithBlob(TopHeaderAbbrev, Record, HeaderName);
+ }
}
// Emit the imports.
@@ -2860,6 +2897,8 @@ void ASTWriter::WriteSubmodules(Module *WritingModule) {
// Might be unnecessary as use declarations are only used to build the
// module itself.
+ // TODO: Consider serializing undeclared uses of modules.
+
// Emit the link libraries.
for (const auto &LL : Mod->LinkLibraries) {
RecordData::value_type Record[] = {SUBMODULE_LINK_LIBRARY,
@@ -3096,6 +3135,7 @@ void ASTWriter::WriteFileDeclIDsMap() {
for (auto &FileDeclEntry : SortedFileDeclIDs) {
DeclIDInFileInfo &Info = *FileDeclEntry.second;
Info.FirstDeclIndex = FileGroupedDeclIDs.size();
+ llvm::stable_sort(Info.DeclIDs);
for (auto &LocDeclEntry : Info.DeclIDs)
FileGroupedDeclIDs.push_back(LocDeclEntry.second);
}
@@ -3694,8 +3734,7 @@ public:
data_type ImportData(const reader::ASTDeclContextNameLookupTrait::data_type &FromReader) {
unsigned Start = DeclIDs.size();
- for (auto ID : FromReader)
- DeclIDs.push_back(ID);
+ llvm::append_range(DeclIDs, FromReader);
return std::make_pair(Start, DeclIDs.size());
}
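// [Editor's note] This commit repeatedly replaces manual push_back loops with
// llvm::append_range (llvm/ADT/STLExtras.h), which essentially forwards to
// Container.insert(end, begin(Range), end(Range)). A minimal, self-contained
// illustration of the replacement pattern:
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

void copyIDs(llvm::SmallVectorImpl<unsigned> &DeclIDs,
             llvm::ArrayRef<unsigned> FromReader) {
  // Equivalent to: for (auto ID : FromReader) DeclIDs.push_back(ID);
  llvm::append_range(DeclIDs, FromReader);
}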
@@ -4553,13 +4592,14 @@ ASTFileSignature ASTWriter::WriteASTCore(Sema &SemaRef, StringRef isysroot,
// entire table, since later PCH files in a PCH chain are only interested in
// the results at the end of the chain.
RecordData WeakUndeclaredIdentifiers;
- for (auto &WeakUndeclaredIdentifier : SemaRef.WeakUndeclaredIdentifiers) {
- IdentifierInfo *II = WeakUndeclaredIdentifier.first;
- WeakInfo &WI = WeakUndeclaredIdentifier.second;
- AddIdentifierRef(II, WeakUndeclaredIdentifiers);
- AddIdentifierRef(WI.getAlias(), WeakUndeclaredIdentifiers);
- AddSourceLocation(WI.getLocation(), WeakUndeclaredIdentifiers);
- WeakUndeclaredIdentifiers.push_back(WI.getUsed());
+ for (const auto &WeakUndeclaredIdentifierList :
+ SemaRef.WeakUndeclaredIdentifiers) {
+ const IdentifierInfo *const II = WeakUndeclaredIdentifierList.first;
+ for (const auto &WI : WeakUndeclaredIdentifierList.second) {
+ AddIdentifierRef(II, WeakUndeclaredIdentifiers);
+ AddIdentifierRef(WI.getAlias(), WeakUndeclaredIdentifiers);
+ AddSourceLocation(WI.getLocation(), WeakUndeclaredIdentifiers);
+ }
}
// Build a record containing all of the ext_vector declarations.
@@ -5184,14 +5224,15 @@ void ASTWriter::AddAlignPackInfo(const Sema::AlignPackInfo &Info,
Record.push_back(Raw);
}
-void ASTWriter::AddSourceLocation(SourceLocation Loc, RecordDataImpl &Record) {
- SourceLocation::UIntTy Raw = Loc.getRawEncoding();
- Record.push_back((Raw << 1) | (Raw >> (8 * sizeof(Raw) - 1)));
+void ASTWriter::AddSourceLocation(SourceLocation Loc, RecordDataImpl &Record,
+ SourceLocationSequence *Seq) {
+ Record.push_back(SourceLocationEncoding::encode(Loc, Seq));
}
-void ASTWriter::AddSourceRange(SourceRange Range, RecordDataImpl &Record) {
- AddSourceLocation(Range.getBegin(), Record);
- AddSourceLocation(Range.getEnd(), Record);
+void ASTWriter::AddSourceRange(SourceRange Range, RecordDataImpl &Record,
+ SourceLocationSequence *Seq) {
+ AddSourceLocation(Range.getBegin(), Record, Seq);
+ AddSourceLocation(Range.getEnd(), Record, Seq);
}
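// [Editor's sketch, assumptions flagged] The deleted encoding rotated the raw
// location left by one bit so the macro-location high bit lands in bit 0 and
// plain file offsets stay small under VBR. SourceLocationEncoding additionally
// lets callers thread a sequence state so nearby locations can be stored as
// small deltas. The toy below illustrates both ideas; it is not clang's
// actual scheme:
#include <cstdint>

using RawLoc = uint32_t;

// Old, stateless form (exactly the deleted expression).
uint32_t rotateEncode(RawLoc Raw) {
  return (Raw << 1) | (Raw >> (8 * sizeof(RawLoc) - 1));
}

// Toy sequence encoder: zigzagged delta from the previous location, so
// clustered locations encode to small integers.
struct ToyLocSeq {
  RawLoc Prev = 0;
  uint64_t encode(RawLoc Loc) {
    int64_t Delta = int64_t(Loc) - int64_t(Prev);
    Prev = Loc;
    return Delta < 0 ? (uint64_t(-Delta) << 1) | 1 : uint64_t(Delta) << 1;
  }
};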
void ASTRecordWriter::AddAPFloat(const llvm::APFloat &Value) {
@@ -5318,8 +5359,9 @@ void ASTRecordWriter::AddTypeSourceInfo(TypeSourceInfo *TInfo) {
AddTypeLoc(TInfo->getTypeLoc());
}
-void ASTRecordWriter::AddTypeLoc(TypeLoc TL) {
- TypeLocWriter TLW(*this);
+void ASTRecordWriter::AddTypeLoc(TypeLoc TL, LocSeq *OuterSeq) {
+ LocSeq::State Seq(OuterSeq);
+ TypeLocWriter TLW(*this, Seq);
for (; !TL.isNull(); TL = TL.getNextTypeLoc())
TLW.Visit(TL);
}
@@ -5444,16 +5486,7 @@ void ASTWriter::associateDeclWithFile(const Decl *D, DeclID ID) {
std::pair<unsigned, serialization::DeclID> LocDecl(Offset, ID);
LocDeclIDsTy &Decls = Info->DeclIDs;
-
- if (Decls.empty() || Decls.back().first <= Offset) {
- Decls.push_back(LocDecl);
- return;
- }
-
- LocDeclIDsTy::iterator I =
- llvm::upper_bound(Decls, LocDecl, llvm::less_first());
-
- Decls.insert(I, LocDecl);
+ Decls.push_back(LocDecl);
}
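// [Editor's note] The deleted upper_bound-plus-insert kept Info.DeclIDs
// sorted on every call (O(n) per insertion); the new code appends in O(1),
// and the llvm::stable_sort added in WriteFileDeclIDsMap earlier in this
// patch orders the list once before emission. Self-contained sketch of the
// deferred-sort pattern (std::stable_sort standing in for llvm::stable_sort):
#include <algorithm>
#include <utility>
#include <vector>

using LocDecl = std::pair<unsigned, unsigned>; // (offset, decl ID)

struct ToyFileInfo {
  std::vector<LocDecl> DeclIDs;
  void associate(unsigned Offset, unsigned ID) {
    DeclIDs.emplace_back(Offset, ID); // defer ordering
  }
  void finalize() { // once, just before writing
    std::stable_sort(DeclIDs.begin(), DeclIDs.end());
  }
};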
unsigned ASTWriter::getAnonymousDeclarationNumber(const NamedDecl *D) {
@@ -5726,7 +5759,7 @@ void ASTRecordWriter::AddCXXDefinitionData(const CXXRecordDecl *D) {
// Add lambda-specific data.
if (Data.IsLambda) {
auto &Lambda = D->getLambdaData();
- Record->push_back(Lambda.Dependent);
+ Record->push_back(Lambda.DependencyKind);
Record->push_back(Lambda.IsGenericLambda);
Record->push_back(Lambda.CaptureDefault);
Record->push_back(Lambda.NumCaptures);
@@ -5904,8 +5937,7 @@ void ASTWriter::AddedVisibleDecl(const DeclContext *DC, const Decl *D) {
// We're adding a visible declaration to a predefined decl context. Ensure
// that we write out all of its lookup results so we don't get a nasty
// surprise when we try to emit its lookup table.
- for (auto *Child : DC->decls())
- DeclsToEmitEvenIfUnreferenced.push_back(Child);
+ llvm::append_range(DeclsToEmitEvenIfUnreferenced, DC->decls());
}
DeclsToEmitEvenIfUnreferenced.push_back(D);
}
@@ -6560,6 +6592,7 @@ void OMPClauseWriter::VisitOMPDependClause(OMPDependClause *C) {
Record.push_back(C->getDependencyKind());
Record.AddSourceLocation(C->getDependencyLoc());
Record.AddSourceLocation(C->getColonLoc());
+ Record.AddSourceLocation(C->getOmpAllMemoryLoc());
for (auto *VE : C->varlists())
Record.AddStmt(VE);
for (unsigned I = 0, E = C->getNumLoops(); I < E; ++I)
@@ -6790,6 +6823,26 @@ void OMPClauseWriter::VisitOMPIsDevicePtrClause(OMPIsDevicePtrClause *C) {
}
}
+void OMPClauseWriter::VisitOMPHasDeviceAddrClause(OMPHasDeviceAddrClause *C) {
+ Record.push_back(C->varlist_size());
+ Record.push_back(C->getUniqueDeclarationsNum());
+ Record.push_back(C->getTotalComponentListNum());
+ Record.push_back(C->getTotalComponentsNum());
+ Record.AddSourceLocation(C->getLParenLoc());
+ for (auto *E : C->varlists())
+ Record.AddStmt(E);
+ for (auto *D : C->all_decls())
+ Record.AddDeclRef(D);
+ for (auto N : C->all_num_lists())
+ Record.push_back(N);
+ for (auto N : C->all_lists_sizes())
+ Record.push_back(N);
+ for (auto &M : C->all_components()) {
+ Record.AddStmt(M.getAssociatedExpression());
+ Record.AddDeclRef(M.getAssociatedDeclaration());
+ }
+}
+
void OMPClauseWriter::VisitOMPUnifiedAddressClause(OMPUnifiedAddressClause *) {}
void OMPClauseWriter::VisitOMPUnifiedSharedMemoryClause(
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp
index 06cb60823db6..01f692c9611b 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTWriterDecl.cpp
@@ -96,6 +96,7 @@ namespace clang {
void VisitFieldDecl(FieldDecl *D);
void VisitMSPropertyDecl(MSPropertyDecl *D);
void VisitMSGuidDecl(MSGuidDecl *D);
+ void VisitUnnamedGlobalConstantDecl(UnnamedGlobalConstantDecl *D);
void VisitTemplateParamObjectDecl(TemplateParamObjectDecl *D);
void VisitIndirectFieldDecl(IndirectFieldDecl *D);
void VisitVarDecl(VarDecl *D);
@@ -310,7 +311,7 @@ void ASTDeclWriter::VisitDecl(Decl *D) {
Record.push_back(D->isReferenced());
Record.push_back(D->isTopLevelDeclInObjCContainer());
Record.push_back(D->getAccess());
- Record.push_back(D->isModulePrivate());
+ Record.push_back((uint64_t)D->getModuleOwnershipKind());
Record.push_back(Writer.getSubmoduleID(D->getOwningModule()));
// If this declaration injected a name into a context different from its
@@ -962,11 +963,17 @@ void ASTDeclWriter::VisitMSGuidDecl(MSGuidDecl *D) {
Record.push_back(Parts.Part1);
Record.push_back(Parts.Part2);
Record.push_back(Parts.Part3);
- for (auto C : Parts.Part4And5)
- Record.push_back(C);
+ Record.append(std::begin(Parts.Part4And5), std::end(Parts.Part4And5));
Code = serialization::DECL_MS_GUID;
}
+void ASTDeclWriter::VisitUnnamedGlobalConstantDecl(
+ UnnamedGlobalConstantDecl *D) {
+ VisitValueDecl(D);
+ Record.AddAPValue(D->getValue());
+ Code = serialization::DECL_UNNAMED_GLOBAL_CONSTANT;
+}
+
void ASTDeclWriter::VisitTemplateParamObjectDecl(TemplateParamObjectDecl *D) {
VisitValueDecl(D);
Record.AddAPValue(D->getValue());
@@ -1022,12 +1029,12 @@ void ASTDeclWriter::VisitVarDecl(VarDecl *D) {
if (Writer.WritingModule &&
!D->getDescribedVarTemplate() && !D->getMemberSpecializationInfo() &&
!isa<VarTemplateSpecializationDecl>(D)) {
- // When building a C++ Modules TS module interface unit, a strong
- // definition in the module interface is provided by the compilation of
- // that module interface unit, not by its users. (Inline variables are
- // still emitted in module users.)
+ // When building a C++20 module interface unit or a partition unit, a
+ // strong definition in the module interface is provided by the
+ // compilation of that unit, not by its users. (Inline variables are still
+ // emitted in module users.)
ModulesCodegen =
- (Writer.WritingModule->Kind == Module::ModuleInterfaceUnit ||
+ (Writer.WritingModule->isInterfaceOrPartition() ||
(D->hasAttr<DLLExportAttr>() &&
Writer.Context->getLangOpts().BuildingPCHWithObjectFile)) &&
Writer.Context->GetGVALinkageForVariable(D) == GVA_StrongExternal;
@@ -1921,7 +1928,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // AccessSpecifier
- Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // ModuleOwnershipKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
// NamedDecl
Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
@@ -1954,7 +1961,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // AccessSpecifier
- Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // ModuleOwnershipKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
// NamedDecl
Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
@@ -1992,7 +1999,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
Abv->Add(BitCodeAbbrevOp(AS_none)); // C++ AccessSpecifier
- Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // ModuleOwnershipKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
// NamedDecl
Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
@@ -2042,7 +2049,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
Abv->Add(BitCodeAbbrevOp(AS_none)); // C++ AccessSpecifier
- Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // ModuleOwnershipKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
// NamedDecl
Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
@@ -2104,7 +2111,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
Abv->Add(BitCodeAbbrevOp(AS_none)); // C++ AccessSpecifier
- Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // ModuleOwnershipKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
// NamedDecl
Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
@@ -2152,7 +2159,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // isReferenced
Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // C++ AccessSpecifier
- Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // ModuleOwnershipKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
// NamedDecl
Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
@@ -2181,7 +2188,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(0)); // isReferenced
Abv->Add(BitCodeAbbrevOp(0)); // TopLevelDeclInObjCContainer
Abv->Add(BitCodeAbbrevOp(AS_none)); // C++ AccessSpecifier
- Abv->Add(BitCodeAbbrevOp(0)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // ModuleOwnershipKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
// NamedDecl
Abv->Add(BitCodeAbbrevOp(0)); // NameKind = Identifier
@@ -2233,7 +2240,7 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // Referenced
Abv->Add(BitCodeAbbrevOp(0)); // InObjCContainer
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 2)); // Access
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); // ModulePrivate
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); // ModuleOwnershipKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // SubmoduleID
// NamedDecl
Abv->Add(BitCodeAbbrevOp(DeclarationName::Identifier)); // NameKind
@@ -2281,17 +2288,14 @@ void ASTWriter::WriteDeclAbbrevs() {
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6));
DeclCXXMethodAbbrev = Stream.EmitAbbrev(std::move(Abv));
+ unsigned ExprDependenceBits = llvm::BitWidth<ExprDependence>;
// Abbreviation for EXPR_DECL_REF
Abv = std::make_shared<BitCodeAbbrev>();
Abv->Add(BitCodeAbbrevOp(serialization::EXPR_DECL_REF));
//Stmt
// Expr
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //InstantiationDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //UnexpandedParamPack
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ContainsErrors
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, ExprDependenceBits));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetValueKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetObjectKind
//DeclRefExpr
@@ -2311,11 +2315,7 @@ void ASTWriter::WriteDeclAbbrevs() {
//Stmt
// Expr
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //InstantiationDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //UnexpandedParamPack
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ContainsErrors
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, ExprDependenceBits));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetValueKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetObjectKind
//Integer Literal
@@ -2330,11 +2330,7 @@ void ASTWriter::WriteDeclAbbrevs() {
//Stmt
// Expr
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //InstantiationDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //UnexpandedParamPack
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ContainsErrors
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, ExprDependenceBits));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetValueKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetObjectKind
//Character Literal
@@ -2349,11 +2345,7 @@ void ASTWriter::WriteDeclAbbrevs() {
// Stmt
// Expr
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::VBR, 6)); // Type
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //TypeDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ValueDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //InstantiationDependent
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //UnexpandedParamPack
- Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 1)); //ContainsErrors
+ Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, ExprDependenceBits));
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetValueKind
Abv->Add(BitCodeAbbrevOp(BitCodeAbbrevOp::Fixed, 3)); //GetObjectKind
// CastExpr
@@ -2463,11 +2455,11 @@ void ASTRecordWriter::AddFunctionDefinition(const FunctionDecl *FD) {
if (!FD->isDependentContext()) {
Optional<GVALinkage> Linkage;
if (Writer->WritingModule &&
- Writer->WritingModule->Kind == Module::ModuleInterfaceUnit) {
- // When building a C++ Modules TS module interface unit, a strong
- // definition in the module interface is provided by the compilation of
- // that module interface unit, not by its users. (Inline functions are
- // still emitted in module users.)
+ Writer->WritingModule->isInterfaceOrPartition()) {
+ // When building a C++20 module interface unit or a partition unit, a
+ // strong definition in the module interface is provided by the
+ // compilation of that unit, not by its users. (Inline functions are still
+ // emitted in module users.)
Linkage = Writer->Context->GetGVALinkageForFunction(FD);
ModulesCodegen = *Linkage == GVA_StrongExternal;
}
diff --git a/contrib/llvm-project/clang/lib/Serialization/ASTWriterStmt.cpp b/contrib/llvm-project/clang/lib/Serialization/ASTWriterStmt.cpp
index 2d92dec76dc9..5e5a86ee01a2 100644
--- a/contrib/llvm-project/clang/lib/Serialization/ASTWriterStmt.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/ASTWriterStmt.cpp
@@ -81,8 +81,11 @@ void ASTStmtWriter::VisitNullStmt(NullStmt *S) {
void ASTStmtWriter::VisitCompoundStmt(CompoundStmt *S) {
VisitStmt(S);
Record.push_back(S->size());
+ Record.push_back(S->hasStoredFPFeatures());
for (auto *CS : S->body())
Record.AddStmt(CS);
+ if (S->hasStoredFPFeatures())
+ Record.push_back(S->getStoredFPFeatures().getAsOpaqueInt());
Record.AddSourceLocation(S->getLBracLoc());
Record.AddSourceLocation(S->getRBracLoc());
Code = serialization::STMT_COMPOUND;
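// [Editor's sketch] AST serialization is order-sensitive: the new
// hasStoredFPFeatures flag is written before the conditional payload, and a
// matching reader must consume fields in exactly that order. Toy writer under
// that assumption (plain integers stand in for the record stream):
#include <cstdint>
#include <optional>
#include <vector>

void writeToyCompound(std::vector<uint64_t> &Rec, size_t NumStmts,
                      std::optional<uint64_t> FPFeatures) {
  Rec.push_back(NumStmts);
  Rec.push_back(FPFeatures.has_value()); // presence flag first
  // ... the child statements would be written here ...
  if (FPFeatures)
    Rec.push_back(*FPFeatures); // payload only when flagged
}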
@@ -543,11 +546,7 @@ void ASTStmtWriter::VisitCapturedStmt(CapturedStmt *S) {
void ASTStmtWriter::VisitExpr(Expr *E) {
VisitStmt(E);
Record.AddTypeRef(E->getType());
- Record.push_back(E->isTypeDependent());
- Record.push_back(E->isValueDependent());
- Record.push_back(E->isInstantiationDependent());
- Record.push_back(E->containsUnexpandedParameterPack());
- Record.push_back(E->containsErrors());
+ Record.push_back(E->getDependence());
Record.push_back(E->getValueKind());
Record.push_back(E->getObjectKind());
}
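// [Editor's sketch] Five per-expression booleans are now serialized as one
// ExprDependence bitmask, sized via llvm::BitWidth<ExprDependence> in the
// abbreviations above. Minimal illustration of the flag-packing idea; the
// enumerator names below are illustrative, not clang's:
#include <cstdint>

enum class ToyDependence : uint8_t {
  None = 0,
  Type = 1 << 0,
  Value = 1 << 1,
  Instantiation = 1 << 2,
  UnexpandedPack = 1 << 3,
  Error = 1 << 4,
};

inline ToyDependence operator|(ToyDependence A, ToyDependence B) {
  return ToyDependence(uint8_t(A) | uint8_t(B));
}

// One record field of uint8_t(Dep) now replaces five single-bit fields.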
@@ -2302,6 +2301,13 @@ void ASTStmtWriter::VisitOMPParallelMasterDirective(
Code = serialization::STMT_OMP_PARALLEL_MASTER_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPParallelMaskedDirective(
+ OMPParallelMaskedDirective *D) {
+ VisitStmt(D);
+ VisitOMPExecutableDirective(D);
+ Code = serialization::STMT_OMP_PARALLEL_MASKED_DIRECTIVE;
+}
+
void ASTStmtWriter::VisitOMPParallelSectionsDirective(
OMPParallelSectionsDirective *D) {
VisitStmt(D);
@@ -2322,6 +2328,7 @@ void ASTStmtWriter::VisitOMPAtomicDirective(OMPAtomicDirective *D) {
VisitOMPExecutableDirective(D);
Record.writeBool(D->isXLHSInRHSPart());
Record.writeBool(D->isPostfixUpdate());
+ Record.writeBool(D->isFailOnly());
Code = serialization::STMT_OMP_ATOMIC_DIRECTIVE;
}
@@ -2454,12 +2461,25 @@ void ASTStmtWriter::VisitOMPMasterTaskLoopDirective(
Code = serialization::STMT_OMP_MASTER_TASKLOOP_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPMaskedTaskLoopDirective(
+ OMPMaskedTaskLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+ Record.writeBool(D->hasCancel());
+ Code = serialization::STMT_OMP_MASKED_TASKLOOP_DIRECTIVE;
+}
+
void ASTStmtWriter::VisitOMPMasterTaskLoopSimdDirective(
OMPMasterTaskLoopSimdDirective *D) {
VisitOMPLoopDirective(D);
Code = serialization::STMT_OMP_MASTER_TASKLOOP_SIMD_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPMaskedTaskLoopSimdDirective(
+ OMPMaskedTaskLoopSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_MASKED_TASKLOOP_SIMD_DIRECTIVE;
+}
+
void ASTStmtWriter::VisitOMPParallelMasterTaskLoopDirective(
OMPParallelMasterTaskLoopDirective *D) {
VisitOMPLoopDirective(D);
@@ -2467,12 +2487,25 @@ void ASTStmtWriter::VisitOMPParallelMasterTaskLoopDirective(
Code = serialization::STMT_OMP_PARALLEL_MASTER_TASKLOOP_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPParallelMaskedTaskLoopDirective(
+ OMPParallelMaskedTaskLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+ Record.writeBool(D->hasCancel());
+ Code = serialization::STMT_OMP_PARALLEL_MASKED_TASKLOOP_DIRECTIVE;
+}
+
void ASTStmtWriter::VisitOMPParallelMasterTaskLoopSimdDirective(
OMPParallelMasterTaskLoopSimdDirective *D) {
VisitOMPLoopDirective(D);
Code = serialization::STMT_OMP_PARALLEL_MASTER_TASKLOOP_SIMD_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPParallelMaskedTaskLoopSimdDirective(
+ OMPParallelMaskedTaskLoopSimdDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_PARALLEL_MASKED_TASKLOOP_SIMD_DIRECTIVE;
+}
+
void ASTStmtWriter::VisitOMPDistributeDirective(OMPDistributeDirective *D) {
VisitOMPLoopDirective(D);
Code = serialization::STMT_OMP_DISTRIBUTE_DIRECTIVE;
@@ -2595,6 +2628,30 @@ void ASTStmtWriter::VisitOMPGenericLoopDirective(OMPGenericLoopDirective *D) {
Code = serialization::STMT_OMP_GENERIC_LOOP_DIRECTIVE;
}
+void ASTStmtWriter::VisitOMPTeamsGenericLoopDirective(
+ OMPTeamsGenericLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_TEAMS_GENERIC_LOOP_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPTargetTeamsGenericLoopDirective(
+ OMPTargetTeamsGenericLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_TARGET_TEAMS_GENERIC_LOOP_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPParallelGenericLoopDirective(
+ OMPParallelGenericLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_PARALLEL_GENERIC_LOOP_DIRECTIVE;
+}
+
+void ASTStmtWriter::VisitOMPTargetParallelGenericLoopDirective(
+ OMPTargetParallelGenericLoopDirective *D) {
+ VisitOMPLoopDirective(D);
+ Code = serialization::STMT_OMP_TARGET_PARALLEL_GENERIC_LOOP_DIRECTIVE;
+}
+
//===----------------------------------------------------------------------===//
// ASTWriter Implementation
//===----------------------------------------------------------------------===//
diff --git a/contrib/llvm-project/clang/lib/Serialization/GlobalModuleIndex.cpp b/contrib/llvm-project/clang/lib/Serialization/GlobalModuleIndex.cpp
index 52ce17d984bf..b2283c2b3987 100644
--- a/contrib/llvm-project/clang/lib/Serialization/GlobalModuleIndex.cpp
+++ b/contrib/llvm-project/clang/lib/Serialization/GlobalModuleIndex.cpp
@@ -277,7 +277,7 @@ GlobalModuleIndex::readIndex(StringRef Path) {
return std::make_pair(nullptr, Res.takeError());
}
- return std::make_pair(new GlobalModuleIndex(std::move(Buffer), Cursor),
+ return std::make_pair(new GlobalModuleIndex(std::move(Buffer), std::move(Cursor)),
llvm::Error::success());
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
index 605b11874ef5..03e85b9c4373 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundChecker.cpp
@@ -58,8 +58,8 @@ void ArrayBoundChecker::checkLocation(SVal l, bool isLoad, const Stmt* LoadS,
DefinedOrUnknownSVal ElementCount = getDynamicElementCount(
state, ER->getSuperRegion(), C.getSValBuilder(), ER->getValueType());
- ProgramStateRef StInBound = state->assumeInBound(Idx, ElementCount, true);
- ProgramStateRef StOutBound = state->assumeInBound(Idx, ElementCount, false);
+ ProgramStateRef StInBound, StOutBound;
+ std::tie(StInBound, StOutBound) = state->assumeInBoundDual(Idx, ElementCount);
if (StOutBound && !StInBound) {
ExplodedNode *N = C.generateErrorNode(StOutBound);
if (!N)
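// [Editor's sketch] assumeInBoundDual folds the former pair of
// assumeInBound(Idx, Count, true/false) calls into a single query returning
// both feasible refinements at once. Toy model with concrete values (a
// symbolic index could leave both branches feasible); this is not the
// analyzer's real API:
#include <optional>
#include <utility>

struct ToyState { long Idx, Count; };

std::pair<std::optional<ToyState>, std::optional<ToyState>>
toyAssumeInBoundDual(const ToyState &S) {
  std::optional<ToyState> In, Out;
  if (S.Idx >= 0 && S.Idx < S.Count)
    In = S;
  else
    Out = S;
  return {In, Out};
}

// Usage mirrors the checker: report only when out-of-bounds is the sole
// feasible state.
bool definitelyOutOfBounds(const ToyState &S) {
  auto [In, Out] = toyAssumeInBoundDual(S);
  return Out && !In;
}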
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
index 2a5fe9d8ed92..5be5bcde4d6e 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ArrayBoundCheckerV2.cpp
@@ -11,9 +11,9 @@
//
//===----------------------------------------------------------------------===//
-#include "Taint.h"
#include "clang/AST/CharUnits.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Checkers/Taint.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -144,10 +144,9 @@ void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
SVal extentBegin = computeExtentBegin(svalBuilder, rawOffset.getRegion());
if (Optional<NonLoc> NV = extentBegin.getAs<NonLoc>()) {
- if (NV->getAs<nonloc::ConcreteInt>()) {
+ if (auto ConcreteNV = NV->getAs<nonloc::ConcreteInt>()) {
std::pair<NonLoc, nonloc::ConcreteInt> simplifiedOffsets =
- getSimplifiedOffsets(rawOffset.getByteOffset(),
- NV->castAs<nonloc::ConcreteInt>(),
+ getSimplifiedOffsets(rawOffset.getByteOffset(), *ConcreteNV,
svalBuilder);
rawOffsetVal = simplifiedOffsets.first;
*NV = simplifiedOffsets.second;
@@ -180,13 +179,13 @@ void ArrayBoundCheckerV2::checkLocation(SVal location, bool isLoad,
// we are doing a load/store after the last valid offset.
const MemRegion *MR = rawOffset.getRegion();
DefinedOrUnknownSVal Size = getDynamicExtent(state, MR, svalBuilder);
- if (!Size.getAs<NonLoc>())
+ if (!isa<NonLoc>(Size))
break;
- if (Size.getAs<nonloc::ConcreteInt>()) {
+ if (auto ConcreteSize = Size.getAs<nonloc::ConcreteInt>()) {
std::pair<NonLoc, nonloc::ConcreteInt> simplifiedOffsets =
- getSimplifiedOffsets(rawOffset.getByteOffset(),
- Size.castAs<nonloc::ConcreteInt>(), svalBuilder);
+ getSimplifiedOffsets(rawOffset.getByteOffset(), *ConcreteSize,
+ svalBuilder);
rawOffsetVal = simplifiedOffsets.first;
Size = simplifiedOffsets.second;
}
@@ -275,7 +274,7 @@ void RegionRawOffsetV2::dumpToStream(raw_ostream &os) const {
// is unknown or undefined, we lazily substitute '0'. Otherwise,
// return 'val'.
static inline SVal getValue(SVal val, SValBuilder &svalBuilder) {
- return val.getAs<UndefinedVal>() ? svalBuilder.makeArrayIndex(0) : val;
+ return val.isUndef() ? svalBuilder.makeZeroArrayIndex() : val;
}
// Scale a base value by a scaling factor, and return the scaled
@@ -324,7 +323,7 @@ RegionRawOffsetV2 RegionRawOffsetV2::computeOffset(ProgramStateRef state,
case MemRegion::ElementRegionKind: {
const ElementRegion *elemReg = cast<ElementRegion>(region);
SVal index = elemReg->getIndex();
- if (!index.getAs<NonLoc>())
+ if (!isa<NonLoc>(index))
return RegionRawOffsetV2();
QualType elemType = elemReg->getElementType();
// If the element is an incomplete type, go no further.
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
index 2c210fb6cdb9..330ca90b7659 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BasicObjCFoundationChecks.cpp
@@ -347,10 +347,6 @@ public:
CFNumberChecker() : ICreate(nullptr), IGetValue(nullptr) {}
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
-
-private:
- void EmitError(const TypedRegion* R, const Expr *Ex,
- uint64_t SourceSize, uint64_t TargetSize, uint64_t NumberKind);
};
} // end anonymous namespace
@@ -446,7 +442,7 @@ void CFNumberChecker::checkPreStmt(const CallExpr *CE,
// FIXME: We really should allow ranges of valid theType values, and
// bifurcate the state appropriately.
- Optional<nonloc::ConcreteInt> V = TheTypeVal.getAs<nonloc::ConcreteInt>();
+ Optional<nonloc::ConcreteInt> V = dyn_cast<nonloc::ConcreteInt>(TheTypeVal);
if (!V)
return;
@@ -758,7 +754,7 @@ void VariadicMethodTypeChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
continue;
// Ignore pointer constants.
- if (msg.getArgSVal(I).getAs<loc::ConcreteInt>())
+ if (isa<loc::ConcreteInt>(msg.getArgSVal(I)))
continue;
// Ignore pointer types annotated with 'NSObject' attribute.
@@ -770,7 +766,7 @@ void VariadicMethodTypeChecker::checkPreObjCMessage(const ObjCMethodCall &msg,
continue;
// Generate only one error node to use for all bug reports.
- if (!errorNode.hasValue())
+ if (!errorNode)
errorNode = C.generateNonFatalErrorNode();
if (!errorNode.getValue())
@@ -907,7 +903,7 @@ static ProgramStateRef checkElementNonNil(CheckerContext &C,
// Go ahead and assume the value is non-nil.
SVal Val = State->getSVal(*ElementLoc);
- return State->assume(Val.castAs<DefinedOrUnknownSVal>(), true);
+ return State->assume(cast<DefinedOrUnknownSVal>(Val), true);
}
/// Returns NULL state if the collection is known to contain elements
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
index 6c0caf3c4e78..dad25d6f853b 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/BoolAssignmentChecker.cpp
@@ -12,6 +12,7 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Checkers/Taint.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -23,20 +24,23 @@ using namespace ento;
namespace {
class BoolAssignmentChecker : public Checker< check::Bind > {
mutable std::unique_ptr<BuiltinBug> BT;
- void emitReport(ProgramStateRef state, CheckerContext &C) const;
+ void emitReport(ProgramStateRef state, CheckerContext &C,
+ bool IsTainted = false) const;
+
public:
void checkBind(SVal loc, SVal val, const Stmt *S, CheckerContext &C) const;
};
} // end anonymous namespace
-void BoolAssignmentChecker::emitReport(ProgramStateRef state,
- CheckerContext &C) const {
+void BoolAssignmentChecker::emitReport(ProgramStateRef state, CheckerContext &C,
+ bool IsTainted) const {
if (ExplodedNode *N = C.generateNonFatalErrorNode(state)) {
if (!BT)
BT.reset(new BuiltinBug(this, "Assignment of a non-Boolean value"));
- C.emitReport(
- std::make_unique<PathSensitiveBugReport>(*BT, BT->getDescription(), N));
+ StringRef Msg = IsTainted ? "Might assign a tainted non-Boolean value"
+ : "Assignment of a non-Boolean value";
+ C.emitReport(std::make_unique<PathSensitiveBugReport>(*BT, Msg, N));
}
}
@@ -90,6 +94,8 @@ void BoolAssignmentChecker::checkBind(SVal loc, SVal val, const Stmt *S,
if (!StIn)
emitReport(StOut, C);
+ if (StIn && StOut && taint::isTainted(state, *NV))
+ emitReport(StOut, C, /*IsTainted=*/true);
}
void ento::registerBoolAssignmentChecker(CheckerManager &mgr) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
index 6955efe8e6c2..2e4c8e643698 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CStringChecker.cpp
@@ -80,7 +80,7 @@ class CStringChecker : public Checker< eval::Call,
check::RegionChanges
> {
mutable std::unique_ptr<BugType> BT_Null, BT_Bounds, BT_Overlap,
- BT_NotCString, BT_AdditionOverflow;
+ BT_NotCString, BT_AdditionOverflow, BT_UninitRead;
mutable const char *CurrentFunctionDescription;
@@ -88,15 +88,17 @@ public:
/// The filter is used to filter out the diagnostics which are not enabled by
/// the user.
struct CStringChecksFilter {
- DefaultBool CheckCStringNullArg;
- DefaultBool CheckCStringOutOfBounds;
- DefaultBool CheckCStringBufferOverlap;
- DefaultBool CheckCStringNotNullTerm;
+ bool CheckCStringNullArg = false;
+ bool CheckCStringOutOfBounds = false;
+ bool CheckCStringBufferOverlap = false;
+ bool CheckCStringNotNullTerm = false;
+ bool CheckCStringUninitializedRead = false;
CheckerNameRef CheckNameCStringNullArg;
CheckerNameRef CheckNameCStringOutOfBounds;
CheckerNameRef CheckNameCStringBufferOverlap;
CheckerNameRef CheckNameCStringNotNullTerm;
+ CheckerNameRef CheckNameCStringUninitializedRead;
};
CStringChecksFilter Filter;
@@ -257,7 +259,8 @@ public:
void emitNotCStringBug(CheckerContext &C, ProgramStateRef State,
const Stmt *S, StringRef WarningMsg) const;
void emitAdditionOverflowBug(CheckerContext &C, ProgramStateRef State) const;
-
+ void emitUninitializedReadBug(CheckerContext &C, ProgramStateRef State,
+ const Expr *E) const;
ProgramStateRef checkAdditionOverflow(CheckerContext &C,
ProgramStateRef state,
NonLoc left,
@@ -352,8 +355,8 @@ ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
// Get the index of the accessed element.
DefinedOrUnknownSVal Idx = ER->getIndex().castAs<DefinedOrUnknownSVal>();
- ProgramStateRef StInBound = state->assumeInBound(Idx, Size, true);
- ProgramStateRef StOutBound = state->assumeInBound(Idx, Size, false);
+ ProgramStateRef StInBound, StOutBound;
+ std::tie(StInBound, StOutBound) = state->assumeInBoundDual(Idx, Size);
if (StOutBound && !StInBound) {
// These checks are either enabled by the CString out-of-bounds checker
// explicitly or implicitly by the Malloc checker.
@@ -368,6 +371,15 @@ ProgramStateRef CStringChecker::CheckLocation(CheckerContext &C,
return nullptr;
}
+ // Ensure that we wouldn't read an uninitialized value.
+ if (Access == AccessKind::read) {
+ if (Filter.CheckCStringUninitializedRead &&
+ StInBound->getSVal(ER).isUndef()) {
+ emitUninitializedReadBug(C, StInBound, Buffer.Expression);
+ return nullptr;
+ }
+ }
+
// Array bound check succeeded. From this point forward the array bound
// should always succeed.
return StInBound;
@@ -420,7 +432,6 @@ ProgramStateRef CStringChecker::CheckBufferAccess(CheckerContext &C,
SVal BufEnd =
svalBuilder.evalBinOpLN(State, BO_Add, *BufLoc, LastOffset, PtrTy);
-
State = CheckLocation(C, State, Buffer, BufEnd, Access);
// If the buffer isn't large enough, abort.
@@ -449,6 +460,11 @@ ProgramStateRef CStringChecker::CheckOverlap(CheckerContext &C,
ProgramStateRef stateTrue, stateFalse;
+ // Assume different address spaces cannot overlap.
+ if (First.Expression->getType()->getPointeeType().getAddressSpace() !=
+ Second.Expression->getType()->getPointeeType().getAddressSpace())
+ return state;
+
// Get the buffer values and make sure they're known locations.
const LocationContext *LCtx = C.getLocationContext();
SVal firstVal = state->getSVal(First.Expression, LCtx);
@@ -580,6 +596,26 @@ void CStringChecker::emitNullArgBug(CheckerContext &C, ProgramStateRef State,
}
}
+void CStringChecker::emitUninitializedReadBug(CheckerContext &C,
+ ProgramStateRef State,
+ const Expr *E) const {
+ if (ExplodedNode *N = C.generateErrorNode(State)) {
+ const char *Msg =
+ "Bytes string function accesses uninitialized/garbage values";
+ if (!BT_UninitRead)
+ BT_UninitRead.reset(
+ new BuiltinBug(Filter.CheckNameCStringUninitializedRead,
+ "Accessing unitialized/garbage values", Msg));
+
+ BuiltinBug *BT = static_cast<BuiltinBug *>(BT_UninitRead.get());
+
+ auto Report = std::make_unique<PathSensitiveBugReport>(*BT, Msg, N);
+ Report->addRange(E->getSourceRange());
+ bugreporter::trackExpressionValue(N, E, *Report);
+ C.emitReport(std::move(Report));
+ }
+}
+
void CStringChecker::emitOutOfBoundsBug(CheckerContext &C,
ProgramStateRef State, const Stmt *S,
StringRef WarningMsg) const {
@@ -622,8 +658,8 @@ void CStringChecker::emitNotCStringBug(CheckerContext &C, ProgramStateRef State,
void CStringChecker::emitAdditionOverflowBug(CheckerContext &C,
ProgramStateRef State) const {
if (ExplodedNode *N = C.generateErrorNode(State)) {
- if (!BT_NotCString)
- BT_NotCString.reset(
+ if (!BT_AdditionOverflow)
+ BT_AdditionOverflow.reset(
new BuiltinBug(Filter.CheckNameCStringOutOfBounds, "API",
"Sum of expressions causes overflow."));
@@ -634,8 +670,8 @@ void CStringChecker::emitAdditionOverflowBug(CheckerContext &C,
"This expression will create a string whose length is too big to "
"be represented as a size_t";
- auto Report =
- std::make_unique<PathSensitiveBugReport>(*BT_NotCString, WarningMsg, N);
+ auto Report = std::make_unique<PathSensitiveBugReport>(*BT_AdditionOverflow,
+ WarningMsg, N);
C.emitReport(std::move(Report));
}
}
@@ -660,7 +696,7 @@ ProgramStateRef CStringChecker::checkAdditionOverflow(CheckerContext &C,
NonLoc maxVal = svalBuilder.makeIntVal(maxValInt);
SVal maxMinusRight;
- if (right.getAs<nonloc::ConcreteInt>()) {
+ if (isa<nonloc::ConcreteInt>(right)) {
maxMinusRight = svalBuilder.evalBinOpNN(state, BO_Sub, maxVal, right,
sizeTy);
} else {
@@ -1010,23 +1046,20 @@ bool CStringChecker::SummarizeRegion(raw_ostream &os, ASTContext &Ctx,
case MemRegion::CXXThisRegionKind:
case MemRegion::CXXTempObjectRegionKind:
os << "a C++ temp object of type "
- << cast<TypedValueRegion>(MR)->getValueType().getAsString();
+ << cast<TypedValueRegion>(MR)->getValueType();
return true;
case MemRegion::NonParamVarRegionKind:
- os << "a variable of type"
- << cast<TypedValueRegion>(MR)->getValueType().getAsString();
+ os << "a variable of type" << cast<TypedValueRegion>(MR)->getValueType();
return true;
case MemRegion::ParamVarRegionKind:
- os << "a parameter of type"
- << cast<TypedValueRegion>(MR)->getValueType().getAsString();
+ os << "a parameter of type" << cast<TypedValueRegion>(MR)->getValueType();
return true;
case MemRegion::FieldRegionKind:
- os << "a field of type "
- << cast<TypedValueRegion>(MR)->getValueType().getAsString();
+ os << "a field of type " << cast<TypedValueRegion>(MR)->getValueType();
return true;
case MemRegion::ObjCIvarRegionKind:
os << "an instance variable of type "
- << cast<TypedValueRegion>(MR)->getValueType().getAsString();
+ << cast<TypedValueRegion>(MR)->getValueType();
return true;
default:
return false;
@@ -1642,7 +1675,7 @@ void CStringChecker::evalStrcpyCommon(CheckerContext &C, const CallExpr *CE,
// amountCopied = min (size - dstLen - 1 , srcLen)
SVal freeSpace = svalBuilder.evalBinOpNN(state, BO_Sub, *lenValNL,
*dstStrLengthNL, sizeTy);
- if (!freeSpace.getAs<NonLoc>())
+ if (!isa<NonLoc>(freeSpace))
return;
freeSpace =
svalBuilder.evalBinOp(state, BO_Sub, freeSpace,
@@ -2460,3 +2493,4 @@ REGISTER_CHECKER(CStringNullArg)
REGISTER_CHECKER(CStringOutOfBounds)
REGISTER_CHECKER(CStringBufferOverlap)
REGISTER_CHECKER(CStringNotNullTerm)
+REGISTER_CHECKER(CStringUninitializedRead)
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
index 3e46e2372516..79225e8e6ca3 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CallAndMessageChecker.cpp
@@ -73,7 +73,7 @@ public:
CK_NumCheckKinds
};
- DefaultBool ChecksEnabled[CK_NumCheckKinds];
+ bool ChecksEnabled[CK_NumCheckKinds] = {false};
// The original core.CallAndMessage checker name. This should rather be an
// array, as seen in MallocChecker and CStringChecker.
CheckerNameRef OriginalName;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp
index 945590d0ba8c..45f9a82a9d0a 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CastValueChecker.cpp
@@ -250,7 +250,7 @@ static void addCastTransition(const CallEvent &Call, DefinedOrUnknownSVal DV,
CastSucceeds);
SVal V = CastSucceeds ? C.getSValBuilder().evalCast(DV, CastToTy, CastFromTy)
- : C.getSValBuilder().makeNull();
+ : C.getSValBuilder().makeNullWithType(CastToTy);
C.addTransition(
State->BindExpr(Call.getOriginExpr(), C.getLocationContext(), V, false),
getNoteTag(C, CastInfo, CastToTy, Object, CastSucceeds, IsKnownCast));
@@ -359,7 +359,9 @@ static void evalNullParamNullReturn(const CallEvent &Call,
if (ProgramStateRef State = C.getState()->assume(DV, false))
C.addTransition(State->BindExpr(Call.getOriginExpr(),
C.getLocationContext(),
- C.getSValBuilder().makeNull(), false),
+ C.getSValBuilder().makeNullWithType(
+ Call.getOriginExpr()->getType()),
+ false),
C.getNoteTag("Assuming null pointer is passed into cast",
/*IsPrunable=*/true));
}
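
The CastValueChecker hunks above replace SValBuilder::makeNull() with makeNullWithType(), so the null SVal is built with the pointer type in view rather than the default pointer width. A minimal sketch, names illustrative:

// Sketch only: preferring a typed null when the result type is known.
#include "clang/StaticAnalyzer/Core/PathSensitive/SValBuilder.h"

using namespace clang;
using namespace ento;

static SVal nullForCast(SValBuilder &SVB, QualType CastToTy) {
  // makeNull() assumes the default pointer width; makeNullWithType()
  // consults the given type, so non-default address spaces and pointer
  // widths are represented correctly.
  return SVB.makeNullWithType(CastToTy);
}
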
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
index 78b3c209ad6b..2d2ddcdf3890 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCDealloc.cpp
@@ -286,7 +286,7 @@ void ObjCDeallocChecker::checkBeginFunction(
if (!LValLoc)
continue;
- SVal InitialVal = State->getSVal(LValLoc.getValue());
+ SVal InitialVal = State->getSVal(*LValLoc);
SymbolRef Symbol = InitialVal.getAsSymbol();
if (!Symbol || !isa<SymbolRegionValue>(Symbol))
continue;
@@ -957,7 +957,7 @@ ObjCDeallocChecker::getValueReleasedByNillingOut(const ObjCMethodCall &M,
if (!LValLoc)
return nullptr;
- SVal CurrentValInIvar = State->getSVal(LValLoc.getValue());
+ SVal CurrentValInIvar = State->getSVal(*LValLoc);
return CurrentValInIvar.getAsSymbol();
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
index a13de306eac8..c8fe5c2ccf38 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckObjCInstMethSignature.cpp
@@ -55,13 +55,11 @@ static void CompareReturnTypes(const ObjCMethodDecl *MethDerived,
<< *MethAncestor->getClassInterface()
<< "', defines the instance method '";
MethDerived->getSelector().print(os);
- os << "' whose return type is '"
- << ResDerived.getAsString()
+ os << "' whose return type is '" << ResDerived
<< "'. A method with the same name (same selector) is also defined in "
"class '"
- << *MethAncestor->getClassInterface()
- << "' and has a return type of '"
- << ResAncestor.getAsString()
+ << *MethAncestor->getClassInterface() << "' and has a return type of '"
+ << ResAncestor
<< "'. These two types are incompatible, and may result in undefined "
"behavior for clients of these classes.";
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
index 61ff5e59f06d..18d575041ba7 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CheckSecuritySyntaxOnly.cpp
@@ -36,20 +36,20 @@ static bool isArc4RandomAvailable(const ASTContext &Ctx) {
namespace {
struct ChecksFilter {
- DefaultBool check_bcmp;
- DefaultBool check_bcopy;
- DefaultBool check_bzero;
- DefaultBool check_gets;
- DefaultBool check_getpw;
- DefaultBool check_mktemp;
- DefaultBool check_mkstemp;
- DefaultBool check_strcpy;
- DefaultBool check_DeprecatedOrUnsafeBufferHandling;
- DefaultBool check_rand;
- DefaultBool check_vfork;
- DefaultBool check_FloatLoopCounter;
- DefaultBool check_UncheckedReturn;
- DefaultBool check_decodeValueOfObjCType;
+ bool check_bcmp = false;
+ bool check_bcopy = false;
+ bool check_bzero = false;
+ bool check_gets = false;
+ bool check_getpw = false;
+ bool check_mktemp = false;
+ bool check_mkstemp = false;
+ bool check_strcpy = false;
+ bool check_DeprecatedOrUnsafeBufferHandling = false;
+ bool check_rand = false;
+ bool check_vfork = false;
+ bool check_FloatLoopCounter = false;
+ bool check_UncheckedReturn = false;
+ bool check_decodeValueOfObjCType = false;
CheckerNameRef checkName_bcmp;
CheckerNameRef checkName_bcopy;
@@ -325,7 +325,7 @@ void WalkAST::checkLoopConditionForFloat(const ForStmt *FS) {
llvm::raw_svector_ostream os(sbuf);
os << "Variable '" << drCond->getDecl()->getName()
- << "' with floating point type '" << drCond->getType().getAsString()
+ << "' with floating point type '" << drCond->getType()
<< "' should not be used as a loop counter";
ranges.push_back(drCond->getSourceRange());
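
Several hunks in this patch drop QualType::getAsString() in favor of streaming the QualType straight into the raw_ostream, using the operator<< overload for QualType that this version of clang provides; this avoids building a temporary std::string. A minimal sketch, with an illustrative message:

// Sketch only: the QualType printing idiom adopted throughout this patch.
#include "clang/AST/Type.h"
#include "llvm/Support/raw_ostream.h"

using namespace clang;

static void describe(llvm::raw_ostream &OS, QualType T) {
  // Old: OS << T.getAsString();  (materializes a std::string)
  // New: stream the type directly.
  OS << "value of type '" << T << "'";
}
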
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CloneChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CloneChecker.cpp
index 7968aed85e1b..0e21ea7e90c9 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CloneChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/CloneChecker.cpp
@@ -30,7 +30,7 @@ class CloneChecker
public:
// Checker options.
int MinComplexity;
- bool ReportNormalClones;
+ bool ReportNormalClones = false;
StringRef IgnoredFilesPattern;
private:
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
index 8070d869f678..2102f9233bc1 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DeadStoresChecker.cpp
@@ -107,11 +107,8 @@ LookThroughTransitiveAssignmentsAndCommaOperators(const Expr *Ex) {
dyn_cast<BinaryOperator>(Ex->IgnoreParenCasts());
if (!BO)
break;
- if (BO->getOpcode() == BO_Assign) {
- Ex = BO->getRHS();
- continue;
- }
- if (BO->getOpcode() == BO_Comma) {
+ BinaryOperatorKind Op = BO->getOpcode();
+ if (Op == BO_Assign || Op == BO_Comma) {
Ex = BO->getRHS();
continue;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
index 4a9c7ce3c66d..a678c3827e7f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DereferenceChecker.cpp
@@ -11,9 +11,10 @@
//
//===----------------------------------------------------------------------===//
-#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExprOpenMP.h"
+#include "clang/Basic/TargetInfo.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -39,6 +40,8 @@ class DereferenceChecker
void reportBug(DerefKind K, ProgramStateRef State, const Stmt *S,
CheckerContext &C) const;
+ bool suppressReport(CheckerContext &C, const Expr *E) const;
+
public:
void checkLocation(SVal location, bool isLoad, const Stmt* S,
CheckerContext &C) const;
@@ -49,6 +52,8 @@ public:
const Expr *Ex, const ProgramState *state,
const LocationContext *LCtx,
bool loadedFrom = false);
+
+ bool SuppressAddressSpaces = false;
};
} // end anonymous namespace
@@ -109,9 +114,35 @@ static const Expr *getDereferenceExpr(const Stmt *S, bool IsBind=false){
return E;
}
-static bool suppressReport(const Expr *E) {
- // Do not report dereferences on memory in non-default address spaces.
- return E->getType().hasAddressSpace();
+bool DereferenceChecker::suppressReport(CheckerContext &C,
+ const Expr *E) const {
+ // Do not report dereferences on memory that uses address space #256, #257,
+ // or #258. Those address spaces are used for memory accessed relative to
+ // the GS, FS, and SS segments on x86/x86-64 targets.
+ // Dereferencing a null pointer in these address spaces is not defined
+ // as an error, while null dereferences in all other address spaces
+ // are treated as errors unless explicitly suppressed.
+ // See https://clang.llvm.org/docs/LanguageExtensions.html, the section
+ // "X86/X86-64 Language Extensions".
+
+ QualType Ty = E->getType();
+ if (!Ty.hasAddressSpace())
+ return false;
+ if (SuppressAddressSpaces)
+ return true;
+
+ const llvm::Triple::ArchType Arch =
+ C.getASTContext().getTargetInfo().getTriple().getArch();
+
+ if ((Arch == llvm::Triple::x86) || (Arch == llvm::Triple::x86_64)) {
+ switch (toTargetAddressSpace(E->getType().getAddressSpace())) {
+ case 256:
+ case 257:
+ case 258:
+ return true;
+ }
+ }
+ return false;
}
static bool isDeclRefExprToReference(const Expr *E) {
@@ -209,7 +240,7 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
// Check for dereference of an undefined value.
if (l.isUndef()) {
const Expr *DerefExpr = getDereferenceExpr(S);
- if (!suppressReport(DerefExpr))
+ if (!suppressReport(C, DerefExpr))
reportBug(DerefKind::UndefinedPointerValue, C.getState(), DerefExpr, C);
return;
}
@@ -217,7 +248,7 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
DefinedOrUnknownSVal location = l.castAs<DefinedOrUnknownSVal>();
// Check for null dereferences.
- if (!location.getAs<Loc>())
+ if (!isa<Loc>(location))
return;
ProgramStateRef state = C.getState();
@@ -230,7 +261,7 @@ void DereferenceChecker::checkLocation(SVal l, bool isLoad, const Stmt* S,
// We know that 'location' can only be null. This is what
// we call an "explicit" null dereference.
const Expr *expr = getDereferenceExpr(S);
- if (!suppressReport(expr)) {
+ if (!suppressReport(C, expr)) {
reportBug(DerefKind::NullPointer, nullState, expr, C);
return;
}
@@ -272,7 +303,7 @@ void DereferenceChecker::checkBind(SVal L, SVal V, const Stmt *S,
if (StNull) {
if (!StNonNull) {
const Expr *expr = getDereferenceExpr(S, /*IsBind=*/true);
- if (!suppressReport(expr)) {
+ if (!suppressReport(C, expr)) {
reportBug(DerefKind::NullPointer, StNull, expr, C);
return;
}
@@ -308,7 +339,9 @@ void DereferenceChecker::checkBind(SVal L, SVal V, const Stmt *S,
}
void ento::registerDereferenceChecker(CheckerManager &mgr) {
- mgr.registerChecker<DereferenceChecker>();
+ auto *Chk = mgr.registerChecker<DereferenceChecker>();
+ Chk->SuppressAddressSpaces = mgr.getAnalyzerOptions().getCheckerBooleanOption(
+ mgr.getCurrentCheckerName(), "SuppressAddressSpaces");
}
bool ento::shouldRegisterDereferenceChecker(const CheckerManager &mgr) {
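
The rewrite above narrows the old blanket suppression: by default only the x86/x86-64 segment address spaces #256-#258 are skipped, while the new SuppressAddressSpaces option restores the old behavior for all non-default address spaces. A sketch of the distinction, using the Clang address_space extension (function names illustrative):

// Sketch only: which null dereferences the checker now reports.
#define GS_REL __attribute__((address_space(256)))
#define AS1 __attribute__((address_space(1)))

int deref_gs(GS_REL int *p) { return *p; }  // still suppressed on x86/x86-64
int deref_as1(AS1 int *p) { return *p; }    // now reported if p may be null,
                                            // unless SuppressAddressSpaces=true
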
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
index 2b3164ba4a2c..bfd1d4c162ec 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DivZeroChecker.cpp
@@ -11,8 +11,8 @@
//
//===----------------------------------------------------------------------===//
-#include "Taint.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Checkers/Taint.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
index b07f59125a82..aaf8cca32b60 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/DynamicTypePropagation.cpp
@@ -56,9 +56,6 @@ class DynamicTypePropagation:
check::PreObjCMessage,
check::PostObjCMessage > {
- const ObjCObjectType *getObjectTypeForAllocAndNew(const ObjCMessageExpr *MsgE,
- CheckerContext &C) const;
-
/// Return a better dynamic type if one can be derived from the cast.
const ObjCObjectPointerType *getBetterObjCType(const Expr *CastE,
CheckerContext &C) const;
@@ -108,7 +105,7 @@ public:
void checkPostObjCMessage(const ObjCMethodCall &M, CheckerContext &C) const;
/// This value is set to true, when the Generics checker is turned on.
- DefaultBool CheckGenerics;
+ bool CheckGenerics = false;
CheckerNameRef GenericCheckName;
};
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoChecker.cpp
new file mode 100644
index 000000000000..d8c3d7a4a6a6
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoChecker.cpp
@@ -0,0 +1,249 @@
+//=== ErrnoChecker.cpp ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines an "errno checker" that can detect some invalid use of the
+// system-defined value 'errno'. This checker works together with the
+// ErrnoModeling checker and other checkers like StdCLibraryFunctions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ErrnoModeling.h"
+#include "clang/AST/ParentMapContext.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "llvm/ADT/STLExtras.h"
+
+using namespace clang;
+using namespace ento;
+using namespace errno_modeling;
+
+namespace {
+
+class ErrnoChecker
+ : public Checker<check::Location, check::PreCall, check::RegionChanges> {
+public:
+ void checkLocation(SVal Loc, bool IsLoad, const Stmt *S,
+ CheckerContext &) const;
+ void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
+ ProgramStateRef
+ checkRegionChanges(ProgramStateRef State,
+ const InvalidatedSymbols *Invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions,
+ const LocationContext *LCtx, const CallEvent *Call) const;
+ void checkBranchCondition(const Stmt *Condition, CheckerContext &Ctx) const;
+
+ /// Indicates whether a read (load) of \c errno is allowed in a non-condition
+ /// part of \c if, \c switch, loop, and conditional statements when the errno
+ /// value may be undefined.
+ bool AllowErrnoReadOutsideConditions = true;
+
+private:
+ void generateErrnoNotCheckedBug(CheckerContext &C, ProgramStateRef State,
+ const MemRegion *ErrnoRegion,
+ const CallEvent *CallMayChangeErrno) const;
+
+ BugType BT_InvalidErrnoRead{this, "Value of 'errno' could be undefined",
+ "Error handling"};
+ BugType BT_ErrnoNotChecked{this, "Value of 'errno' was not checked",
+ "Error handling"};
+};
+
+} // namespace
+
+static ProgramStateRef setErrnoStateIrrelevant(ProgramStateRef State) {
+ return setErrnoState(State, Irrelevant);
+}
+
+/// Check whether a statement (expression) or one of its ancestors is the
+/// condition part of a (conditional, loop, or switch) statement.
+static bool isInCondition(const Stmt *S, CheckerContext &C) {
+ ParentMapContext &ParentCtx = C.getASTContext().getParentMapContext();
+ bool CondFound = false;
+ while (S && !CondFound) {
+ const DynTypedNodeList Parents = ParentCtx.getParents(*S);
+ if (Parents.empty())
+ break;
+ const auto *ParentS = Parents[0].get<Stmt>();
+ if (!ParentS || isa<CallExpr>(ParentS))
+ break;
+ switch (ParentS->getStmtClass()) {
+ case Expr::IfStmtClass:
+ CondFound = (S == cast<IfStmt>(ParentS)->getCond());
+ break;
+ case Expr::ForStmtClass:
+ CondFound = (S == cast<ForStmt>(ParentS)->getCond());
+ break;
+ case Expr::DoStmtClass:
+ CondFound = (S == cast<DoStmt>(ParentS)->getCond());
+ break;
+ case Expr::WhileStmtClass:
+ CondFound = (S == cast<WhileStmt>(ParentS)->getCond());
+ break;
+ case Expr::SwitchStmtClass:
+ CondFound = (S == cast<SwitchStmt>(ParentS)->getCond());
+ break;
+ case Expr::ConditionalOperatorClass:
+ CondFound = (S == cast<ConditionalOperator>(ParentS)->getCond());
+ break;
+ case Expr::BinaryConditionalOperatorClass:
+ CondFound = (S == cast<BinaryConditionalOperator>(ParentS)->getCommon());
+ break;
+ default:
+ break;
+ }
+ S = ParentS;
+ }
+ return CondFound;
+}
+
+void ErrnoChecker::generateErrnoNotCheckedBug(
+ CheckerContext &C, ProgramStateRef State, const MemRegion *ErrnoRegion,
+ const CallEvent *CallMayChangeErrno) const {
+ if (ExplodedNode *N = C.generateNonFatalErrorNode(State)) {
+ SmallString<100> StrBuf;
+ llvm::raw_svector_ostream OS(StrBuf);
+ if (CallMayChangeErrno) {
+ OS << "Value of 'errno' was not checked and may be overwritten by "
+ "function '";
+ const auto *CallD =
+ dyn_cast_or_null<FunctionDecl>(CallMayChangeErrno->getDecl());
+ assert(CallD && CallD->getIdentifier());
+ OS << CallD->getIdentifier()->getName() << "'";
+ } else {
+ OS << "Value of 'errno' was not checked and is overwritten here";
+ }
+ auto BR = std::make_unique<PathSensitiveBugReport>(BT_ErrnoNotChecked,
+ OS.str(), N);
+ BR->markInteresting(ErrnoRegion);
+ C.emitReport(std::move(BR));
+ }
+}
+
+void ErrnoChecker::checkLocation(SVal Loc, bool IsLoad, const Stmt *S,
+ CheckerContext &C) const {
+ Optional<ento::Loc> ErrnoLoc = getErrnoLoc(C.getState());
+ if (!ErrnoLoc)
+ return;
+
+ auto L = Loc.getAs<ento::Loc>();
+ if (!L || *ErrnoLoc != *L)
+ return;
+
+ ProgramStateRef State = C.getState();
+ ErrnoCheckState EState = getErrnoState(State);
+
+ if (IsLoad) {
+ switch (EState) {
+ case MustNotBeChecked:
+ // Read of 'errno' when it may have an undefined value.
+ if (!AllowErrnoReadOutsideConditions || isInCondition(S, C)) {
+ if (ExplodedNode *N = C.generateErrorNode()) {
+ auto BR = std::make_unique<PathSensitiveBugReport>(
+ BT_InvalidErrnoRead,
+ "An undefined value may be read from 'errno'", N);
+ BR->markInteresting(ErrnoLoc->getAsRegion());
+ C.emitReport(std::move(BR));
+ }
+ }
+ break;
+ case MustBeChecked:
+ // 'errno' has to be checked. A load is required for this; with no more
+ // information, we can assume that it is checked somehow.
+ // After this point 'errno' is allowed to be read and written.
+ State = setErrnoStateIrrelevant(State);
+ C.addTransition(State);
+ break;
+ default:
+ break;
+ }
+ } else {
+ switch (EState) {
+ case MustBeChecked:
+ // 'errno' is overwritten without being read first, even though it
+ // should have been checked.
+ generateErrnoNotCheckedBug(C, setErrnoStateIrrelevant(State),
+ ErrnoLoc->getAsRegion(), nullptr);
+ break;
+ case MustNotBeChecked:
+ // Write to 'errno' when it is not allowed to be read.
+ // After this point 'errno' is allowed to be read and written.
+ State = setErrnoStateIrrelevant(State);
+ C.addTransition(State);
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+void ErrnoChecker::checkPreCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ const auto *CallF = dyn_cast_or_null<FunctionDecl>(Call.getDecl());
+ if (!CallF)
+ return;
+
+ CallF = CallF->getCanonicalDecl();
+ // If 'errno' must be checked, it should be done as soon as possible, and
+ // before any other call to a system function (something in a system header).
+ // To avoid maintaining a long list of functions that may change 'errno'
+ // (which can differ between standard library versions), assume that any
+ // such function can change it.
+ // A list of special functions that are allowed here without generating a
+ // diagnostic could be used. For now the only such case is 'errno' itself;
+ // 'strerror' is probably another candidate.
+ if (CallF->isExternC() && CallF->isGlobal() &&
+ C.getSourceManager().isInSystemHeader(CallF->getLocation()) &&
+ !isErrno(CallF)) {
+ if (getErrnoState(C.getState()) == MustBeChecked) {
+ Optional<ento::Loc> ErrnoLoc = getErrnoLoc(C.getState());
+ assert(ErrnoLoc && "ErrnoLoc should exist if an errno state is set.");
+ generateErrnoNotCheckedBug(C, setErrnoStateIrrelevant(C.getState()),
+ ErrnoLoc->getAsRegion(), &Call);
+ }
+ }
+}
+
+ProgramStateRef ErrnoChecker::checkRegionChanges(
+ ProgramStateRef State, const InvalidatedSymbols *Invalidated,
+ ArrayRef<const MemRegion *> ExplicitRegions,
+ ArrayRef<const MemRegion *> Regions, const LocationContext *LCtx,
+ const CallEvent *Call) const {
+ Optional<ento::Loc> ErrnoLoc = getErrnoLoc(State);
+ if (!ErrnoLoc)
+ return State;
+ const MemRegion *ErrnoRegion = ErrnoLoc->getAsRegion();
+
+ // If 'errno' is invalidated we cannot know whether it is checked or written
+ // to; allow reads and writes without bug reports.
+ if (llvm::is_contained(Regions, ErrnoRegion))
+ return setErrnoStateIrrelevant(State);
+
+ // Always reset errno state when the system memory space is invalidated.
+ // The ErrnoRegion is not always found in the list in this case.
+ if (llvm::is_contained(Regions, ErrnoRegion->getMemorySpace()))
+ return setErrnoStateIrrelevant(State);
+
+ return State;
+}
+
+void ento::registerErrnoChecker(CheckerManager &mgr) {
+ const AnalyzerOptions &Opts = mgr.getAnalyzerOptions();
+ auto *Checker = mgr.registerChecker<ErrnoChecker>();
+ Checker->AllowErrnoReadOutsideConditions = Opts.getCheckerBooleanOption(
+ Checker, "AllowErrnoReadOutsideConditionExpressions");
+}
+
+bool ento::shouldRegisterErrnoChecker(const CheckerManager &mgr) {
+ return true;
+}
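
To make the two diagnostics concrete, a sketch under the assumption that the surrounding modeling (ErrnoModeling plus a library-function model) sets the errno states as indicated in the comments; the specific calls are illustrative:

// Sketch only: code patterns ErrnoChecker is designed to flag.
#include <errno.h>
#include <stdio.h>

void not_checked(FILE *f) {
  fseek(f, 0, SEEK_SET); // assume the model marks errno as MustBeChecked
  fseek(f, 0, SEEK_END); // "Value of 'errno' was not checked and may be
                         //  overwritten by function 'fseek'"
}

void undefined_read(FILE *f) {
  if (fseek(f, 0, SEEK_SET) == 0) {
    // success path: errno is unspecified here (MustNotBeChecked)
    if (errno) { /* "An undefined value may be read from 'errno'" */ }
  }
}
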
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.cpp
new file mode 100644
index 000000000000..618f7e97f6e8
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.cpp
@@ -0,0 +1,288 @@
+//=== ErrnoModeling.cpp -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines a checker `ErrnoModeling`, which is used to make the system
+// value 'errno' available to other checkers.
+// The 'errno' value is stored in a special memory region that is accessible
+// through the `errno_modeling` namespace. The memory region is either the
+// region of `errno` itself if it is a variable, or otherwise an artificially
+// created region (in the system memory space). If `errno` is defined by using
+// a function which returns its address (this is always the case if it is
+// not a variable), this function is recognized and evaluated. In this way
+// `errno` becomes visible to the analysis and checkers can change its value.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ErrnoModeling.h"
+#include "clang/AST/ParentMapContext.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "llvm/ADT/STLExtras.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+// Name of the "errno" variable.
+// FIXME: Is there a system where it is a variable but not called "errno"?
+const char *ErrnoVarName = "errno";
+// Names of functions that return a location of the "errno" value.
+// FIXME: Are there other similar function names?
+const char *ErrnoLocationFuncNames[] = {"__errno_location", "___errno",
+ "__errno", "_errno", "__error"};
+
+class ErrnoModeling
+ : public Checker<check::ASTDecl<TranslationUnitDecl>, check::BeginFunction,
+ check::LiveSymbols, eval::Call> {
+public:
+ void checkASTDecl(const TranslationUnitDecl *D, AnalysisManager &Mgr,
+ BugReporter &BR) const;
+ void checkBeginFunction(CheckerContext &C) const;
+ void checkLiveSymbols(ProgramStateRef State, SymbolReaper &SR) const;
+ bool evalCall(const CallEvent &Call, CheckerContext &C) const;
+
+ // The declaration of an "errno" variable or "errno location" function.
+ mutable const Decl *ErrnoDecl = nullptr;
+
+private:
+ // FIXME: Names from `ErrnoLocationFuncNames` are used to build this set.
+ CallDescriptionSet ErrnoLocationCalls{{"__errno_location", 0, 0},
+ {"___errno", 0, 0},
+ {"__errno", 0, 0},
+ {"_errno", 0, 0},
+ {"__error", 0, 0}};
+};
+
+} // namespace
+
+/// Store a MemRegion that contains the 'errno' integer value.
+/// The value is null if the 'errno' value was not recognized in the AST.
+REGISTER_TRAIT_WITH_PROGRAMSTATE(ErrnoRegion, const MemRegion *)
+
+REGISTER_TRAIT_WITH_PROGRAMSTATE(ErrnoState, errno_modeling::ErrnoCheckState)
+
+/// Search for a variable called "errno" in the AST.
+/// Return nullptr if not found.
+static const VarDecl *getErrnoVar(ASTContext &ACtx) {
+ IdentifierInfo &II = ACtx.Idents.get(ErrnoVarName);
+ auto LookupRes = ACtx.getTranslationUnitDecl()->lookup(&II);
+ auto Found = llvm::find_if(LookupRes, [&ACtx](const Decl *D) {
+ if (auto *VD = dyn_cast<VarDecl>(D))
+ return ACtx.getSourceManager().isInSystemHeader(VD->getLocation()) &&
+ VD->hasExternalStorage() &&
+ VD->getType().getCanonicalType() == ACtx.IntTy;
+ return false;
+ });
+ if (Found == LookupRes.end())
+ return nullptr;
+
+ return cast<VarDecl>(*Found);
+}
+
+/// Search for a function with a specific name that is used to return a pointer
+/// to "errno".
+/// Return nullptr if no such function was found.
+static const FunctionDecl *getErrnoFunc(ASTContext &ACtx) {
+ SmallVector<const Decl *> LookupRes;
+ for (StringRef ErrnoName : ErrnoLocationFuncNames) {
+ IdentifierInfo &II = ACtx.Idents.get(ErrnoName);
+ llvm::append_range(LookupRes, ACtx.getTranslationUnitDecl()->lookup(&II));
+ }
+
+ auto Found = llvm::find_if(LookupRes, [&ACtx](const Decl *D) {
+ if (auto *FD = dyn_cast<FunctionDecl>(D))
+ return ACtx.getSourceManager().isInSystemHeader(FD->getLocation()) &&
+ FD->isExternC() && FD->getNumParams() == 0 &&
+ FD->getReturnType().getCanonicalType() ==
+ ACtx.getPointerType(ACtx.IntTy);
+ return false;
+ });
+ if (Found == LookupRes.end())
+ return nullptr;
+
+ return cast<FunctionDecl>(*Found);
+}
+
+void ErrnoModeling::checkASTDecl(const TranslationUnitDecl *D,
+ AnalysisManager &Mgr, BugReporter &BR) const {
+ // Try to find a usable `errno` value.
+ // It can be an external variable called "errno" or a function that returns a
+ // pointer to the "errno" value. This function can have different names.
+ // The actual case depends on the C library implementation; we
+ // can only search for a match among these variations.
+ // We assume that at most one of these cases is true.
+ ErrnoDecl = getErrnoVar(Mgr.getASTContext());
+ if (!ErrnoDecl)
+ ErrnoDecl = getErrnoFunc(Mgr.getASTContext());
+}
+
+void ErrnoModeling::checkBeginFunction(CheckerContext &C) const {
+ if (!C.inTopFrame())
+ return;
+
+ ASTContext &ACtx = C.getASTContext();
+ ProgramStateRef State = C.getState();
+
+ if (const auto *ErrnoVar = dyn_cast_or_null<VarDecl>(ErrnoDecl)) {
+ // There is an external 'errno' variable.
+ // Use its memory region.
+ // The memory region for an 'errno'-like variable is allocated in system
+ // space by MemRegionManager.
+ const MemRegion *ErrnoR =
+ State->getRegion(ErrnoVar, C.getLocationContext());
+ assert(ErrnoR && "Memory region should exist for the 'errno' variable.");
+ State = State->set<ErrnoRegion>(ErrnoR);
+ State =
+ errno_modeling::setErrnoValue(State, C, 0, errno_modeling::Irrelevant);
+ C.addTransition(State);
+ } else if (ErrnoDecl) {
+ assert(isa<FunctionDecl>(ErrnoDecl) && "Invalid errno location function.");
+ // There is a function that returns the location of 'errno'.
+ // We must create a memory region for it in system space.
+ // Currently a symbolic region is used with an artificial symbol.
+ // FIXME: It is better to have a custom (new) kind of MemRegion for such
+ // cases.
+ SValBuilder &SVB = C.getSValBuilder();
+ MemRegionManager &RMgr = C.getStateManager().getRegionManager();
+
+ const MemSpaceRegion *GlobalSystemSpace =
+ RMgr.getGlobalsRegion(MemRegion::GlobalSystemSpaceRegionKind);
+
+ // Create an artificial symbol for the region.
+ // There is no statement or expression to associate it with in this case.
+ const SymbolConjured *Sym = SVB.conjureSymbol(
+ nullptr, C.getLocationContext(),
+ ACtx.getLValueReferenceType(ACtx.IntTy), C.blockCount(), &ErrnoDecl);
+
+ // The symbolic region is untyped, create a typed sub-region in it.
+ // The ElementRegion is used to make the errno region a typed region.
+ const MemRegion *ErrnoR = RMgr.getElementRegion(
+ ACtx.IntTy, SVB.makeZeroArrayIndex(),
+ RMgr.getSymbolicRegion(Sym, GlobalSystemSpace), C.getASTContext());
+ State = State->set<ErrnoRegion>(ErrnoR);
+ State =
+ errno_modeling::setErrnoValue(State, C, 0, errno_modeling::Irrelevant);
+ C.addTransition(State);
+ }
+}
+
+bool ErrnoModeling::evalCall(const CallEvent &Call, CheckerContext &C) const {
+ // Return location of "errno" at a call to an "errno address returning"
+ // function.
+ if (ErrnoLocationCalls.contains(Call)) {
+ ProgramStateRef State = C.getState();
+
+ const MemRegion *ErrnoR = State->get<ErrnoRegion>();
+ if (!ErrnoR)
+ return false;
+
+ State = State->BindExpr(Call.getOriginExpr(), C.getLocationContext(),
+ loc::MemRegionVal{ErrnoR});
+ C.addTransition(State);
+ return true;
+ }
+
+ return false;
+}
+
+void ErrnoModeling::checkLiveSymbols(ProgramStateRef State,
+ SymbolReaper &SR) const {
+ // The special errno region should never be garbage collected.
+ if (const auto *ErrnoR = State->get<ErrnoRegion>())
+ SR.markLive(ErrnoR);
+}
+
+namespace clang {
+namespace ento {
+namespace errno_modeling {
+
+Optional<SVal> getErrnoValue(ProgramStateRef State) {
+ const MemRegion *ErrnoR = State->get<ErrnoRegion>();
+ if (!ErrnoR)
+ return {};
+ QualType IntTy = State->getAnalysisManager().getASTContext().IntTy;
+ return State->getSVal(ErrnoR, IntTy);
+}
+
+ProgramStateRef setErrnoValue(ProgramStateRef State,
+ const LocationContext *LCtx, SVal Value,
+ ErrnoCheckState EState) {
+ const MemRegion *ErrnoR = State->get<ErrnoRegion>();
+ if (!ErrnoR)
+ return State;
+ // First set the errno value; the old state is still available to 'checkBind'
+ // or 'checkLocation' for the errno value.
+ State = State->bindLoc(loc::MemRegionVal{ErrnoR}, Value, LCtx);
+ return State->set<ErrnoState>(EState);
+}
+
+ProgramStateRef setErrnoValue(ProgramStateRef State, CheckerContext &C,
+ uint64_t Value, ErrnoCheckState EState) {
+ const MemRegion *ErrnoR = State->get<ErrnoRegion>();
+ if (!ErrnoR)
+ return State;
+ State = State->bindLoc(
+ loc::MemRegionVal{ErrnoR},
+ C.getSValBuilder().makeIntVal(Value, C.getASTContext().IntTy),
+ C.getLocationContext());
+ return State->set<ErrnoState>(EState);
+}
+
+Optional<Loc> getErrnoLoc(ProgramStateRef State) {
+ const MemRegion *ErrnoR = State->get<ErrnoRegion>();
+ if (!ErrnoR)
+ return {};
+ return loc::MemRegionVal{ErrnoR};
+}
+
+ProgramStateRef setErrnoState(ProgramStateRef State, ErrnoCheckState EState) {
+ return State->set<ErrnoState>(EState);
+}
+
+ErrnoCheckState getErrnoState(ProgramStateRef State) {
+ return State->get<ErrnoState>();
+}
+
+bool isErrno(const Decl *D) {
+ if (const auto *VD = dyn_cast_or_null<VarDecl>(D))
+ if (const IdentifierInfo *II = VD->getIdentifier())
+ return II->getName() == ErrnoVarName;
+ if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
+ if (const IdentifierInfo *II = FD->getIdentifier())
+ return llvm::is_contained(ErrnoLocationFuncNames, II->getName());
+ return false;
+}
+
+const NoteTag *getErrnoNoteTag(CheckerContext &C, const std::string &Message) {
+ return C.getNoteTag([Message](PathSensitiveBugReport &BR) -> std::string {
+ const MemRegion *ErrnoR = BR.getErrorNode()->getState()->get<ErrnoRegion>();
+ if (ErrnoR && BR.isInteresting(ErrnoR)) {
+ BR.markNotInteresting(ErrnoR);
+ return Message;
+ }
+ return "";
+ });
+}
+
+} // namespace errno_modeling
+} // namespace ento
+} // namespace clang
+
+void ento::registerErrnoModeling(CheckerManager &mgr) {
+ mgr.registerChecker<ErrnoModeling>();
+}
+
+bool ento::shouldRegisterErrnoModeling(const CheckerManager &mgr) {
+ return true;
+}
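
For orientation, the evalCall above is what lets a glibc-style errno macro expand to something the analyzer can track; a sketch of that shape (macro spelling per glibc convention, simplified):

// Sketch only: the user-code shape the modeling makes analyzable.
int *__errno_location(void);
#define errno (*__errno_location())

void roundtrip(void) {
  errno = 0;     // write through the modeled errno region
  int e = errno; // the analyzer sees the bound value 0 here
  (void)e;
}
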
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.h
new file mode 100644
index 000000000000..3757e25e1afe
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoModeling.h
@@ -0,0 +1,78 @@
+//=== ErrnoModeling.h - Tracking value of 'errno'. -----------------*- C++ -*-//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines inter-checker API for using the system value 'errno'.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_ERRNOMODELING_H
+#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_ERRNOMODELING_H
+
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+
+namespace clang {
+namespace ento {
+namespace errno_modeling {
+
+enum ErrnoCheckState : unsigned {
+ /// We do not know anything about 'errno'.
+ Irrelevant = 0,
+
+ /// Value of 'errno' should be checked to find out if a previous function call
+ /// has failed.
+ MustBeChecked = 1,
+
+ /// Value of 'errno' is not allowed to be read; it can contain an unspecified
+ /// value.
+ MustNotBeChecked = 2
+};
+
+/// Returns the value of 'errno', if 'errno' was found in the AST.
+llvm::Optional<SVal> getErrnoValue(ProgramStateRef State);
+
+/// Returns the errno check state, or \c Irrelevant if 'errno' was not found
+/// (though that is not the only case in which \c Irrelevant is returned).
+ErrnoCheckState getErrnoState(ProgramStateRef State);
+
+/// Returns the location that points to the \c MemRegion where the 'errno'
+/// value is stored. Returns \c None if 'errno' was not found. Otherwise it
+/// always returns a valid memory region in the system global memory space.
+llvm::Optional<Loc> getErrnoLoc(ProgramStateRef State);
+
+/// Set value of 'errno' to any SVal, if possible.
+/// The errno check state is always set when the 'errno' value is set.
+ProgramStateRef setErrnoValue(ProgramStateRef State,
+ const LocationContext *LCtx, SVal Value,
+ ErrnoCheckState EState);
+
+/// Set value of 'errno' to a concrete (signed) integer, if possible.
+/// The errno check state is always set when the 'errno' value is set.
+ProgramStateRef setErrnoValue(ProgramStateRef State, CheckerContext &C,
+ uint64_t Value, ErrnoCheckState EState);
+
+/// Set the errno check state, do not modify the errno value.
+ProgramStateRef setErrnoState(ProgramStateRef State, ErrnoCheckState EState);
+
+/// Determine whether a \c Decl node is related to 'errno'.
+/// This is true if the declaration is the errno variable or a function
+/// that returns a pointer to the 'errno' value (usually the 'errno' macro is
+/// defined with this function). \p D is not required to be a canonical
+/// declaration.
+bool isErrno(const Decl *D);
+
+/// Create a NoteTag that displays the message if the 'errno' memory region is
+/// marked as interesting, and resets the interestingness.
+const NoteTag *getErrnoNoteTag(CheckerContext &C, const std::string &Message);
+
+} // namespace errno_modeling
+} // namespace ento
+} // namespace clang
+
+#endif // LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_ERRNOMODELING_H
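
A minimal sketch of how a client checker might drive this inter-checker API, using only the functions declared above; the checker scaffolding is omitted and the failure semantics (error code 22) are illustrative:

// Sketch only: a hypothetical client of the errno_modeling API.
#include "ErrnoModeling.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"

using namespace clang;
using namespace ento;
using namespace errno_modeling;

// Model a failing call: bind a concrete error code to 'errno' and require
// that it be read before anything may overwrite it.
static ProgramStateRef modelFailure(ProgramStateRef State, CheckerContext &C) {
  return setErrnoValue(State, C, /*Value=*/22 /* e.g. EINVAL */, MustBeChecked);
}

// Model a successful call whose errno value is unspecified afterwards.
static ProgramStateRef modelSuccess(ProgramStateRef State) {
  return setErrnoState(State, MustNotBeChecked);
}
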
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoTesterChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoTesterChecker.cpp
new file mode 100644
index 000000000000..924407ebe2d1
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ErrnoTesterChecker.cpp
@@ -0,0 +1,184 @@
+//=== ErrnoTesterChecker.cpp ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This defines ErrnoTesterChecker, which is used to test the functionality of
+// the errno modeling API.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ErrnoModeling.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/Checker.h"
+#include "clang/StaticAnalyzer/Core/CheckerManager.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+using namespace errno_modeling;
+
+namespace {
+
+class ErrnoTesterChecker : public Checker<eval::Call> {
+public:
+ bool evalCall(const CallEvent &Call, CheckerContext &C) const;
+
+private:
+ /// Evaluate function \code void ErrnoTesterChecker_setErrno(int) \endcode.
+ /// Set value of \c errno to the argument.
+ static void evalSetErrno(CheckerContext &C, const CallEvent &Call);
+ /// Evaluate function \code int ErrnoTesterChecker_getErrno() \endcode.
+ /// Return the value of \c errno.
+ static void evalGetErrno(CheckerContext &C, const CallEvent &Call);
+ /// Evaluate function \code int ErrnoTesterChecker_setErrnoIfError() \endcode.
+ /// Simulate a standard library function that returns 0 on success and 1 on
+ /// failure. In the success case \c errno is not allowed to be used (it may
+ /// be undefined). In the failure case \c errno is set to the fixed value 11
+ /// and does not need to be checked.
+ static void evalSetErrnoIfError(CheckerContext &C, const CallEvent &Call);
+ /// Evaluate function \code int ErrnoTesterChecker_setErrnoIfErrorRange()
+ /// \endcode. Same as \c ErrnoTesterChecker_setErrnoIfError but \c errno is
+ /// constrained to a range (to be nonzero) in the failure case.
+ static void evalSetErrnoIfErrorRange(CheckerContext &C,
+ const CallEvent &Call);
+ /// Evaluate function \code int ErrnoTesterChecker_setErrnoCheckState()
+ /// \endcode. This function simulates the following:
+ /// - Return 0 and leave \c errno with an undefined value.
+ ///   This is the case of a successful standard function call.
+ ///   For example, if \c ftell does not return -1.
+ /// - Return 1 and set \c errno to a specific error code (1).
+ ///   This is the case of a failed standard function call.
+ ///   The function indicates failure by a special return value
+ ///   that is returned only on failure.
+ ///   \c errno can be checked but this is not required.
+ ///   For example, if \c ftell returns -1.
+ /// - Return 2 and possibly set \c errno to a value (in this model it is not
+ ///   actually set). This is the case of a standard function call where
+ ///   failure can only be detected by reading \c errno. The value of
+ ///   \c errno is changed by the function only on failure; the user should
+ ///   set \c errno to 0 before the call (\c ErrnoChecker does not check for
+ ///   this rule). \c strtol is an example of this case, if it returns
+ ///   \c LONG_MIN (or \c LONG_MAX). This case applies only if \c LONG_MIN or
+ ///   \c LONG_MAX is returned; otherwise the first case in this list applies.
+ static void evalSetErrnoCheckState(CheckerContext &C, const CallEvent &Call);
+
+ using EvalFn = std::function<void(CheckerContext &, const CallEvent &)>;
+ const CallDescriptionMap<EvalFn> TestCalls{
+ {{"ErrnoTesterChecker_setErrno", 1}, &ErrnoTesterChecker::evalSetErrno},
+ {{"ErrnoTesterChecker_getErrno", 0}, &ErrnoTesterChecker::evalGetErrno},
+ {{"ErrnoTesterChecker_setErrnoIfError", 0},
+ &ErrnoTesterChecker::evalSetErrnoIfError},
+ {{"ErrnoTesterChecker_setErrnoIfErrorRange", 0},
+ &ErrnoTesterChecker::evalSetErrnoIfErrorRange},
+ {{"ErrnoTesterChecker_setErrnoCheckState", 0},
+ &ErrnoTesterChecker::evalSetErrnoCheckState}};
+};
+
+} // namespace
+
+void ErrnoTesterChecker::evalSetErrno(CheckerContext &C,
+ const CallEvent &Call) {
+ C.addTransition(setErrnoValue(C.getState(), C.getLocationContext(),
+ Call.getArgSVal(0), Irrelevant));
+}
+
+void ErrnoTesterChecker::evalGetErrno(CheckerContext &C,
+ const CallEvent &Call) {
+ ProgramStateRef State = C.getState();
+
+ Optional<SVal> ErrnoVal = getErrnoValue(State);
+ assert(ErrnoVal && "Errno value should be available.");
+ State =
+ State->BindExpr(Call.getOriginExpr(), C.getLocationContext(), *ErrnoVal);
+
+ C.addTransition(State);
+}
+
+void ErrnoTesterChecker::evalSetErrnoIfError(CheckerContext &C,
+ const CallEvent &Call) {
+ ProgramStateRef State = C.getState();
+ SValBuilder &SVB = C.getSValBuilder();
+
+ ProgramStateRef StateSuccess = State->BindExpr(
+ Call.getOriginExpr(), C.getLocationContext(), SVB.makeIntVal(0, true));
+ StateSuccess = setErrnoState(StateSuccess, MustNotBeChecked);
+
+ ProgramStateRef StateFailure = State->BindExpr(
+ Call.getOriginExpr(), C.getLocationContext(), SVB.makeIntVal(1, true));
+ StateFailure = setErrnoValue(StateFailure, C, 11, Irrelevant);
+
+ C.addTransition(StateSuccess);
+ C.addTransition(StateFailure);
+}
+
+void ErrnoTesterChecker::evalSetErrnoIfErrorRange(CheckerContext &C,
+ const CallEvent &Call) {
+ ProgramStateRef State = C.getState();
+ SValBuilder &SVB = C.getSValBuilder();
+
+ ProgramStateRef StateSuccess = State->BindExpr(
+ Call.getOriginExpr(), C.getLocationContext(), SVB.makeIntVal(0, true));
+ StateSuccess = setErrnoState(StateSuccess, MustNotBeChecked);
+
+ ProgramStateRef StateFailure = State->BindExpr(
+ Call.getOriginExpr(), C.getLocationContext(), SVB.makeIntVal(1, true));
+ DefinedOrUnknownSVal ErrnoVal = SVB.conjureSymbolVal(
+ nullptr, Call.getOriginExpr(), C.getLocationContext(), C.blockCount());
+ StateFailure = StateFailure->assume(ErrnoVal, true);
+ assert(StateFailure && "Failed to assume on an initial value.");
+ StateFailure =
+ setErrnoValue(StateFailure, C.getLocationContext(), ErrnoVal, Irrelevant);
+
+ C.addTransition(StateSuccess);
+ C.addTransition(StateFailure);
+}
+
+void ErrnoTesterChecker::evalSetErrnoCheckState(CheckerContext &C,
+ const CallEvent &Call) {
+ ProgramStateRef State = C.getState();
+ SValBuilder &SVB = C.getSValBuilder();
+
+ ProgramStateRef StateSuccess = State->BindExpr(
+ Call.getOriginExpr(), C.getLocationContext(), SVB.makeIntVal(0, true));
+ StateSuccess = setErrnoState(StateSuccess, MustNotBeChecked);
+
+ ProgramStateRef StateFailure1 = State->BindExpr(
+ Call.getOriginExpr(), C.getLocationContext(), SVB.makeIntVal(1, true));
+ StateFailure1 = setErrnoValue(StateFailure1, C, 1, Irrelevant);
+
+ ProgramStateRef StateFailure2 = State->BindExpr(
+ Call.getOriginExpr(), C.getLocationContext(), SVB.makeIntVal(2, true));
+ StateFailure2 = setErrnoValue(StateFailure2, C, 2, MustBeChecked);
+
+ C.addTransition(StateSuccess,
+ getErrnoNoteTag(C, "Assuming that this function succeeds but "
+ "sets 'errno' to an unspecified value."));
+ C.addTransition(StateFailure1);
+ C.addTransition(
+ StateFailure2,
+ getErrnoNoteTag(C, "Assuming that this function returns 2. 'errno' "
+ "should be checked to test for failure."));
+}
+
+bool ErrnoTesterChecker::evalCall(const CallEvent &Call,
+ CheckerContext &C) const {
+ const EvalFn *Fn = TestCalls.lookup(Call);
+ if (Fn) {
+ (*Fn)(C, Call);
+ return C.isDifferent();
+ }
+ return false;
+}
+
+void ento::registerErrnoTesterChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<ErrnoTesterChecker>();
+}
+
+bool ento::shouldRegisterErrnoTesterChecker(const CheckerManager &Mgr) {
+ return true;
+}
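
A sketch of how these hooks get exercised in analyzer tests; clang_analyzer_eval is the usual ExprInspection helper, and the bare errno declaration stands in for the simulated system header that real tests use:

// Sketch only: test-style use of the tester hooks.
int ErrnoTesterChecker_setErrnoCheckState(void);
void clang_analyzer_eval(int);
extern int errno; // simplified; real tests declare errno via a mock header

void test_check_state(void) {
  int Ret = ErrnoTesterChecker_setErrnoCheckState();
  if (Ret == 2)
    clang_analyzer_eval(errno == 2); // TRUE on this path: errno was set to 2
}
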
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
index 2ce1bef6d228..1c33648b2b32 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ExprInspectionChecker.cpp
@@ -6,10 +6,10 @@
//
//===----------------------------------------------------------------------===//
-#include "Taint.h"
#include "clang/Analysis/IssueHash.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Checkers/SValExplainer.h"
+#include "clang/StaticAnalyzer/Checkers/Taint.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
@@ -40,6 +40,7 @@ class ExprInspectionChecker
void analyzerNumTimesReached(const CallExpr *CE, CheckerContext &C) const;
void analyzerCrash(const CallExpr *CE, CheckerContext &C) const;
void analyzerWarnOnDeadSymbol(const CallExpr *CE, CheckerContext &C) const;
+ void analyzerDumpSValType(const CallExpr *CE, CheckerContext &C) const;
void analyzerDump(const CallExpr *CE, CheckerContext &C) const;
void analyzerExplain(const CallExpr *CE, CheckerContext &C) const;
void analyzerPrintState(const CallExpr *CE, CheckerContext &C) const;
@@ -98,6 +99,8 @@ bool ExprInspectionChecker::evalCall(const CallEvent &Call,
&ExprInspectionChecker::analyzerDumpExtent)
.Case("clang_analyzer_dumpElementCount",
&ExprInspectionChecker::analyzerDumpElementCount)
+ .StartsWith("clang_analyzer_dumpSvalType",
+ &ExprInspectionChecker::analyzerDumpSValType)
.StartsWith("clang_analyzer_dump",
&ExprInspectionChecker::analyzerDump)
.Case("clang_analyzer_getExtent",
@@ -255,6 +258,16 @@ void ExprInspectionChecker::analyzerExplain(const CallExpr *CE,
reportBug(Ex.Visit(V), C);
}
+void ExprInspectionChecker::analyzerDumpSValType(const CallExpr *CE,
+ CheckerContext &C) const {
+ const Expr *Arg = getArgExpr(CE, C);
+ if (!Arg)
+ return;
+
+ QualType Ty = C.getSVal(Arg).getType(C.getASTContext());
+ reportBug(Ty.getAsString(), C);
+}
+
void ExprInspectionChecker::analyzerDump(const CallExpr *CE,
CheckerContext &C) const {
const Expr *Arg = getArgExpr(CE, C);
@@ -461,6 +474,14 @@ public:
return None;
}
+ Optional<std::string> VisitUnarySymExpr(const UnarySymExpr *S) {
+ if (Optional<std::string> Str = lookup(S))
+ return Str;
+ if (Optional<std::string> Str = Visit(S->getOperand()))
+ return (UnaryOperator::getOpcodeStr(S->getOpcode()) + *Str).str();
+ return None;
+ }
+
Optional<std::string> VisitSymbolCast(const SymbolCast *S) {
if (Optional<std::string> Str = lookup(S))
return Str;
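
The new clang_analyzer_dumpSvalType hook reports the type of the SVal bound to its argument; a sketch of a test use, with the helper declared ad hoc as ExprInspection tests conventionally do:

// Sketch only: dumping the type of an SVal in a test.
void clang_analyzer_dumpSvalType(int);

void test(int x) {
  clang_analyzer_dumpSvalType(x + 1); // expected-warning {{int}}
}
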
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp
index 6e4801aa8e91..eb3b89ee6c5c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/FuchsiaHandleChecker.cpp
@@ -254,9 +254,6 @@ static const ExplodedNode *getAcquireSite(const ExplodedNode *N, SymbolRef Sym,
namespace {
class FuchsiaHandleSymbolVisitor final : public SymbolVisitor {
public:
- FuchsiaHandleSymbolVisitor(ProgramStateRef State) : State(std::move(State)) {}
- ProgramStateRef getState() const { return State; }
-
bool VisitSymbol(SymbolRef S) override {
if (const auto *HandleType = S->getType()->getAs<TypedefType>())
if (HandleType->getDecl()->getName() == HandleTypeName)
@@ -268,7 +265,6 @@ public:
private:
SmallVector<SymbolRef, 1024> Symbols;
- ProgramStateRef State;
};
} // end anonymous namespace
@@ -284,7 +280,7 @@ getFuchsiaHandleSymbols(QualType QT, SVal Arg, ProgramStateRef State) {
if (QT->isStructureType()) {
// If we see a structure, see if there is any handle referenced by the
// structure.
- FuchsiaHandleSymbolVisitor Visitor(State);
+ FuchsiaHandleSymbolVisitor Visitor;
State->scanReachableSymbols(Arg, Visitor);
return Visitor.GetSymbols();
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GTestChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GTestChecker.cpp
index 8d9afbe88aa8..dbfdff4d2a3b 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GTestChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GTestChecker.cpp
@@ -135,7 +135,7 @@ void GTestChecker::modelAssertionResultBoolConstructor(
SVal BooleanArgVal = Call->getArgSVal(0);
if (IsRef) {
// The argument is a reference, so load from it to get the boolean value.
- if (!BooleanArgVal.getAs<Loc>())
+ if (!isa<Loc>(BooleanArgVal))
return;
BooleanArgVal = C.getState()->getSVal(BooleanArgVal.castAs<Loc>());
}
@@ -260,7 +260,7 @@ SVal GTestChecker::getAssertionResultSuccessFieldValue(
Optional<Loc> FieldLoc =
State->getLValue(SuccessField, Instance).getAs<Loc>();
- if (!FieldLoc.hasValue())
+ if (!FieldLoc)
return UnknownVal();
return State->getSVal(*FieldLoc);
@@ -270,20 +270,17 @@ SVal GTestChecker::getAssertionResultSuccessFieldValue(
ProgramStateRef GTestChecker::assumeValuesEqual(SVal Val1, SVal Val2,
ProgramStateRef State,
CheckerContext &C) {
- if (!Val1.getAs<DefinedOrUnknownSVal>() ||
- !Val2.getAs<DefinedOrUnknownSVal>())
+ auto DVal1 = Val1.getAs<DefinedOrUnknownSVal>();
+ auto DVal2 = Val2.getAs<DefinedOrUnknownSVal>();
+ if (!DVal1 || !DVal2)
return State;
auto ValuesEqual =
- C.getSValBuilder().evalEQ(State, Val1.castAs<DefinedOrUnknownSVal>(),
- Val2.castAs<DefinedOrUnknownSVal>());
-
- if (!ValuesEqual.getAs<DefinedSVal>())
+ C.getSValBuilder().evalEQ(State, *DVal1, *DVal2).getAs<DefinedSVal>();
+ if (!ValuesEqual)
return State;
- State = C.getConstraintManager().assume(
- State, ValuesEqual.castAs<DefinedSVal>(), true);
-
+ State = C.getConstraintManager().assume(State, *ValuesEqual, true);
return State;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
index e2209e3debfd..f0a114801dda 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/GenericTaintChecker.cpp
@@ -14,11 +14,11 @@
//
//===----------------------------------------------------------------------===//
-#include "Taint.h"
#include "Yaml.h"
#include "clang/AST/Attr.h"
#include "clang/Basic/Builtins.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Checkers/Taint.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -32,10 +32,14 @@
#include <memory>
#include <utility>
+#define DEBUG_TYPE "taint-checker"
+
using namespace clang;
using namespace ento;
using namespace taint;
+using llvm::ImmutableSet;
+
namespace {
class GenericTaintChecker;
@@ -88,10 +92,8 @@ bool isStdin(SVal Val, const ASTContext &ACtx) {
return false;
// Get its symbol and find the declaration region it points to.
- const auto *Sm = dyn_cast<SymbolRegionValue>(SymReg->getSymbol());
- if (!Sm)
- return false;
- const auto *DeclReg = dyn_cast<DeclRegion>(Sm->getRegion());
+ const auto *DeclReg =
+ dyn_cast_or_null<DeclRegion>(SymReg->getSymbol()->getOriginRegion());
if (!DeclReg)
return false;
@@ -150,7 +152,7 @@ Optional<SVal> getTaintedPointeeOrPointer(const CheckerContext &C, SVal Arg) {
bool isTaintedOrPointsToTainted(const Expr *E, const ProgramStateRef &State,
CheckerContext &C) {
- return getTaintedPointeeOrPointer(C, C.getSVal(E)).hasValue();
+ return getTaintedPointeeOrPointer(C, C.getSVal(E)).has_value();
}
/// ArgSet is used to describe arguments relevant for taint detection or
@@ -172,15 +174,6 @@ public:
bool isEmpty() const { return DiscreteArgs.empty() && !VariadicIndex; }
- ArgVecTy ArgsUpTo(ArgIdxTy LastArgIdx) const {
- ArgVecTy Args;
- for (ArgIdxTy I = ReturnValueIndex; I <= LastArgIdx; ++I) {
- if (contains(I))
- Args.push_back(I);
- }
- return Args;
- }
-
private:
ArgVecTy DiscreteArgs;
Optional<ArgIdxTy> VariadicIndex;
@@ -336,11 +329,6 @@ private:
class GenericTaintChecker : public Checker<check::PreCall, check::PostCall> {
public:
- static void *getTag() {
- static int Tag;
- return &Tag;
- }
-
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkPostCall(const CallEvent &Call, CheckerContext &C) const;
@@ -432,7 +420,9 @@ template <> struct ScalarEnumerationTraits<TaintConfiguration::VariadicType> {
/// to the call post-visit. The values are signed integers, which are either
/// ReturnValueIndex, or indexes of the pointer/reference argument, which
/// points to data, which should be tainted on return.
-REGISTER_SET_WITH_PROGRAMSTATE(TaintArgsOnPostVisit, ArgIdxTy)
+REGISTER_MAP_WITH_PROGRAMSTATE(TaintArgsOnPostVisit, const LocationContext *,
+ ImmutableSet<ArgIdxTy>)
+REGISTER_SET_FACTORY_WITH_PROGRAMSTATE(ArgIdxFactory, ArgIdxTy)
void GenericTaintRuleParser::validateArgVector(const std::string &Option,
const ArgVecTy &Args) const {
@@ -544,8 +534,27 @@ void GenericTaintChecker::initTaintRules(CheckerContext &C) const {
{{"getchar"}, TR::Source({{ReturnValueIndex}})},
{{"getchar_unlocked"}, TR::Source({{ReturnValueIndex}})},
{{"gets"}, TR::Source({{0}, ReturnValueIndex})},
+ {{"gets_s"}, TR::Source({{0}, ReturnValueIndex})},
{{"scanf"}, TR::Source({{}, 1})},
+ {{"scanf_s"}, TR::Source({{}, {1}})},
{{"wgetch"}, TR::Source({{}, ReturnValueIndex})},
+ // Sometimes the line between taint sources and propagators is blurry.
+ // _IO_getc is chosen to be a source, but it could also be a propagator.
+ // This way it is simpler, as modeling it as a propagator would require
+ // modeling the possible sources of the _IO_FILE * values that the
+ // _IO_getc function takes as parameters.
+ {{"_IO_getc"}, TR::Source({{ReturnValueIndex}})},
+ {{"getcwd"}, TR::Source({{0, ReturnValueIndex}})},
+ {{"getwd"}, TR::Source({{0, ReturnValueIndex}})},
+ {{"readlink"}, TR::Source({{1, ReturnValueIndex}})},
+ {{"readlinkat"}, TR::Source({{2, ReturnValueIndex}})},
+ {{"get_current_dir_name"}, TR::Source({{ReturnValueIndex}})},
+ {{"gethostname"}, TR::Source({{0}})},
+ {{"getnameinfo"}, TR::Source({{2, 4}})},
+ {{"getseuserbyname"}, TR::Source({{1, 2}})},
+ {{"getgroups"}, TR::Source({{1, ReturnValueIndex}})},
+ {{"getlogin"}, TR::Source({{ReturnValueIndex}})},
+ {{"getlogin_r"}, TR::Source({{0}})},
// Props
{{"atoi"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
@@ -553,9 +562,11 @@ void GenericTaintChecker::initTaintRules(CheckerContext &C) const {
{{"atoll"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
{{"fgetc"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
{{"fgetln"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
- {{"fgets"}, TR::Prop({{2}}, {{0}, ReturnValueIndex})},
+ {{"fgets"}, TR::Prop({{2}}, {{0, ReturnValueIndex}})},
{{"fscanf"}, TR::Prop({{0}}, {{}, 2})},
+ {{"fscanf_s"}, TR::Prop({{0}}, {{}, {2}})},
{{"sscanf"}, TR::Prop({{0}}, {{}, 2})},
+
{{"getc"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
{{"getc_unlocked"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
{{"getdelim"}, TR::Prop({{3}}, {{0}})},
@@ -567,6 +578,78 @@ void GenericTaintChecker::initTaintRules(CheckerContext &C) const {
{{"strrchr"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
{{"tolower"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
{{"toupper"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"fread"}, TR::Prop({{3}}, {{0, ReturnValueIndex}})},
+ {{"recv"}, TR::Prop({{0}}, {{1, ReturnValueIndex}})},
+ {{"recvfrom"}, TR::Prop({{0}}, {{1, ReturnValueIndex}})},
+
+ {{"ttyname"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"ttyname_r"}, TR::Prop({{0}}, {{1, ReturnValueIndex}})},
+
+ {{"basename"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"dirname"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"fnmatch"}, TR::Prop({{1}}, {{ReturnValueIndex}})},
+ {{"memchr"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"memrchr"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"rawmemchr"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+
+ {{"mbtowc"}, TR::Prop({{1}}, {{0, ReturnValueIndex}})},
+ {{"wctomb"}, TR::Prop({{1}}, {{0, ReturnValueIndex}})},
+ {{"wcwidth"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+
+ {{"memcmp"}, TR::Prop({{0, 1}}, {{ReturnValueIndex}})},
+ {{"memcpy"}, TR::Prop({{1}}, {{0, ReturnValueIndex}})},
+ {{"memmove"}, TR::Prop({{1}}, {{0, ReturnValueIndex}})},
+      // If memmem was called with a tainted needle and the search was
+      // successful, that would mean that the value pointed to by the return
+      // value has the same content as the needle. If we choose to go by the
+      // policy that content equivalence implies taintedness equivalence, the
+      // haystack should be considered a propagation source argument.
+ {{"memmem"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+
+ // The comment for memmem above also applies to strstr.
+ {{"strstr"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"strcasestr"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+
+ {{"strchrnul"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+
+ {{"index"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"rindex"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+
+ // FIXME: In case of arrays, only the first element of the array gets
+ // tainted.
+ {{"qsort"}, TR::Prop({{0}}, {{0}})},
+ {{"qsort_r"}, TR::Prop({{0}}, {{0}})},
+
+ {{"strcmp"}, TR::Prop({{0, 1}}, {{ReturnValueIndex}})},
+ {{"strcasecmp"}, TR::Prop({{0, 1}}, {{ReturnValueIndex}})},
+ {{"strncmp"}, TR::Prop({{0, 1, 2}}, {{ReturnValueIndex}})},
+ {{"strncasecmp"}, TR::Prop({{0, 1, 2}}, {{ReturnValueIndex}})},
+ {{"strspn"}, TR::Prop({{0, 1}}, {{ReturnValueIndex}})},
+ {{"strcspn"}, TR::Prop({{0, 1}}, {{ReturnValueIndex}})},
+ {{"strpbrk"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"strndup"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"strndupa"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"strlen"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"strnlen"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"strtol"}, TR::Prop({{0}}, {{1, ReturnValueIndex}})},
+ {{"strtoll"}, TR::Prop({{0}}, {{1, ReturnValueIndex}})},
+ {{"strtoul"}, TR::Prop({{0}}, {{1, ReturnValueIndex}})},
+ {{"strtoull"}, TR::Prop({{0}}, {{1, ReturnValueIndex}})},
+
+ {{"isalnum"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"isalpha"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"isascii"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"isblank"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"iscntrl"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"isdigit"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"isgraph"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"islower"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"isprint"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"ispunct"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"isspace"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"isupper"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+ {{"isxdigit"}, TR::Prop({{0}}, {{ReturnValueIndex}})},
+
{{CDF_MaybeBuiltin, {BI.getName(Builtin::BIstrncat)}},
TR::Prop({{1, 2}}, {{0, ReturnValueIndex}})},
{{CDF_MaybeBuiltin, {BI.getName(Builtin::BIstrlcpy)}},
@@ -626,7 +709,7 @@ void GenericTaintChecker::initTaintRules(CheckerContext &C) const {
if (TR::UntrustedEnv(C)) {
// void setproctitle_init(int argc, char *argv[], char *envp[])
GlobalCRules.push_back(
- {{{"setproctitle_init"}}, TR::Sink({{2}}, MsgCustomSink)});
+ {{{"setproctitle_init"}}, TR::Sink({{1, 2}}, MsgCustomSink)});
GlobalCRules.push_back({{"getenv"}, TR::Source({{ReturnValueIndex}})});
}
@@ -649,7 +732,7 @@ void GenericTaintChecker::initTaintRules(CheckerContext &C) const {
}
GenericTaintRuleParser::RulesContTy Rules{
- ConfigParser.parseConfiguration(Option, std::move(Config.getValue()))};
+ ConfigParser.parseConfiguration(Option, std::move(*Config))};
DynamicTaintRules.emplace(std::make_move_iterator(Rules.begin()),
std::make_move_iterator(Rules.end()));
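The getValue()/getValueOr() to operator*/value_or() rewrites in this patch track llvm::Optional converging on the std::optional interface. The same shapes with std::optional, which mirrors the calls in these hunks:

  #include <optional>
  #include <string>

  int main() {
    std::optional<std::string> Config("taint-rules.yaml");
    // Old style: Config.getValue() / Config.getValueOr("default")
    // New style: dereference and value_or, as in the patch.
    std::string Path = *Config;                   // was getValue()
    std::string Msg = Config.value_or("default"); // was getValueOr()
    return Path.empty() && Msg.empty() ? 1 : 0;
  }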
@@ -683,15 +766,26 @@ void GenericTaintChecker::checkPostCall(const CallEvent &Call,
// Set the marked values as tainted. The return value is only accessible
// from checkPostStmt.
ProgramStateRef State = C.getState();
+ const StackFrameContext *CurrentFrame = C.getStackFrame();
// Depending on what was tainted at pre-visit, we determined a set of
// arguments which should be tainted after the function returns. These are
// stored in the state as TaintArgsOnPostVisit set.
- TaintArgsOnPostVisitTy TaintArgs = State->get<TaintArgsOnPostVisit>();
- if (TaintArgs.isEmpty())
+ TaintArgsOnPostVisitTy TaintArgsMap = State->get<TaintArgsOnPostVisit>();
+
+ const ImmutableSet<ArgIdxTy> *TaintArgs = TaintArgsMap.lookup(CurrentFrame);
+ if (!TaintArgs)
return;
+ assert(!TaintArgs->isEmpty());
+
+ LLVM_DEBUG(for (ArgIdxTy I
+ : *TaintArgs) {
+ llvm::dbgs() << "PostCall<";
+ Call.dump(llvm::dbgs());
+ llvm::dbgs() << "> actually wants to taint arg index: " << I << '\n';
+ });
- for (ArgIdxTy ArgNum : TaintArgs) {
+ for (ArgIdxTy ArgNum : *TaintArgs) {
// Special handling for the tainted return value.
if (ArgNum == ReturnValueIndex) {
State = addTaint(State, Call.getReturnValue());
@@ -705,7 +799,7 @@ void GenericTaintChecker::checkPostCall(const CallEvent &Call,
}
// Clear up the taint info from the state.
- State = State->remove<TaintArgsOnPostVisit>();
+ State = State->remove<TaintArgsOnPostVisit>(CurrentFrame);
C.addTransition(State);
}
@@ -730,7 +824,7 @@ void GenericTaintRule::process(const GenericTaintChecker &Checker,
/// Check for taint sinks.
ForEachCallArg([this, &Checker, &C, &State](ArgIdxTy I, const Expr *E, SVal) {
if (SinkArgs.contains(I) && isTaintedOrPointsToTainted(E, State, C))
- Checker.generateReportIfTainted(E, SinkMsg.getValueOr(MsgCustomSink), C);
+ Checker.generateReportIfTainted(E, SinkMsg.value_or(MsgCustomSink), C);
});
/// Check for taint filters.
@@ -756,7 +850,7 @@ void GenericTaintRule::process(const GenericTaintChecker &Checker,
return;
const auto WouldEscape = [](SVal V, QualType Ty) -> bool {
- if (!V.getAs<Loc>())
+ if (!isa<Loc>(V))
return false;
const bool IsNonConstRef = Ty->isReferenceType() && !Ty.isConstQualified();
@@ -767,18 +861,32 @@ void GenericTaintRule::process(const GenericTaintChecker &Checker,
};
/// Propagate taint where it is necessary.
+ auto &F = State->getStateManager().get_context<ArgIdxFactory>();
+ ImmutableSet<ArgIdxTy> Result = F.getEmptySet();
ForEachCallArg(
- [this, &State, WouldEscape](ArgIdxTy I, const Expr *E, SVal V) {
- if (PropDstArgs.contains(I))
- State = State->add<TaintArgsOnPostVisit>(I);
+ [&](ArgIdxTy I, const Expr *E, SVal V) {
+ if (PropDstArgs.contains(I)) {
+ LLVM_DEBUG(llvm::dbgs() << "PreCall<"; Call.dump(llvm::dbgs());
+ llvm::dbgs()
+ << "> prepares tainting arg index: " << I << '\n';);
+ Result = F.add(Result, I);
+ }
// TODO: We should traverse all reachable memory regions via the
// escaping parameter. Instead of doing that we simply mark only the
// referred memory region as tainted.
- if (WouldEscape(V, E->getType()))
- State = State->add<TaintArgsOnPostVisit>(I);
+ if (WouldEscape(V, E->getType())) {
+ LLVM_DEBUG(if (!Result.contains(I)) {
+ llvm::dbgs() << "PreCall<";
+ Call.dump(llvm::dbgs());
+ llvm::dbgs() << "> prepares tainting arg index: " << I << '\n';
+ });
+ Result = F.add(Result, I);
+ }
});
+ if (!Result.isEmpty())
+ State = State->set<TaintArgsOnPostVisit>(C.getStackFrame(), Result);
C.addTransition(State);
}
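The propagation logic now builds an llvm::ImmutableSet through a factory and commits it to the state only when non-empty. The factory pattern in isolation, as a self-contained sketch against LLVM's ADT headers:

  #include "llvm/ADT/ImmutableSet.h"
  #include "llvm/Support/raw_ostream.h"

  int main() {
    llvm::ImmutableSet<int>::Factory F;
    llvm::ImmutableSet<int> Result = F.getEmptySet();
    for (int I : {1, 3, 3, 5})
      Result = F.add(Result, I); // persistent: each add returns a new set
    if (!Result.isEmpty())
      for (int I : Result)
        llvm::errs() << I << ' '; // prints 1 3 5
  }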
@@ -869,11 +977,14 @@ void GenericTaintChecker::taintUnsafeSocketProtocol(const CallEvent &Call,
if (SafeProtocol)
return;
- C.addTransition(C.getState()->add<TaintArgsOnPostVisit>(ReturnValueIndex));
+ ProgramStateRef State = C.getState();
+ auto &F = State->getStateManager().get_context<ArgIdxFactory>();
+ ImmutableSet<ArgIdxTy> Result = F.add(F.getEmptySet(), ReturnValueIndex);
+ State = State->set<TaintArgsOnPostVisit>(C.getStackFrame(), Result);
+ C.addTransition(State);
}
/// Checker registration
-
void ento::registerGenericTaintChecker(CheckerManager &Mgr) {
Mgr.registerChecker<GenericTaintChecker>();
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
index 6f9867b9607d..895212d134b8 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/InnerPointerChecker.cpp
@@ -35,9 +35,9 @@ namespace {
class InnerPointerChecker
: public Checker<check::DeadSymbols, check::PostCall> {
- CallDescription AppendFn, AssignFn, AddressofFn, ClearFn, CStrFn, DataFn,
- DataMemberFn, EraseFn, InsertFn, PopBackFn, PushBackFn, ReplaceFn,
- ReserveFn, ResizeFn, ShrinkToFitFn, SwapFn;
+ CallDescription AppendFn, AssignFn, AddressofFn, AddressofFn_, ClearFn,
+ CStrFn, DataFn, DataMemberFn, EraseFn, InsertFn, PopBackFn, PushBackFn,
+ ReplaceFn, ReserveFn, ResizeFn, ShrinkToFitFn, SwapFn;
public:
class InnerPointerBRVisitor : public BugReporterVisitor {
@@ -74,7 +74,7 @@ public:
InnerPointerChecker()
: AppendFn({"std", "basic_string", "append"}),
AssignFn({"std", "basic_string", "assign"}),
- AddressofFn({"std", "addressof"}),
+ AddressofFn({"std", "addressof"}), AddressofFn_({"std", "__addressof"}),
ClearFn({"std", "basic_string", "clear"}),
CStrFn({"std", "basic_string", "c_str"}), DataFn({"std", "data"}, 1),
DataMemberFn({"std", "basic_string", "data"}),
@@ -179,9 +179,9 @@ void InnerPointerChecker::checkFunctionArguments(const CallEvent &Call,
if (!ArgRegion)
continue;
- // std::addressof function accepts a non-const reference as an argument,
+    // std::addressof functions accept a non-const reference as an argument,
// but doesn't modify it.
- if (AddressofFn.matches(Call))
+ if (matchesAny(Call, AddressofFn, AddressofFn_))
continue;
markPtrSymbolsReleased(Call, State, ArgRegion, C);
@@ -320,8 +320,7 @@ PathDiagnosticPieceRef InnerPointerChecker::InnerPointerBRVisitor::VisitNode(
SmallString<256> Buf;
llvm::raw_svector_ostream OS(Buf);
- OS << "Pointer to inner buffer of '" << ObjTy.getAsString()
- << "' obtained here";
+ OS << "Pointer to inner buffer of '" << ObjTy << "' obtained here";
PathDiagnosticLocation Pos(S, BRC.getSourceManager(),
N->getLocationContext());
return std::make_shared<PathDiagnosticEventPiece>(Pos, OS.str(), true);
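matchesAny folds one call against several CallDescriptions; the analyzer ships that helper, but the shape is an ordinary variadic fold. A standalone analog over a hypothetical Desc type (names invented for illustration):

  #include <iostream>
  #include <string>

  // Hypothetical stand-in for CallDescription: match by name only.
  struct Desc {
    std::string Name;
    bool matches(const std::string &Callee) const { return Callee == Name; }
  };

  template <typename... Ts>
  bool matchesAny(const std::string &Callee, const Ts &...Descs) {
    return (... || Descs.matches(Callee)); // C++17 fold expression
  }

  int main() {
    Desc Addressof{"std::addressof"}, AddressofUnd{"std::__addressof"};
    std::cout << matchesAny("std::__addressof", Addressof, AddressofUnd)
              << '\n'; // 1
  }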
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp
index 496190149991..38ed9e702db4 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.cpp
@@ -308,8 +308,8 @@ bool compare(ProgramStateRef State, NonLoc NL1, NonLoc NL2,
const auto comparison =
SVB.evalBinOp(State, Opc, NL1, NL2, SVB.getConditionType());
- assert(comparison.getAs<DefinedSVal>() &&
- "Symbol comparison must be a `DefinedSVal`");
+ assert(isa<DefinedSVal>(comparison) &&
+ "Symbol comparison must be a `DefinedSVal`");
return !State->assume(comparison.castAs<DefinedSVal>(), false);
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.h
index 37157492fe3e..353daf0bed08 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.h
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Iterator.h
@@ -63,9 +63,7 @@ public:
return Cont == X.Cont && Valid == X.Valid && Offset == X.Offset;
}
- bool operator!=(const IteratorPosition &X) const {
- return Cont != X.Cont || Valid != X.Valid || Offset != X.Offset;
- }
+ bool operator!=(const IteratorPosition &X) const { return !(*this == X); }
void Profile(llvm::FoldingSetNodeID &ID) const {
ID.AddPointer(Cont);
@@ -101,9 +99,7 @@ public:
return Begin == X.Begin && End == X.End;
}
- bool operator!=(const ContainerData &X) const {
- return Begin != X.Begin || End != X.End;
- }
+ bool operator!=(const ContainerData &X) const { return !(*this == X); }
void Profile(llvm::FoldingSetNodeID &ID) const {
ID.Add(Begin);
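Deriving operator!= from operator== leaves one source of truth, where the deleted member-wise versions above could silently drift from their == counterparts. The pattern in isolation:

  struct IteratorPosition {
    const void *Cont = nullptr;
    bool Valid = false;
    unsigned long Offset = 0;

    bool operator==(const IteratorPosition &X) const {
      return Cont == X.Cont && Valid == X.Valid && Offset == X.Offset;
    }
    // Single source of truth: != can no longer disagree with ==.
    bool operator!=(const IteratorPosition &X) const { return !(*this == X); }
  };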
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
index 235c9010412a..80431e65519e 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IteratorModeling.cpp
@@ -151,8 +151,6 @@ public:
void checkBind(SVal Loc, SVal Val, const Stmt *S, CheckerContext &C) const;
void checkPostStmt(const UnaryOperator *UO, CheckerContext &C) const;
void checkPostStmt(const BinaryOperator *BO, CheckerContext &C) const;
- void checkPostStmt(const CXXConstructExpr *CCE, CheckerContext &C) const;
- void checkPostStmt(const DeclStmt *DS, CheckerContext &C) const;
void checkPostStmt(const MaterializeTemporaryExpr *MTE,
CheckerContext &C) const;
void checkLiveSymbols(ProgramStateRef State, SymbolReaper &SR) const;
@@ -631,7 +629,7 @@ void IteratorModeling::handlePtrIncrOrDecr(CheckerContext &C,
const Expr *Iterator,
OverloadedOperatorKind OK,
SVal Offset) const {
- if (!Offset.getAs<DefinedSVal>())
+ if (!isa<DefinedSVal>(Offset))
return;
QualType PtrType = Iterator->getType();
@@ -801,8 +799,8 @@ ProgramStateRef relateSymbols(ProgramStateRef State, SymbolRef Sym1,
SVB.evalBinOp(State, BO_EQ, nonloc::SymbolVal(Sym1),
nonloc::SymbolVal(Sym2), SVB.getConditionType());
- assert(comparison.getAs<DefinedSVal>() &&
- "Symbol comparison must be a `DefinedSVal`");
+ assert(isa<DefinedSVal>(comparison) &&
+ "Symbol comparison must be a `DefinedSVal`");
auto NewState = State->assume(comparison.castAs<DefinedSVal>(), Equal);
if (!NewState)
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
index 3e6756efe0e6..ce334b18386e 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/IvarInvalidationChecker.cpp
@@ -44,9 +44,9 @@ using namespace ento;
namespace {
struct ChecksFilter {
/// Check for missing invalidation method declarations.
- DefaultBool check_MissingInvalidationMethod;
+ bool check_MissingInvalidationMethod = false;
/// Check that all ivars are invalidated.
- DefaultBool check_InstanceVariableInvalidation;
+ bool check_InstanceVariableInvalidation = false;
CheckerNameRef checkName_MissingInvalidationMethod;
CheckerNameRef checkName_InstanceVariableInvalidation;
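DefaultBool existed only to zero-initialize a flag; a C++11 in-class member initializer does the same job without the wrapper, which is what this and the later DefaultBool hunks rely on. A minimal sketch:

  // Before: a wrapper type existed solely to default-construct to false.
  // After: plain bool with an in-class initializer.
  struct ChecksFilter {
    bool check_MissingInvalidationMethod = false; // was DefaultBool
    bool check_InstanceVariableInvalidation = false;
  };

  static_assert(!ChecksFilter{}.check_MissingInvalidationMethod,
                "in-class initializers give the same zero default");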
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
index 1f3d8844d330..fa51aa80216b 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LLVMConventionsChecker.cpp
@@ -273,7 +273,7 @@ void ASTFieldVisitor::ReportError(QualType T) {
os << (*I)->getName();
}
}
- os << " (type " << FieldChain.back()->getType().getAsString() << ")";
+ os << " (type " << FieldChain.back()->getType() << ")";
// Note that this will fire for every translation unit that uses this
// class. This is suboptimal, but at least scan-build will merge
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
index b57c5dc6de56..9870746cbe0c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/LocalizationChecker.cpp
@@ -92,7 +92,7 @@ public:
// When this parameter is set to true, the checker assumes all
// methods that return NSStrings are unlocalized. Thus, more false
// positives will be reported.
- DefaultBool IsAggressive;
+ bool IsAggressive = false;
void checkPreObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
void checkPostObjCMessage(const ObjCMethodCall &msg, CheckerContext &C) const;
@@ -1005,7 +1005,7 @@ NonLocalizedStringBRVisitor::VisitNode(const ExplodedNode *Succ,
return nullptr;
Optional<StmtPoint> Point = Succ->getLocation().getAs<StmtPoint>();
- if (!Point.hasValue())
+ if (!Point)
return nullptr;
auto *LiteralExpr = dyn_cast<ObjCStringLiteral>(Point->getStmt());
@@ -1145,8 +1145,8 @@ void EmptyLocalizationContextChecker::MethodCrawler::VisitObjCMessageExpr(
Mgr.getSourceManager().getBufferOrNone(SLInfo.first, SL);
if (!BF)
return;
-
- Lexer TheLexer(SL, LangOptions(), BF->getBufferStart(),
+ LangOptions LangOpts;
+ Lexer TheLexer(SL, LangOpts, BF->getBufferStart(),
BF->getBufferStart() + SLInfo.second, BF->getBufferEnd());
Token I;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
index 5d6bd381d3cc..4c0a8ba2c7c0 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MPI-Checker/MPIChecker.cpp
@@ -165,7 +165,7 @@ void MPIChecker::allRegionsUsedByWait(
Ctx.getState(), SuperRegion, Ctx.getSValBuilder(),
CE.getArgExpr(1)->getType()->getPointeeType());
const llvm::APSInt &ArrSize =
- ElementCount.getAs<nonloc::ConcreteInt>()->getValue();
+ ElementCount.castAs<nonloc::ConcreteInt>().getValue();
for (size_t i = 0; i < ArrSize; ++i) {
const NonLoc Idx = Ctx.getSValBuilder().makeArrayIndex(i);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
index 57080a84451a..92d7cef78b13 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocChecker.cpp
@@ -306,9 +306,9 @@ public:
/// functions might free the memory.
/// In optimistic mode, the checker assumes that all user-defined functions
/// which might free a pointer are annotated.
- DefaultBool ShouldIncludeOwnershipAnnotatedFunctions;
+ bool ShouldIncludeOwnershipAnnotatedFunctions = false;
- DefaultBool ShouldRegisterNoOwnershipChangeVisitor;
+ bool ShouldRegisterNoOwnershipChangeVisitor = false;
/// Many checkers are essentially built into this one, so enabling them will
/// make MallocChecker perform additional modeling and reporting.
@@ -326,7 +326,7 @@ public:
using LeakInfo = std::pair<const ExplodedNode *, const MemRegion *>;
- DefaultBool ChecksEnabled[CK_NumCheckKinds];
+ bool ChecksEnabled[CK_NumCheckKinds] = {false};
CheckerNameRef CheckNames[CK_NumCheckKinds];
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
@@ -398,6 +398,9 @@ private:
};
bool isFreeingCall(const CallEvent &Call) const;
+ static bool isFreeingOwnershipAttrCall(const FunctionDecl *Func);
+
+ friend class NoOwnershipChangeVisitor;
CallDescriptionMap<CheckFn> AllocatingMemFnMap{
{{"alloca", 1}, &MallocChecker::checkAlloca},
@@ -742,7 +745,9 @@ private:
namespace {
class NoOwnershipChangeVisitor final : public NoStateChangeFuncVisitor {
+ // The symbol whose (lack of) ownership change we are interested in.
SymbolRef Sym;
+ const MallocChecker &Checker;
using OwnerSet = llvm::SmallPtrSet<const MemRegion *, 8>;
// Collect which entities point to the allocated memory, and could be
@@ -794,18 +799,57 @@ protected:
return "";
}
+ /// Syntactically checks whether the callee is a deallocating function. Since
+ /// we have no path-sensitive information on this call (we would need a
+  /// CallEvent instead of a CallExpr for that), it's possible that a
+  /// deallocation function was called indirectly through a function pointer,
+  /// but we are not able to tell, so this is a best-effort analysis.
+ /// See namespace `memory_passed_to_fn_call_free_through_fn_ptr` in
+ /// clang/test/Analysis/NewDeleteLeaks.cpp.
+ bool isFreeingCallAsWritten(const CallExpr &Call) const {
+ if (Checker.FreeingMemFnMap.lookupAsWritten(Call) ||
+ Checker.ReallocatingMemFnMap.lookupAsWritten(Call))
+ return true;
+
+ if (const auto *Func =
+ llvm::dyn_cast_or_null<FunctionDecl>(Call.getCalleeDecl()))
+ return MallocChecker::isFreeingOwnershipAttrCall(Func);
+
+ return false;
+ }
+
+ /// Heuristically guess whether the callee intended to free memory. This is
+ /// done syntactically, because we are trying to argue about alternative
+ /// paths of execution, and as a consequence we don't have path-sensitive
+ /// information.
bool doesFnIntendToHandleOwnership(const Decl *Callee, ASTContext &ACtx) {
using namespace clang::ast_matchers;
const FunctionDecl *FD = dyn_cast<FunctionDecl>(Callee);
- if (!FD)
+
+ // Given that the stack frame was entered, the body should always be
+ // theoretically obtainable. In case of body farms, the synthesized body
+  // is not attached to the declaration, thus triggering the '!FD->hasBody()'
+ // branch. That said, would a synthesized body ever intend to handle
+ // ownership? As of today they don't. And if they did, how would we
+ // put notes inside it, given that it doesn't match any source locations?
+ if (!FD || !FD->hasBody())
return false;
- // TODO: Operator delete is hardly the only deallocator -- Can we reuse
- // isFreeingCall() or something thats already here?
- auto Deallocations = match(
- stmt(hasDescendant(cxxDeleteExpr().bind("delete"))
- ), *FD->getBody(), ACtx);
- // TODO: Ownership my change with an attempt to store the allocated memory.
- return !Deallocations.empty();
+
+ auto Matches = match(findAll(stmt(anyOf(cxxDeleteExpr().bind("delete"),
+ callExpr().bind("call")))),
+ *FD->getBody(), ACtx);
+ for (BoundNodes Match : Matches) {
+ if (Match.getNodeAs<CXXDeleteExpr>("delete"))
+ return true;
+
+ if (const auto *Call = Match.getNodeAs<CallExpr>("call"))
+ if (isFreeingCallAsWritten(*Call))
+ return true;
+ }
+ // TODO: Ownership might change with an attempt to store the allocated
+ // memory, not only through deallocation. Check for attempted stores as
+ // well.
+ return false;
}
virtual bool
@@ -874,20 +918,15 @@ protected:
}
public:
- NoOwnershipChangeVisitor(SymbolRef Sym)
- : NoStateChangeFuncVisitor(bugreporter::TrackingKind::Thorough),
- Sym(Sym) {}
+ NoOwnershipChangeVisitor(SymbolRef Sym, const MallocChecker *Checker)
+ : NoStateChangeFuncVisitor(bugreporter::TrackingKind::Thorough), Sym(Sym),
+ Checker(*Checker) {}
void Profile(llvm::FoldingSetNodeID &ID) const override {
static int Tag = 0;
ID.AddPointer(&Tag);
ID.AddPointer(Sym);
}
-
- void *getTag() const {
- static int Tag = 0;
- return static_cast<void *>(&Tag);
- }
};
} // end anonymous namespace
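The rewritten heuristic walks the callee body once with findAll(anyOf(...)), catching both delete expressions and calls that may deallocate. A schematic of the matcher usage, assuming a FunctionDecl *FD with a body and an ASTContext &ACtx (not a standalone program):

  using namespace clang;
  using namespace clang::ast_matchers;

  // Sketch: one traversal of the body finds deletes and plain calls.
  auto Matches = match(findAll(stmt(anyOf(cxxDeleteExpr().bind("delete"),
                                          callExpr().bind("call")))),
                       *FD->getBody(), ACtx);
  for (const BoundNodes &Match : Matches) {
    if (Match.getNodeAs<CXXDeleteExpr>("delete")) {
      // direct deallocation found
    } else if (const auto *Call = Match.getNodeAs<CallExpr>("call")) {
      (void)Call; // check whether Call frees memory as written
    }
  }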
@@ -1061,12 +1100,8 @@ static bool isStandardNewDelete(const FunctionDecl *FD) {
// Methods of MallocChecker and MallocBugVisitor.
//===----------------------------------------------------------------------===//
-bool MallocChecker::isFreeingCall(const CallEvent &Call) const {
- if (FreeingMemFnMap.lookup(Call) || ReallocatingMemFnMap.lookup(Call))
- return true;
-
- const auto *Func = dyn_cast<FunctionDecl>(Call.getDecl());
- if (Func && Func->hasAttrs()) {
+bool MallocChecker::isFreeingOwnershipAttrCall(const FunctionDecl *Func) {
+ if (Func->hasAttrs()) {
for (const auto *I : Func->specific_attrs<OwnershipAttr>()) {
OwnershipAttr::OwnershipKind OwnKind = I->getOwnKind();
if (OwnKind == OwnershipAttr::Takes || OwnKind == OwnershipAttr::Holds)
@@ -1076,6 +1111,16 @@ bool MallocChecker::isFreeingCall(const CallEvent &Call) const {
return false;
}
+bool MallocChecker::isFreeingCall(const CallEvent &Call) const {
+ if (FreeingMemFnMap.lookup(Call) || ReallocatingMemFnMap.lookup(Call))
+ return true;
+
+ if (const auto *Func = dyn_cast_or_null<FunctionDecl>(Call.getDecl()))
+ return isFreeingOwnershipAttrCall(Func);
+
+ return false;
+}
+
bool MallocChecker::isMemCall(const CallEvent &Call) const {
if (FreeingMemFnMap.lookup(Call) || AllocatingMemFnMap.lookup(Call) ||
ReallocatingMemFnMap.lookup(Call))
@@ -1110,7 +1155,7 @@ MallocChecker::performKernelMalloc(const CallEvent &Call, CheckerContext &C,
ASTContext &Ctx = C.getASTContext();
llvm::Triple::OSType OS = Ctx.getTargetInfo().getTriple().getOS();
- if (!KernelZeroFlagVal.hasValue()) {
+ if (!KernelZeroFlagVal) {
if (OS == llvm::Triple::FreeBSD)
KernelZeroFlagVal = 0x0100;
else if (OS == llvm::Triple::NetBSD)
@@ -1137,7 +1182,7 @@ MallocChecker::performKernelMalloc(const CallEvent &Call, CheckerContext &C,
const Expr *FlagsEx = Call.getArgExpr(Call.getNumArgs() - 1);
const SVal V = C.getSVal(FlagsEx);
- if (!V.getAs<NonLoc>()) {
+ if (!isa<NonLoc>(V)) {
// The case where 'V' can be a location can only be due to a bad header,
// so in this case bail out.
return None;
@@ -1193,7 +1238,7 @@ void MallocChecker::checkKernelMalloc(const CallEvent &Call,
ProgramStateRef State = C.getState();
llvm::Optional<ProgramStateRef> MaybeState =
performKernelMalloc(Call, C, State);
- if (MaybeState.hasValue())
+ if (MaybeState)
State = MaybeState.getValue();
else
State = MallocMemAux(C, Call, Call.getArgExpr(0), UndefinedVal(), State,
@@ -1358,8 +1403,8 @@ void MallocChecker::checkGMalloc0(const CallEvent &Call,
void MallocChecker::checkGMemdup(const CallEvent &Call,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
- State = MallocMemAux(C, Call, Call.getArgExpr(1), UndefinedVal(), State,
- AF_Malloc);
+ State =
+ MallocMemAux(C, Call, Call.getArgExpr(1), UnknownVal(), State, AF_Malloc);
State = ProcessZeroAllocCheck(Call, 1, State);
C.addTransition(State);
}
@@ -1862,12 +1907,12 @@ ProgramStateRef MallocChecker::FreeMemAux(
return nullptr;
SVal ArgVal = C.getSVal(ArgExpr);
- if (!ArgVal.getAs<DefinedOrUnknownSVal>())
+ if (!isa<DefinedOrUnknownSVal>(ArgVal))
return nullptr;
DefinedOrUnknownSVal location = ArgVal.castAs<DefinedOrUnknownSVal>();
// Check for null dereferences.
- if (!location.getAs<Loc>())
+ if (!isa<Loc>(location))
return nullptr;
// The explicit NULL case, no operation is performed.
@@ -2173,7 +2218,7 @@ void MallocChecker::HandleNonHeapDealloc(CheckerContext &C, SVal ArgVal,
}
Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(Family);
- if (!CheckKind.hasValue())
+ if (!CheckKind)
return;
if (ExplodedNode *N = C.generateErrorNode()) {
@@ -2306,7 +2351,7 @@ void MallocChecker::HandleOffsetFree(CheckerContext &C, SVal ArgVal,
}
Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(Family);
- if (!CheckKind.hasValue())
+ if (!CheckKind)
return;
ExplodedNode *N = C.generateErrorNode();
@@ -2363,7 +2408,7 @@ void MallocChecker::HandleUseAfterFree(CheckerContext &C, SourceRange Range,
}
Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
- if (!CheckKind.hasValue())
+ if (!CheckKind)
return;
if (ExplodedNode *N = C.generateErrorNode()) {
@@ -2402,7 +2447,7 @@ void MallocChecker::HandleDoubleFree(CheckerContext &C, SourceRange Range,
}
Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
- if (!CheckKind.hasValue())
+ if (!CheckKind)
return;
if (ExplodedNode *N = C.generateErrorNode()) {
@@ -2432,7 +2477,7 @@ void MallocChecker::HandleDoubleDelete(CheckerContext &C, SymbolRef Sym) const {
}
Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
- if (!CheckKind.hasValue())
+ if (!CheckKind)
return;
if (ExplodedNode *N = C.generateErrorNode()) {
@@ -2460,7 +2505,7 @@ void MallocChecker::HandleUseZeroAlloc(CheckerContext &C, SourceRange Range,
Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(C, Sym);
- if (!CheckKind.hasValue())
+ if (!CheckKind)
return;
if (ExplodedNode *N = C.generateErrorNode()) {
@@ -2492,7 +2537,7 @@ void MallocChecker::HandleFunctionPtrFree(CheckerContext &C, SVal ArgVal,
}
Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(Family);
- if (!CheckKind.hasValue())
+ if (!CheckKind)
return;
if (ExplodedNode *N = C.generateErrorNode()) {
@@ -2537,14 +2582,14 @@ MallocChecker::ReallocMemAux(CheckerContext &C, const CallEvent &Call,
const Expr *arg0Expr = CE->getArg(0);
SVal Arg0Val = C.getSVal(arg0Expr);
- if (!Arg0Val.getAs<DefinedOrUnknownSVal>())
+ if (!isa<DefinedOrUnknownSVal>(Arg0Val))
return nullptr;
DefinedOrUnknownSVal arg0Val = Arg0Val.castAs<DefinedOrUnknownSVal>();
SValBuilder &svalBuilder = C.getSValBuilder();
- DefinedOrUnknownSVal PtrEQ =
- svalBuilder.evalEQ(State, arg0Val, svalBuilder.makeNull());
+ DefinedOrUnknownSVal PtrEQ = svalBuilder.evalEQ(
+ State, arg0Val, svalBuilder.makeNullWithType(arg0Expr->getType()));
// Get the size argument.
const Expr *Arg1 = CE->getArg(1);
@@ -2553,13 +2598,14 @@ MallocChecker::ReallocMemAux(CheckerContext &C, const CallEvent &Call,
SVal TotalSize = C.getSVal(Arg1);
if (SuffixWithN)
TotalSize = evalMulForBufferSize(C, Arg1, CE->getArg(2));
- if (!TotalSize.getAs<DefinedOrUnknownSVal>())
+ if (!isa<DefinedOrUnknownSVal>(TotalSize))
return nullptr;
// Compare the size argument to 0.
DefinedOrUnknownSVal SizeZero =
- svalBuilder.evalEQ(State, TotalSize.castAs<DefinedOrUnknownSVal>(),
- svalBuilder.makeIntValWithPtrWidth(0, false));
+ svalBuilder.evalEQ(State, TotalSize.castAs<DefinedOrUnknownSVal>(),
+ svalBuilder.makeIntValWithWidth(
+ svalBuilder.getContext().getSizeType(), 0));
ProgramStateRef StatePtrIsNull, StatePtrNotNull;
std::tie(StatePtrIsNull, StatePtrNotNull) = State->assume(PtrEQ);
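makeNullWithType(T) produces a null SVal that remembers which pointer type it stands for, unlike the typeless makeNull(); several hunks in this patch make that swap. Schematic checker-side use, assuming a CheckerContext C, a ProgramStateRef State, and an argument expression ArgExpr (a sketch, not the checker's full logic):

  // Sketch: build a null of the argument's own pointer type instead of a
  // typeless null, then compare the argument against it.
  SValBuilder &SVB = C.getSValBuilder();
  SVal ArgVal = C.getSVal(ArgExpr);
  DefinedOrUnknownSVal IsNull = SVB.evalEQ(
      State, ArgVal.castAs<DefinedOrUnknownSVal>(),
      SVB.makeNullWithType(ArgExpr->getType()));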
@@ -2704,7 +2750,7 @@ void MallocChecker::HandleLeak(SymbolRef Sym, ExplodedNode *N,
Optional<MallocChecker::CheckKind>
CheckKind = getCheckIfTracked(Family, true);
- if (!CheckKind.hasValue())
+ if (!CheckKind)
return;
assert(N);
@@ -2748,7 +2794,7 @@ void MallocChecker::HandleLeak(SymbolRef Sym, ExplodedNode *N,
R->markInteresting(Sym);
R->addVisitor<MallocBugVisitor>(Sym, true);
if (ShouldRegisterNoOwnershipChangeVisitor)
- R->addVisitor<NoOwnershipChangeVisitor>(Sym);
+ R->addVisitor<NoOwnershipChangeVisitor>(Sym, this);
C.emitReport(std::move(R));
}
@@ -2862,7 +2908,7 @@ void MallocChecker::checkPreCall(const CallEvent &Call,
// Check arguments for being used after free.
for (unsigned I = 0, E = Call.getNumArgs(); I != E; ++I) {
SVal ArgSVal = Call.getArgSVal(I);
- if (ArgSVal.getAs<Loc>()) {
+ if (isa<Loc>(ArgSVal)) {
SymbolRef Sym = ArgSVal.getAsSymbol();
if (!Sym)
continue;
@@ -3394,7 +3440,7 @@ PathDiagnosticPieceRef MallocBugVisitor::VisitNode(const ExplodedNode *N,
allocation_state::getContainerObjRegion(statePrev, Sym);
const auto *TypedRegion = cast<TypedValueRegion>(ObjRegion);
QualType ObjTy = TypedRegion->getValueType();
- OS << "Inner buffer of '" << ObjTy.getAsString() << "' ";
+ OS << "Inner buffer of '" << ObjTy << "' ";
if (N->getLocation().getKind() == ProgramPoint::PostImplicitCallKind) {
OS << "deallocated by call to destructor";
@@ -3525,13 +3571,13 @@ void MallocChecker::printState(raw_ostream &Out, ProgramStateRef State,
const RefState *RefS = State->get<RegionState>(I.getKey());
AllocationFamily Family = RefS->getAllocationFamily();
Optional<MallocChecker::CheckKind> CheckKind = getCheckIfTracked(Family);
- if (!CheckKind.hasValue())
- CheckKind = getCheckIfTracked(Family, true);
+ if (!CheckKind)
+ CheckKind = getCheckIfTracked(Family, true);
I.getKey()->dumpToStream(Out);
Out << " : ";
I.getData().dump(Out);
- if (CheckKind.hasValue())
+ if (CheckKind)
Out << " (" << CheckNames[*CheckKind].getName() << ")";
Out << NL;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
index 4b5206a102b8..58ba3dac69ab 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MallocSizeofChecker.cpp
@@ -228,9 +228,9 @@ public:
OS << '\'' << Callee->getIdentifier()->getName() << '\'';
else
OS << "call";
- OS << " is converted to a pointer of type '"
- << PointeeType.getAsString() << "', which is incompatible with "
- << "sizeof operand type '" << SizeofType.getAsString() << "'";
+ OS << " is converted to a pointer of type '" << PointeeType
+ << "', which is incompatible with "
+ << "sizeof operand type '" << SizeofType << "'";
SmallVector<SourceRange, 4> Ranges;
Ranges.push_back(i->AllocCall->getCallee()->getSourceRange());
Ranges.push_back(SFinder.Sizeofs[0]->getSourceRange());
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
index aa70db041c76..1906ca5c8f55 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MmapWriteExecChecker.cpp
@@ -48,8 +48,8 @@ void MmapWriteExecChecker::checkPreCall(const CallEvent &Call,
CheckerContext &C) const {
if (matchesAny(Call, MmapFn, MprotectFn)) {
SVal ProtVal = Call.getArgSVal(2);
- Optional<nonloc::ConcreteInt> ProtLoc = ProtVal.getAs<nonloc::ConcreteInt>();
- int64_t Prot = ProtLoc->getValue().getSExtValue();
+ auto ProtLoc = ProtVal.castAs<nonloc::ConcreteInt>();
+ int64_t Prot = ProtLoc.getValue().getSExtValue();
if (ProtExecOv != ProtExec)
ProtExec = ProtExecOv;
if (ProtReadOv != ProtRead)
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
index 4a232c6f4b3f..e78c130a9c22 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/MoveChecker.cpp
@@ -49,7 +49,6 @@ class MoveChecker
: public Checker<check::PreCall, check::PostCall,
check::DeadSymbols, check::RegionChanges> {
public:
- void checkEndFunction(const ReturnStmt *RS, CheckerContext &C) const;
void checkPreCall(const CallEvent &MC, CheckerContext &C) const;
void checkPostCall(const CallEvent &MC, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SR, CheckerContext &C) const;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
index dcca8be55e33..fea35d03cb81 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NSErrorChecker.cpp
@@ -166,7 +166,7 @@ class NSOrCFErrorDerefChecker
mutable std::unique_ptr<NSErrorDerefBug> NSBT;
mutable std::unique_ptr<CFErrorDerefBug> CFBT;
public:
- DefaultBool ShouldCheckNSError, ShouldCheckCFError;
+ bool ShouldCheckNSError = false, ShouldCheckCFError = false;
CheckerNameRef NSErrorName, CFErrorName;
NSOrCFErrorDerefChecker() : NSErrorII(nullptr), CFErrorII(nullptr) {}
@@ -214,7 +214,7 @@ void NSOrCFErrorDerefChecker::checkLocation(SVal loc, bool isLoad,
CheckerContext &C) const {
if (!isLoad)
return;
- if (loc.isUndef() || !loc.getAs<Loc>())
+ if (loc.isUndef() || !isa<Loc>(loc))
return;
ASTContext &Ctx = C.getASTContext();
@@ -266,7 +266,7 @@ void NSOrCFErrorDerefChecker::checkEvent(ImplicitNullDerefEvent event) const {
SmallString<128> Buf;
llvm::raw_svector_ostream os(Buf);
-  os << "Potential null dereference.  According to coding standards ";
+ os << "Potential null dereference. According to coding standards ";
os << (isNSError
? "in 'Creating and Returning NSError Objects' the parameter"
: "documented in CoreFoundation/CFError.h the parameter");
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
index 534b5d68434f..3481936e572b 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NonNullParamChecker.cpp
@@ -136,10 +136,10 @@ void NonNullParamChecker::checkPreCall(const CallEvent &Call,
if (!DV)
continue;
- assert(!HasRefTypeParam || DV->getAs<Loc>());
+ assert(!HasRefTypeParam || isa<Loc>(DV.getValue()));
// Process the case when the argument is not a location.
- if (ExpectedToBeNonNull && !DV->getAs<Loc>()) {
+ if (ExpectedToBeNonNull && !isa<Loc>(DV.getValue())) {
// If the argument is a union type, we want to handle a potential
// transparent_union GCC extension.
if (!ArgE)
@@ -161,7 +161,7 @@ void NonNullParamChecker::checkPreCall(const CallEvent &Call,
assert(++CSV->begin() == CSV->end());
// FIXME: Handle (some_union){ some_other_union_val }, which turns into
// a LazyCompoundVal inside a CompoundVal.
- if (!V.getAs<Loc>())
+ if (!isa<Loc>(V))
continue;
// Retrieve the corresponding expression.
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
index fe8f7e7bf69e..1d8835f6b474 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NullabilityChecker.cpp
@@ -90,7 +90,7 @@ public:
// find warnings about nullability annotations that they have explicitly
// added themselves higher priority to fix than warnings on calls to system
// libraries.
- DefaultBool NoDiagnoseCallsToSystemHeaders;
+ bool NoDiagnoseCallsToSystemHeaders = false;
void checkBind(SVal L, SVal V, const Stmt *S, CheckerContext &C) const;
void checkPostStmt(const ExplicitCastExpr *CE, CheckerContext &C) const;
@@ -115,7 +115,7 @@ public:
CK_NumCheckKinds
};
- DefaultBool ChecksEnabled[CK_NumCheckKinds];
+ bool ChecksEnabled[CK_NumCheckKinds] = {false};
CheckerNameRef CheckNames[CK_NumCheckKinds];
mutable std::unique_ptr<BugType> BTs[CK_NumCheckKinds];
@@ -130,7 +130,7 @@ public:
// NullabilityMap. It is possible to catch errors like passing a null pointer
// to a callee that expects nonnull argument without the information that is
// stored in the NullabilityMap. This is an optimization.
- DefaultBool NeedTracking;
+ bool NeedTracking = false;
private:
class NullabilityBugVisitor : public BugReporterVisitor {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
index df01cc760e7e..3e9fc696f8e6 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/NumberObjectConversionChecker.cpp
@@ -143,7 +143,7 @@ void Callback::run(const MatchFinder::MatchResult &Result) {
else
OS << "Converting ";
- OS << "a pointer value of type '" << ObjT.getAsString() << "' to a ";
+ OS << "a pointer value of type '" << ObjT << "' to a ";
std::string EuphemismForPlain = "primitive";
std::string SuggestedApi = IsObjC ? (IsInteger ? "" : "-boolValue")
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
index 43af4bb14286..a6383009e1fe 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAtSyncChecker.cpp
@@ -41,7 +41,7 @@ void ObjCAtSyncChecker::checkPreStmt(const ObjCAtSynchronizedStmt *S,
SVal V = C.getSVal(Ex);
// Uninitialized value used for the mutex?
- if (V.getAs<UndefinedVal>()) {
+ if (isa<UndefinedVal>(V)) {
if (ExplodedNode *N = C.generateErrorNode()) {
if (!BT_undef)
BT_undef.reset(new BuiltinBug(this, "Uninitialized value used as mutex "
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
index c8eab3288094..514f53b4804f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCAutoreleaseWriteChecker.cpp
@@ -98,11 +98,13 @@ private:
};
}
-static inline std::vector<llvm::StringRef> toRefs(std::vector<std::string> V) {
+static inline std::vector<llvm::StringRef>
+toRefs(const std::vector<std::string> &V) {
return std::vector<llvm::StringRef>(V.begin(), V.end());
}
-static decltype(auto) callsNames(std::vector<std::string> FunctionNames) {
+static decltype(auto)
+callsNames(const std::vector<std::string> &FunctionNames) {
return callee(functionDecl(hasAnyName(toRefs(FunctionNames))));
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
index 8428b2294ba6..6688278a7a33 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersASTChecker.cpp
@@ -135,9 +135,9 @@ void WalkAST::VisitCallExpr(CallExpr *CE) {
llvm::raw_svector_ostream Os(Buf);
// Use "second" and "third" since users will expect 1-based indexing
// for parameter names when mentioned in prose.
- Os << " The "<< ((ArgNum == 1) ? "second" : "third") << " argument to '"
- << Name << "' must be a C array of pointer-sized values, not '"
- << Arg->getType().getAsString() << "'";
+ Os << " The " << ((ArgNum == 1) ? "second" : "third") << " argument to '"
+ << Name << "' must be a C array of pointer-sized values, not '"
+ << Arg->getType() << "'";
PathDiagnosticLocation CELoc =
PathDiagnosticLocation::createBegin(CE, BR.getSourceManager(), AC);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
index 13985af76b00..0244a7a3ebff 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCContainersChecker.cpp
@@ -47,9 +47,6 @@ class ObjCContainersChecker : public Checker< check::PreStmt<CallExpr>,
CheckerContext &C) const;
public:
- /// A tag to id this checker.
- static void *getTag() { static int Tag; return &Tag; }
-
void checkPostStmt(const CallExpr *CE, CheckerContext &C) const;
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
ProgramStateRef checkPointerEscape(ProgramStateRef State,
@@ -137,8 +134,8 @@ void ObjCContainersChecker::checkPreStmt(const CallExpr *CE,
// Now, check if 'Idx in [0, Size-1]'.
const QualType T = IdxExpr->getType();
- ProgramStateRef StInBound = State->assumeInBound(Idx, *Size, true, T);
- ProgramStateRef StOutBound = State->assumeInBound(Idx, *Size, false, T);
+ ProgramStateRef StInBound, StOutBound;
+ std::tie(StInBound, StOutBound) = State->assumeInBoundDual(Idx, *Size, T);
if (StOutBound && !StInBound) {
ExplodedNode *N = C.generateErrorNode(StOutBound);
if (!N)
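assumeInBoundDual splits the state on the bound check once, returning both the in-bounds and out-of-bounds branches, where the removed code called assumeInBound twice. A schematic, assuming NonLoc Idx and Size from the surrounding checker (the type argument T is optional in the variant used later in this patch):

  // Sketch: split the state on 'Idx in [0, Size-1]' in one call.
  ProgramStateRef StInBound, StOutBound;
  std::tie(StInBound, StOutBound) = State->assumeInBoundDual(Idx, *Size, T);
  if (StOutBound && !StInBound) {
    // Index is provably out of bounds on every path: report.
  }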
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
index 17d3c042ac40..9f1a6e416dc6 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ObjCSelfInitChecker.cpp
@@ -94,19 +94,19 @@ enum SelfFlagEnum {
};
}
-REGISTER_MAP_WITH_PROGRAMSTATE(SelfFlag, SymbolRef, unsigned)
+REGISTER_MAP_WITH_PROGRAMSTATE(SelfFlag, SymbolRef, SelfFlagEnum)
REGISTER_TRAIT_WITH_PROGRAMSTATE(CalledInit, bool)
/// A call receiving a reference to 'self' invalidates the object that
/// 'self' contains. This keeps the "self flags" assigned to the 'self'
/// object before the call so we can assign them to the new object that 'self'
/// points to after the call.
-REGISTER_TRAIT_WITH_PROGRAMSTATE(PreCallSelfFlags, unsigned)
+REGISTER_TRAIT_WITH_PROGRAMSTATE(PreCallSelfFlags, SelfFlagEnum)
static SelfFlagEnum getSelfFlags(SVal val, ProgramStateRef state) {
if (SymbolRef sym = val.getAsSymbol())
- if (const unsigned *attachedFlags = state->get<SelfFlag>(sym))
- return (SelfFlagEnum)*attachedFlags;
+ if (const SelfFlagEnum *attachedFlags = state->get<SelfFlag>(sym))
+ return *attachedFlags;
return SelfFlag_None;
}
@@ -118,7 +118,8 @@ static void addSelfFlag(ProgramStateRef state, SVal val,
SelfFlagEnum flag, CheckerContext &C) {
// We tag the symbol that the SVal wraps.
if (SymbolRef sym = val.getAsSymbol()) {
- state = state->set<SelfFlag>(sym, getSelfFlags(val, state) | flag);
+ state = state->set<SelfFlag>(sym,
+ SelfFlagEnum(getSelfFlags(val, state) | flag));
C.addTransition(state);
}
}
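Typing the trait with SelfFlagEnum instead of unsigned makes the stored value self-describing, but bitwise-or on enumerators yields an integer, so writing flags back needs the explicit SelfFlagEnum(...) cast seen above. A self-contained illustration:

  #include <cassert>

  enum SelfFlagEnum {
    SelfFlag_None = 0,
    SelfFlag_Self = 1,
    SelfFlag_InitRes = 2
  };

  int main() {
    SelfFlagEnum Flags = SelfFlag_Self;
    // 'Flags | SelfFlag_InitRes' is an int; converting back needs a cast,
    // exactly as the patch does with SelfFlagEnum(... | flag).
    Flags = SelfFlagEnum(Flags | SelfFlag_InitRes);
    assert(Flags == (SelfFlag_Self | SelfFlag_InitRes));
  }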
@@ -251,11 +252,12 @@ void ObjCSelfInitChecker::checkPreCall(const CallEvent &CE,
for (unsigned i = 0; i < NumArgs; ++i) {
SVal argV = CE.getArgSVal(i);
if (isSelfVar(argV, C)) {
- unsigned selfFlags = getSelfFlags(state->getSVal(argV.castAs<Loc>()), C);
+ SelfFlagEnum selfFlags =
+ getSelfFlags(state->getSVal(argV.castAs<Loc>()), C);
C.addTransition(state->set<PreCallSelfFlags>(selfFlags));
return;
} else if (hasSelfFlag(argV, SelfFlag_Self, C)) {
- unsigned selfFlags = getSelfFlags(argV, C);
+ SelfFlagEnum selfFlags = getSelfFlags(argV, C);
C.addTransition(state->set<PreCallSelfFlags>(selfFlags));
return;
}
@@ -270,7 +272,7 @@ void ObjCSelfInitChecker::checkPostCall(const CallEvent &CE,
return;
ProgramStateRef state = C.getState();
- SelfFlagEnum prevFlags = (SelfFlagEnum)state->get<PreCallSelfFlags>();
+ SelfFlagEnum prevFlags = state->get<PreCallSelfFlags>();
if (!prevFlags)
return;
state = state->remove<PreCallSelfFlags>();
@@ -338,7 +340,7 @@ void ObjCSelfInitChecker::printState(raw_ostream &Out, ProgramStateRef State,
const char *NL, const char *Sep) const {
SelfFlagTy FlagMap = State->get<SelfFlag>();
bool DidCallInit = State->get<CalledInit>();
- SelfFlagEnum PreCallFlags = (SelfFlagEnum)State->get<PreCallSelfFlags>();
+ SelfFlagEnum PreCallFlags = State->get<PreCallSelfFlags>();
if (FlagMap.isEmpty() && !DidCallInit && !PreCallFlags)
return;
@@ -411,7 +413,7 @@ static bool isSelfVar(SVal location, CheckerContext &C) {
AnalysisDeclContext *analCtx = C.getCurrentAnalysisDeclContext();
if (!analCtx->getSelfDecl())
return false;
- if (!location.getAs<loc::MemRegionVal>())
+ if (!isa<loc::MemRegionVal>(location))
return false;
loc::MemRegionVal MRV = location.castAs<loc::MemRegionVal>();
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
index 40472ccfe7e6..cddf206728b1 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PaddingChecker.cpp
@@ -332,10 +332,10 @@ public:
}
Os << " (" << BaselinePad.getQuantity() << " padding bytes, where "
- << OptimalPad.getQuantity() << " is optimal). \n"
- << "Optimal fields order: \n";
+ << OptimalPad.getQuantity() << " is optimal). "
+ << "Optimal fields order: ";
for (const auto *FD : OptimalFieldsOrder)
- Os << FD->getName() << ", \n";
+ Os << FD->getName() << ", ";
Os << "consider reordering the fields or adding explicit padding "
"members.";
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
index f4e9a67438e7..e877dd119ff6 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/PthreadLockChecker.cpp
@@ -78,7 +78,7 @@ public:
CK_C11LockChecker,
CK_NumCheckKinds
};
- DefaultBool ChecksEnabled[CK_NumCheckKinds];
+ bool ChecksEnabled[CK_NumCheckKinds] = {false};
CheckerNameRef CheckNames[CK_NumCheckKinds];
private:
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
index 0bde088d0e85..bee7c468812c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountChecker.cpp
@@ -45,7 +45,7 @@ static ProgramStateRef removeRefBinding(ProgramStateRef State, SymbolRef Sym) {
void RefVal::print(raw_ostream &Out) const {
if (!T.isNull())
- Out << "Tracked " << T.getAsString() << " | ";
+ Out << "Tracked " << T << " | ";
switch (getKind()) {
default: llvm_unreachable("Invalid RefVal kind");
@@ -945,7 +945,8 @@ bool RetainCountChecker::evalCall(const CallEvent &Call,
// Assume that output is zero on the other branch.
NullOutputState = NullOutputState->BindExpr(
- CE, LCtx, C.getSValBuilder().makeNull(), /*Invalidate=*/false);
+ CE, LCtx, C.getSValBuilder().makeNullWithType(ResultTy),
+ /*Invalidate=*/false);
C.addTransition(NullOutputState, &getCastFailTag());
// And on the original branch assume that both input and
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
index 41ef45d317cd..5109ae668686 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/RetainCountChecker/RetainCountDiagnostics.cpp
@@ -261,14 +261,12 @@ static void generateDiagnosticsForCallLike(ProgramStateRef CurrSt,
}
if (CurrV.getObjKind() == ObjKind::CF) {
- os << "a Core Foundation object of type '"
- << Sym->getType().getAsString() << "' with a ";
+ os << "a Core Foundation object of type '" << Sym->getType() << "' with a ";
} else if (CurrV.getObjKind() == ObjKind::OS) {
os << "an OSObject of type '" << findAllocatedObjectName(S, Sym->getType())
<< "' with a ";
} else if (CurrV.getObjKind() == ObjKind::Generalized) {
- os << "an object of type '" << Sym->getType().getAsString()
- << "' with a ";
+ os << "an object of type '" << Sym->getType() << "' with a ";
} else {
assert(CurrV.getObjKind() == ObjKind::ObjC);
QualType T = Sym->getType();
@@ -276,8 +274,7 @@ static void generateDiagnosticsForCallLike(ProgramStateRef CurrSt,
os << "an Objective-C object with a ";
} else {
const ObjCObjectPointerType *PT = cast<ObjCObjectPointerType>(T);
- os << "an instance of " << PT->getPointeeType().getAsString()
- << " with a ";
+ os << "an instance of " << PT->getPointeeType() << " with a ";
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
index c4dc06d4a077..b35ab1fe23ce 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ReturnPointerRangeChecker.cpp
@@ -64,8 +64,8 @@ void ReturnPointerRangeChecker::checkPreStmt(const ReturnStmt *RS,
if (Idx == ElementCount)
return;
- ProgramStateRef StInBound = state->assumeInBound(Idx, ElementCount, true);
- ProgramStateRef StOutBound = state->assumeInBound(Idx, ElementCount, false);
+ ProgramStateRef StInBound, StOutBound;
+ std::tie(StInBound, StOutBound) = state->assumeInBoundDual(Idx, ElementCount);
if (StOutBound && !StInBound) {
ExplodedNode *N = C.generateErrorNode(StOutBound);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp
index ea72ebe3ed57..e9d5d306cc06 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/STLAlgorithmModeling.cpp
@@ -131,7 +131,7 @@ void STLAlgorithmModeling::Find(CheckerContext &C, const CallExpr *CE,
nonloc::SymbolVal(NewPos->getOffset()),
nonloc::SymbolVal(Pos->getOffset()),
SVB.getConditionType());
- assert(GreaterOrEqual.getAs<DefinedSVal>() &&
+ assert(isa<DefinedSVal>(GreaterOrEqual) &&
"Symbol comparison must be a `DefinedSVal`");
StateFound = StateFound->assume(GreaterOrEqual.castAs<DefinedSVal>(), true);
}
@@ -153,7 +153,7 @@ void STLAlgorithmModeling::Find(CheckerContext &C, const CallExpr *CE,
nonloc::SymbolVal(NewPos->getOffset()),
nonloc::SymbolVal(Pos->getOffset()),
SVB.getConditionType());
- assert(Less.getAs<DefinedSVal>() &&
+ assert(isa<DefinedSVal>(Less) &&
"Symbol comparison must be a `DefinedSVal`");
StateFound = StateFound->assume(Less.castAs<DefinedSVal>(), true);
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
index 1de5d7285f65..8c87a548fd91 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SimpleStreamChecker.cpp
@@ -89,20 +89,6 @@ public:
/// state. Let's store it in the ProgramState.
REGISTER_MAP_WITH_PROGRAMSTATE(StreamMap, SymbolRef, StreamState)
-namespace {
-class StopTrackingCallback final : public SymbolVisitor {
- ProgramStateRef state;
-public:
- StopTrackingCallback(ProgramStateRef st) : state(std::move(st)) {}
- ProgramStateRef getState() const { return state; }
-
- bool VisitSymbol(SymbolRef sym) override {
- state = state->remove<StreamMap>(sym);
- return true;
- }
-};
-} // end anonymous namespace
-
SimpleStreamChecker::SimpleStreamChecker()
: OpenFn("fopen"), CloseFn("fclose", 1) {
// Initialize the bug types.
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
index c789a8dbcca1..92eef20d2daa 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/SmartPtrModeling.cpp
@@ -48,9 +48,8 @@ class SmartPtrModeling
public:
// Whether the checker should model for null dereferences of smart pointers.
- DefaultBool ModelSmartPtrDereference;
+ bool ModelSmartPtrDereference = false;
bool evalCall(const CallEvent &Call, CheckerContext &C) const;
- void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
void checkDeadSymbols(SymbolReaper &SymReaper, CheckerContext &C) const;
ProgramStateRef
checkRegionChanges(ProgramStateRef State,
@@ -71,7 +70,8 @@ private:
bool handleMoveCtr(const CallEvent &Call, CheckerContext &C,
const MemRegion *ThisRegion) const;
bool updateMovedSmartPointers(CheckerContext &C, const MemRegion *ThisRegion,
- const MemRegion *OtherSmartPtrRegion) const;
+ const MemRegion *OtherSmartPtrRegion,
+ const CallEvent &Call) const;
void handleBoolConversion(const CallEvent &Call, CheckerContext &C) const;
bool handleComparisionOp(const CallEvent &Call, CheckerContext &C) const;
bool handleOstreamOperator(const CallEvent &Call, CheckerContext &C) const;
@@ -379,11 +379,13 @@ bool SmartPtrModeling::evalCall(const CallEvent &Call,
if (!ThisRegion)
return false;
+ QualType ThisType = cast<CXXMethodDecl>(Call.getDecl())->getThisType();
+
if (CC->getDecl()->isMoveConstructor())
return handleMoveCtr(Call, C, ThisRegion);
if (Call.getNumArgs() == 0) {
- auto NullVal = C.getSValBuilder().makeNull();
+ auto NullVal = C.getSValBuilder().makeNullWithType(ThisType);
State = State->set<TrackedRegionMap>(ThisRegion, NullVal);
C.addTransition(
@@ -640,7 +642,8 @@ void SmartPtrModeling::handleRelease(const CallEvent &Call,
*InnerPointVal);
}
- auto ValueToUpdate = C.getSValBuilder().makeNull();
+ QualType ThisType = cast<CXXMethodDecl>(Call.getDecl())->getThisType();
+ auto ValueToUpdate = C.getSValBuilder().makeNullWithType(ThisType);
State = State->set<TrackedRegionMap>(ThisRegion, ValueToUpdate);
C.addTransition(State, C.getNoteTag([ThisRegion](PathSensitiveBugReport &BR,
@@ -738,13 +741,15 @@ bool SmartPtrModeling::handleAssignOp(const CallEvent &Call,
if (!ThisRegion)
return false;
+ QualType ThisType = cast<CXXMethodDecl>(Call.getDecl())->getThisType();
+
const MemRegion *OtherSmartPtrRegion = OC->getArgSVal(0).getAsRegion();
// In case of 'nullptr' or '0' assigned
if (!OtherSmartPtrRegion) {
bool AssignedNull = Call.getArgSVal(0).isZeroConstant();
if (!AssignedNull)
return false;
- auto NullVal = C.getSValBuilder().makeNull();
+ auto NullVal = C.getSValBuilder().makeNullWithType(ThisType);
State = State->set<TrackedRegionMap>(ThisRegion, NullVal);
C.addTransition(State, C.getNoteTag([ThisRegion](PathSensitiveBugReport &BR,
llvm::raw_ostream &OS) {
@@ -758,7 +763,7 @@ bool SmartPtrModeling::handleAssignOp(const CallEvent &Call,
return true;
}
- return updateMovedSmartPointers(C, ThisRegion, OtherSmartPtrRegion);
+ return updateMovedSmartPointers(C, ThisRegion, OtherSmartPtrRegion, Call);
}
bool SmartPtrModeling::handleMoveCtr(const CallEvent &Call, CheckerContext &C,
@@ -767,17 +772,19 @@ bool SmartPtrModeling::handleMoveCtr(const CallEvent &Call, CheckerContext &C,
if (!OtherSmartPtrRegion)
return false;
- return updateMovedSmartPointers(C, ThisRegion, OtherSmartPtrRegion);
+ return updateMovedSmartPointers(C, ThisRegion, OtherSmartPtrRegion, Call);
}
bool SmartPtrModeling::updateMovedSmartPointers(
CheckerContext &C, const MemRegion *ThisRegion,
- const MemRegion *OtherSmartPtrRegion) const {
+ const MemRegion *OtherSmartPtrRegion, const CallEvent &Call) const {
ProgramStateRef State = C.getState();
+ QualType ThisType = cast<CXXMethodDecl>(Call.getDecl())->getThisType();
const auto *OtherInnerPtr = State->get<TrackedRegionMap>(OtherSmartPtrRegion);
if (OtherInnerPtr) {
State = State->set<TrackedRegionMap>(ThisRegion, *OtherInnerPtr);
- auto NullVal = C.getSValBuilder().makeNull();
+
+ auto NullVal = C.getSValBuilder().makeNullWithType(ThisType);
State = State->set<TrackedRegionMap>(OtherSmartPtrRegion, NullVal);
bool IsArgValNull = OtherInnerPtr->isZeroConstant();
@@ -803,7 +810,8 @@ bool SmartPtrModeling::updateMovedSmartPointers(
} else {
// In case we don't know anything about the value we are moving from,
// remove the entry from the map for the smart pointer that got moved to.
- auto NullVal = C.getSValBuilder().makeNull();
+ // For unique_ptr<A>, Ty will be 'A*'.
+ auto NullVal = C.getSValBuilder().makeNullWithType(ThisType);
State = State->remove<TrackedRegionMap>(ThisRegion);
State = State->set<TrackedRegionMap>(OtherSmartPtrRegion, NullVal);
C.addTransition(State, C.getNoteTag([OtherSmartPtrRegion,
@@ -830,6 +838,8 @@ void SmartPtrModeling::handleBoolConversion(const CallEvent &Call,
const MemRegion *ThisRegion =
cast<CXXInstanceCall>(&Call)->getCXXThisVal().getAsRegion();
+ QualType ThisType = cast<CXXMethodDecl>(Call.getDecl())->getThisType();
+
SVal InnerPointerVal;
if (const auto *InnerValPtr = State->get<TrackedRegionMap>(ThisRegion)) {
InnerPointerVal = *InnerValPtr;
@@ -868,7 +878,7 @@ void SmartPtrModeling::handleBoolConversion(const CallEvent &Call,
std::tie(NotNullState, NullState) =
State->assume(InnerPointerVal.castAs<DefinedOrUnknownSVal>());
- auto NullVal = C.getSValBuilder().makeNull();
+ auto NullVal = C.getSValBuilder().makeNullWithType(ThisType);
// Explicitly tracking the region as null.
NullState = NullState->set<TrackedRegionMap>(ThisRegion, NullVal);
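For context, a minimal sketch of the user-level pattern this checker models (illustrative code, not part of the patch; the callbacks named in the comments are the ones modified above). The point of threading ThisType through is that the null recorded for a moved-from smart pointer now carries a type instead of being a bare null:

  #include <memory>
  #include <utility>

  int use(std::unique_ptr<int> P) {
    std::unique_ptr<int> Q = std::make_unique<int>(42);
    P = std::move(Q);  // handleAssignOp -> updateMovedSmartPointers
    if (Q)             // handleBoolConversion: Q is tracked as a typed null
      return *Q;       // dead branch: the analyzer knows Q is null here
    return *P;         // fine: P owns the value now
  }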
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
index d5e86e86424d..e6cea0fbff8c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StackAddrEscapeChecker.cpp
@@ -42,7 +42,7 @@ public:
CK_NumCheckKinds
};
- DefaultBool ChecksEnabled[CK_NumCheckKinds];
+ bool ChecksEnabled[CK_NumCheckKinds] = {false};
CheckerNameRef CheckNames[CK_NumCheckKinds];
void checkPreCall(const CallEvent &Call, CheckerContext &C) const;
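A side note on the DefaultBool replacements in this and the previous file: with aggregate initialization, "= {false}" explicitly initializes the first element and value-initializes the rest, so every check kind starts disabled, matching what DefaultBool provided. A minimal standalone sketch (enumerator names hypothetical):

  #include <cassert>

  enum CheckKind { CK_First, CK_Second, CK_NumCheckKinds };

  int main() {
    bool ChecksEnabled[CK_NumCheckKinds] = {false}; // all elements become false
    assert(!ChecksEnabled[CK_First] && !ChecksEnabled[CK_Second]);
    return 0;
  }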
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
index 5a8edcabda71..ef673ae41a3d 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StdLibraryFunctionsChecker.cpp
@@ -38,17 +38,9 @@
// Non-pure functions, for which only partial improvement over the default
// behavior is expected, are modeled via check::PostCall, non-intrusively.
//
-// The following standard C functions are currently supported:
-//
-// fgetc getline isdigit isupper toascii
-// fread isalnum isgraph isxdigit
-// fwrite isalpha islower read
-// getc isascii isprint write
-// getchar isblank ispunct toupper
-// getdelim iscntrl isspace tolower
-//
//===----------------------------------------------------------------------===//
+#include "ErrnoModeling.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
@@ -65,6 +57,19 @@
using namespace clang;
using namespace clang::ento;
+/// Produce a textual description of the state of \c errno (this describes
+/// how it is allowed to be used).
+/// The returned string is insertable into a longer warning message (in the form
+/// "the value 'errno' <...>").
+/// Currently only the \c errno_modeling::MustNotBeChecked state is supported,
+/// but other kinds of errno state may be needed later if functions with
+/// special handling of \c errno are added.
+static const char *describeErrnoCheckState(errno_modeling::ErrnoCheckState CS) {
+ assert(CS == errno_modeling::MustNotBeChecked &&
+ "Errno description not applicable.");
+ return "may be undefined after the call and should not be used";
+}
+
namespace {
class StdLibraryFunctionsChecker
: public Checker<check::PreCall, check::PostCall, eval::Call> {
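To make the MustNotBeChecked wording concrete, here is the kind of user code the errno modeling is meant to flag (illustrative only; 'access' is given exactly these constraints further down in this diff). On the success path errno is unspecified, so reading it is the bug:

  #include <cerrno>
  #include <unistd.h>

  int exists(const char *Path) {
    if (access(Path, F_OK) == 0) {
      // Success: errno "may be undefined after the call and should not
      // be used" -- this read is the pattern ErrnoChecker reports.
      if (errno != 0)
        return -1;
      return 1;
    }
    return 0;
  }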
@@ -263,7 +268,7 @@ class StdLibraryFunctionsChecker
return State;
DefinedOrUnknownSVal L = V.castAs<DefinedOrUnknownSVal>();
- if (!L.getAs<Loc>())
+ if (!isa<Loc>(L))
return State;
return State->assume(L, CannotBeNull);
@@ -384,7 +389,158 @@ class StdLibraryFunctionsChecker
};
/// The complete list of constraints that defines a single branch.
- typedef std::vector<ValueConstraintPtr> ConstraintSet;
+ using ConstraintSet = std::vector<ValueConstraintPtr>;
+
+ /// Define how a function affects the system variable 'errno'.
+ /// This works together with the ErrnoModeling and ErrnoChecker classes.
+ class ErrnoConstraintBase {
+ public:
+ /// Apply specific state changes related to the errno variable.
+ virtual ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
+ const Summary &Summary,
+ CheckerContext &C) const = 0;
+ /// Get a description of what is applied to 'errno' and how it is allowed
+ /// to be used. If ErrnoChecker generates a bug then this message is
+ /// displayed as a note at the function call.
+ /// It may return an empty string if no note tag is to be added.
+ virtual std::string describe(StringRef FunctionName) const { return ""; }
+
+ virtual ~ErrnoConstraintBase() {}
+
+ protected:
+ /// Many of the descendant classes use this value.
+ const errno_modeling::ErrnoCheckState CheckState;
+
+ ErrnoConstraintBase(errno_modeling::ErrnoCheckState CS) : CheckState(CS) {}
+
+ /// This is used to conjure a symbol for 'errno' that is distinct from the
+ /// original call expression (the same expression is used for the errno
+ /// symbol).
+ static int Tag;
+ };
+
+ /// Set the value of 'errno' to be related to 0 in a specified way, with a
+ /// specified "errno check state". For example, with \c BO_GT 'errno' is
+ /// constrained to be greater than 0. Use this for the failure cases of
+ /// functions.
+ class ZeroRelatedErrnoConstraint : public ErrnoConstraintBase {
+ BinaryOperatorKind Op;
+
+ public:
+ ZeroRelatedErrnoConstraint(clang::BinaryOperatorKind OpK,
+ errno_modeling::ErrnoCheckState CS)
+ : ErrnoConstraintBase(CS), Op(OpK) {
+ assert(BinaryOperator::isComparisonOp(OpK));
+ }
+
+ ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
+ const Summary &Summary,
+ CheckerContext &C) const override {
+ SValBuilder &SVB = C.getSValBuilder();
+ NonLoc ErrnoSVal =
+ SVB.conjureSymbolVal(&Tag, Call.getOriginExpr(),
+ C.getLocationContext(), C.getASTContext().IntTy,
+ C.blockCount())
+ .castAs<NonLoc>();
+ NonLoc ZeroVal =
+ SVB.makeZeroVal(C.getASTContext().IntTy).castAs<NonLoc>();
+ DefinedOrUnknownSVal Cond =
+ SVB.evalBinOp(State, Op, ErrnoSVal, ZeroVal, SVB.getConditionType())
+ .castAs<DefinedOrUnknownSVal>();
+ State = State->assume(Cond, true);
+ if (!State)
+ return State;
+ return errno_modeling::setErrnoValue(State, C.getLocationContext(),
+ ErrnoSVal, CheckState);
+ }
+
+ std::string describe(StringRef FunctionName) const override {
+ if (CheckState == errno_modeling::Irrelevant)
+ return "";
+ return (Twine("Assuming that function '") + FunctionName.str() +
+ "' fails, in this case the value 'errno' becomes " +
+ BinaryOperator::getOpcodeStr(Op).str() + " 0 and " +
+ describeErrnoCheckState(CheckState))
+ .str();
+ }
+ };
+
+ /// Applies the constraints to 'errno' for a common case when a standard
+ /// function is successful. The value of 'errno' after the call is not
+ /// specified by the standard (it may change or not). The \c ErrnoChecker can
+ /// generate a bug if 'errno' is read afterwards.
+ class SuccessErrnoConstraint : public ErrnoConstraintBase {
+ public:
+ SuccessErrnoConstraint()
+ : ErrnoConstraintBase(errno_modeling::MustNotBeChecked) {}
+
+ ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
+ const Summary &Summary,
+ CheckerContext &C) const override {
+ return errno_modeling::setErrnoState(State, CheckState);
+ }
+
+ std::string describe(StringRef FunctionName) const override {
+ return (Twine("Assuming that function '") + FunctionName.str() +
+ "' is successful, in this case the value 'errno' " +
+ describeErrnoCheckState(CheckState))
+ .str();
+ }
+ };
+
+ /// Set errno constraints when use of 'errno' is completely irrelevant to
+ /// the modeled function, or when modeling is not possible.
+ class NoErrnoConstraint : public ErrnoConstraintBase {
+ public:
+ NoErrnoConstraint() : ErrnoConstraintBase(errno_modeling::Irrelevant) {}
+
+ ProgramStateRef apply(ProgramStateRef State, const CallEvent &Call,
+ const Summary &Summary,
+ CheckerContext &C) const override {
+ return errno_modeling::setErrnoState(State, CheckState);
+ }
+ };
+
+ /// A single branch of a function summary.
+ ///
+ /// A branch is defined by a series of constraints - "assumptions" -
+ /// that together form a single possible outcome of invoking the function.
+ /// When the static analyzer considers a branch, it tries to introduce
+ /// a child node in the Exploded Graph. The child node has to include
+ /// constraints that define the branch. If the constraints contradict
+ /// existing constraints in the state, the node is not created and the branch
+ /// is dropped; otherwise it's queued for future exploration.
+ /// The branch is accompanied by a note text that may be displayed
+ /// to the user when a bug is found on a path that takes this branch.
+ ///
+ /// For example, consider the branches in `isalpha(x)`:
+ /// Branch 1)
+ /// x is in range ['A', 'Z'] or in ['a', 'z']
+ /// then the return value is not 0. (I.e. out-of-range [0, 0])
+ /// and the note may say "Assuming the character is alphabetical"
+ /// Branch 2)
+ /// x is out-of-range ['A', 'Z'] and out-of-range ['a', 'z']
+ /// then the return value is 0
+ /// and the note may say "Assuming the character is non-alphabetical".
+ class SummaryCase {
+ ConstraintSet Constraints;
+ const ErrnoConstraintBase &ErrnoConstraint;
+ StringRef Note;
+
+ public:
+ SummaryCase(ConstraintSet &&Constraints, const ErrnoConstraintBase &ErrnoC,
+ StringRef Note)
+ : Constraints(std::move(Constraints)), ErrnoConstraint(ErrnoC),
+ Note(Note) {}
+
+ SummaryCase(const ConstraintSet &Constraints,
+ const ErrnoConstraintBase &ErrnoC, StringRef Note)
+ : Constraints(Constraints), ErrnoConstraint(ErrnoC), Note(Note) {}
+
+ const ConstraintSet &getConstraints() const { return Constraints; }
+ const ErrnoConstraintBase &getErrnoConstraint() const {
+ return ErrnoConstraint;
+ }
+ StringRef getNote() const { return Note; }
+ };
using ArgTypes = std::vector<Optional<QualType>>;
using RetType = Optional<QualType>;
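The isalpha example above, seen from the user's side (illustrative; the quoted notes are the ones attached to these branches in the summaries below). The locale-specific range 128..UCharRangeMax is left unconstrained, so no note is attached there:

  #include <cctype>

  int classify(int X) {
    if (std::isalpha(X)) {
      // Branch 1: X is in ['A','Z'] or ['a','z'], the return is nonzero;
      // note: "Assuming the character is alphabetical".
      return 1;
    }
    // Branch 2: X is outside both alphabetic ranges (and below 128);
    // note: "Assuming the character is non-alphabetical".
    return 0;
  }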
@@ -451,23 +607,12 @@ class StdLibraryFunctionsChecker
return T;
}
- using Cases = std::vector<ConstraintSet>;
+ using SummaryCases = std::vector<SummaryCase>;
/// A summary includes information about
/// * function prototype (signature)
/// * approach to invalidation,
- /// * a list of branches - a list of list of ranges -
- /// A branch represents a path in the exploded graph of a function (which
- /// is a tree). So, a branch is a series of assumptions. In other words,
- /// branches represent split states and additional assumptions on top of
- /// the splitting assumption.
- /// For example, consider the branches in `isalpha(x)`
- /// Branch 1)
- /// x is in range ['A', 'Z'] or in ['a', 'z']
- /// then the return value is not 0. (I.e. out-of-range [0, 0])
- /// Branch 2)
- /// x is out-of-range ['A', 'Z'] and out-of-range ['a', 'z']
- /// then the return value is 0.
+ /// * a list of branches - that is, a list of lists of ranges,
/// * a list of argument constraints, that must be true on every branch.
/// If these constraints are not satisfied that means a fatal error
/// usually resulting in undefined behaviour.
@@ -482,7 +627,7 @@ class StdLibraryFunctionsChecker
/// signature is matched.
class Summary {
const InvalidationKind InvalidationKd;
- Cases CaseConstraints;
+ SummaryCases Cases;
ConstraintSet ArgConstraints;
// The function to which the summary applies. This is set after lookup and
@@ -492,12 +637,14 @@ class StdLibraryFunctionsChecker
public:
Summary(InvalidationKind InvalidationKd) : InvalidationKd(InvalidationKd) {}
- Summary &Case(ConstraintSet &&CS) {
- CaseConstraints.push_back(std::move(CS));
+ Summary &Case(ConstraintSet &&CS, const ErrnoConstraintBase &ErrnoC,
+ StringRef Note = "") {
+ Cases.push_back(SummaryCase(std::move(CS), ErrnoC, Note));
return *this;
}
- Summary &Case(const ConstraintSet &CS) {
- CaseConstraints.push_back(CS);
+ Summary &Case(const ConstraintSet &CS, const ErrnoConstraintBase &ErrnoC,
+ StringRef Note = "") {
+ Cases.push_back(SummaryCase(CS, ErrnoC, Note));
return *this;
}
Summary &ArgConstraint(ValueConstraintPtr VC) {
@@ -508,7 +655,7 @@ class StdLibraryFunctionsChecker
}
InvalidationKind getInvalidationKd() const { return InvalidationKd; }
- const Cases &getCaseConstraints() const { return CaseConstraints; }
+ const SummaryCases &getCases() const { return Cases; }
const ConstraintSet &getArgConstraints() const { return ArgConstraints; }
QualType getArgType(ArgNo ArgN) const {
@@ -530,8 +677,8 @@ class StdLibraryFunctionsChecker
// Once we know the exact type of the function then do validation check on
// all the given constraints.
bool validateByConstraints(const FunctionDecl *FD) const {
- for (const ConstraintSet &Case : CaseConstraints)
- for (const ValueConstraintPtr &Constraint : Case)
+ for (const SummaryCase &Case : Cases)
+ for (const ValueConstraintPtr &Constraint : Case.getConstraints())
if (!Constraint->checkValidity(FD))
return false;
for (const ValueConstraintPtr &Constraint : ArgConstraints)
@@ -563,7 +710,7 @@ public:
CK_StdCLibraryFunctionsTesterChecker,
CK_NumCheckKinds
};
- DefaultBool ChecksEnabled[CK_NumCheckKinds];
+ bool ChecksEnabled[CK_NumCheckKinds] = {false};
CheckerNameRef CheckNames[CK_NumCheckKinds];
bool DisplayLoadedSummaries = false;
@@ -605,8 +752,20 @@ private:
C.emitReport(std::move(R));
}
+
+ /// These are the errno constraints that can be passed to summary cases.
+ /// Exactly one of these should apply to a single summary case.
+ /// Usually, if a failure return value exists for a function, that function
+ /// needs separate cases for success and failure, with different errno
+ /// constraints (and different return value constraints).
+ const NoErrnoConstraint ErrnoIrrelevant;
+ const SuccessErrnoConstraint ErrnoMustNotBeChecked;
+ const ZeroRelatedErrnoConstraint ErrnoNEZeroIrrelevant{
+ clang::BinaryOperatorKind::BO_NE, errno_modeling::Irrelevant};
};
+int StdLibraryFunctionsChecker::ErrnoConstraintBase::Tag = 0;
+
const StdLibraryFunctionsChecker::ArgNo StdLibraryFunctionsChecker::Ret =
std::numeric_limits<ArgNo>::max();
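How the three constraint members pair up in practice, in illustrative user code ('dup' receives exactly this success/failure split later in this diff): on failure, ErrnoNEZeroIrrelevant has constrained errno to be nonzero, so reading it is meaningful; on success, it must not be read at all:

  #include <cerrno>
  #include <cstdio>
  #include <cstring>
  #include <unistd.h>

  void duplicateStdin() {
    int NewFd = dup(0);
    if (NewFd == -1) {
      // Failure case: errno != 0 here, so reporting it is fine.
      std::fprintf(stderr, "dup: %s\n", std::strerror(errno));
      return;
    }
    // Success case: NewFd >= 0 (ReturnsValidFileDescriptor); errno must
    // not be checked on this path (ErrnoMustNotBeChecked).
    close(NewFd);
  }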
@@ -842,18 +1001,42 @@ void StdLibraryFunctionsChecker::checkPostCall(const CallEvent &Call,
// Now apply the constraints.
const Summary &Summary = *FoundSummary;
ProgramStateRef State = C.getState();
+ const ExplodedNode *Node = C.getPredecessor();
// Apply case/branch specifications.
- for (const ConstraintSet &Case : Summary.getCaseConstraints()) {
+ for (const SummaryCase &Case : Summary.getCases()) {
ProgramStateRef NewState = State;
- for (const ValueConstraintPtr &Constraint : Case) {
+ for (const ValueConstraintPtr &Constraint : Case.getConstraints()) {
NewState = Constraint->apply(NewState, Call, Summary, C);
if (!NewState)
break;
}
- if (NewState && NewState != State)
- C.addTransition(NewState);
+ if (NewState)
+ NewState = Case.getErrnoConstraint().apply(NewState, Call, Summary, C);
+
+ if (NewState && NewState != State) {
+ if (Case.getNote().empty()) {
+ std::string Note;
+ if (const auto *D = dyn_cast_or_null<FunctionDecl>(Call.getDecl()))
+ Note = Case.getErrnoConstraint().describe(D->getNameAsString());
+ if (Note.empty())
+ C.addTransition(NewState);
+ else
+ C.addTransition(NewState, errno_modeling::getErrnoNoteTag(C, Note));
+ } else {
+ StringRef Note = Case.getNote();
+ const NoteTag *Tag = C.getNoteTag(
+ // Sorry couldn't help myself.
+ [Node, Note]() -> std::string {
+ // Don't emit "Assuming..." note when we ended up
+ // knowing in advance which branch is taken.
+ return (Node->succ_size() > 1) ? Note.str() : "";
+ },
+ /*IsPrunable=*/true);
+ C.addTransition(NewState, Tag);
+ }
+ }
}
}
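A note on the succ_size() test above: when only one summary case is feasible -- say, the argument is a known constant -- the predecessor node ends up with a single successor, the lambda returns an empty string, and no "Assuming..." note appears on the path. Illustrative:

  #include <cctype>

  int known() {
    // '7' satisfies only the "is a digit" case, so there is no state
    // split and no "Assuming..." note is emitted for this call.
    return std::isdigit('7');
  }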
@@ -872,7 +1055,9 @@ bool StdLibraryFunctionsChecker::evalCall(const CallEvent &Call,
SVal V = C.getSValBuilder().conjureSymbolVal(
CE, LC, CE->getType().getCanonicalType(), C.blockCount());
State = State->BindExpr(CE, LC, V);
+
C.addTransition(State);
+
return true;
}
case NoEvalCall:
@@ -1211,129 +1396,176 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
// Boils down to isupper() or islower() or isdigit().
.Case({ArgumentCondition(0U, WithinRange,
{{'0', '9'}, {'A', 'Z'}, {'a', 'z'}}),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant, "Assuming the character is alphanumeric")
// The locale-specific range.
// No post-condition. We are completely unaware of
// locale-specific return values.
- .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})})
+ .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})},
+ ErrnoIrrelevant)
.Case(
{ArgumentCondition(
0U, OutOfRange,
{{'0', '9'}, {'A', 'Z'}, {'a', 'z'}, {128, UCharRangeMax}}),
- ReturnValueCondition(WithinRange, SingleValue(0))})
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant, "Assuming the character is non-alphanumeric")
.ArgConstraint(ArgumentCondition(
0U, WithinRange, {{EOFv, EOFv}, {0, UCharRangeMax}})));
addToFunctionSummaryMap(
"isalpha", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
.Case({ArgumentCondition(0U, WithinRange, {{'A', 'Z'}, {'a', 'z'}}),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant, "Assuming the character is alphabetical")
// The locale-specific range.
- .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})})
+ .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})},
+ ErrnoIrrelevant)
.Case({ArgumentCondition(
0U, OutOfRange,
{{'A', 'Z'}, {'a', 'z'}, {128, UCharRangeMax}}),
- ReturnValueCondition(WithinRange, SingleValue(0))}));
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant, "Assuming the character is non-alphabetical"));
addToFunctionSummaryMap(
"isascii", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
.Case({ArgumentCondition(0U, WithinRange, Range(0, 127)),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant, "Assuming the character is an ASCII character")
.Case({ArgumentCondition(0U, OutOfRange, Range(0, 127)),
- ReturnValueCondition(WithinRange, SingleValue(0))}));
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character is not an ASCII character"));
addToFunctionSummaryMap(
"isblank", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
.Case({ArgumentCondition(0U, WithinRange, {{'\t', '\t'}, {' ', ' '}}),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant, "Assuming the character is a blank character")
.Case({ArgumentCondition(0U, OutOfRange, {{'\t', '\t'}, {' ', ' '}}),
- ReturnValueCondition(WithinRange, SingleValue(0))}));
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character is not a blank character"));
addToFunctionSummaryMap(
"iscntrl", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
.Case({ArgumentCondition(0U, WithinRange, {{0, 32}, {127, 127}}),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character is a control character")
.Case({ArgumentCondition(0U, OutOfRange, {{0, 32}, {127, 127}}),
- ReturnValueCondition(WithinRange, SingleValue(0))}));
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character is not a control character"));
addToFunctionSummaryMap(
"isdigit", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
.Case({ArgumentCondition(0U, WithinRange, Range('0', '9')),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant, "Assuming the character is a digit")
.Case({ArgumentCondition(0U, OutOfRange, Range('0', '9')),
- ReturnValueCondition(WithinRange, SingleValue(0))}));
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant, "Assuming the character is not a digit"));
addToFunctionSummaryMap(
"isgraph", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
.Case({ArgumentCondition(0U, WithinRange, Range(33, 126)),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
- .Case({ArgumentCondition(0U, OutOfRange, Range(33, 126)),
- ReturnValueCondition(WithinRange, SingleValue(0))}));
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character has graphical representation")
+ .Case(
+ {ArgumentCondition(0U, OutOfRange, Range(33, 126)),
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character does not have graphical representation"));
addToFunctionSummaryMap(
"islower", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
// Is certainly lowercase.
.Case({ArgumentCondition(0U, WithinRange, Range('a', 'z')),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant, "Assuming the character is a lowercase letter")
// Is ascii but not lowercase.
.Case({ArgumentCondition(0U, WithinRange, Range(0, 127)),
ArgumentCondition(0U, OutOfRange, Range('a', 'z')),
- ReturnValueCondition(WithinRange, SingleValue(0))})
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character is not a lowercase letter")
// The locale-specific range.
- .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})})
+ .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})},
+ ErrnoIrrelevant)
// Is not an unsigned char.
.Case({ArgumentCondition(0U, OutOfRange, Range(0, UCharRangeMax)),
- ReturnValueCondition(WithinRange, SingleValue(0))}));
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant));
addToFunctionSummaryMap(
"isprint", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
.Case({ArgumentCondition(0U, WithinRange, Range(32, 126)),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant, "Assuming the character is printable")
.Case({ArgumentCondition(0U, OutOfRange, Range(32, 126)),
- ReturnValueCondition(WithinRange, SingleValue(0))}));
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant, "Assuming the character is non-printable"));
addToFunctionSummaryMap(
"ispunct", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
.Case({ArgumentCondition(
0U, WithinRange,
{{'!', '/'}, {':', '@'}, {'[', '`'}, {'{', '~'}}),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant, "Assuming the character is a punctuation mark")
.Case({ArgumentCondition(
0U, OutOfRange,
{{'!', '/'}, {':', '@'}, {'[', '`'}, {'{', '~'}}),
- ReturnValueCondition(WithinRange, SingleValue(0))}));
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character is not a punctuation mark"));
addToFunctionSummaryMap(
"isspace", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
// Space, '\f', '\n', '\r', '\t', '\v'.
.Case({ArgumentCondition(0U, WithinRange, {{9, 13}, {' ', ' '}}),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character is a whitespace character")
// The locale-specific range.
- .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})})
+ .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})},
+ ErrnoIrrelevant)
.Case({ArgumentCondition(0U, OutOfRange,
{{9, 13}, {' ', ' '}, {128, UCharRangeMax}}),
- ReturnValueCondition(WithinRange, SingleValue(0))}));
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character is not a whitespace character"));
addToFunctionSummaryMap(
"isupper", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
// Is certainly uppercase.
.Case({ArgumentCondition(0U, WithinRange, Range('A', 'Z')),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character is an uppercase letter")
// The locale-specific range.
- .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})})
+ .Case({ArgumentCondition(0U, WithinRange, {{128, UCharRangeMax}})},
+ ErrnoIrrelevant)
// Other.
.Case({ArgumentCondition(0U, OutOfRange,
{{'A', 'Z'}, {128, UCharRangeMax}}),
- ReturnValueCondition(WithinRange, SingleValue(0))}));
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character is not an uppercase letter"));
addToFunctionSummaryMap(
"isxdigit", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
.Case({ArgumentCondition(0U, WithinRange,
{{'0', '9'}, {'A', 'F'}, {'a', 'f'}}),
- ReturnValueCondition(OutOfRange, SingleValue(0))})
+ ReturnValueCondition(OutOfRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character is a hexadecimal digit")
.Case({ArgumentCondition(0U, OutOfRange,
{{'0', '9'}, {'A', 'F'}, {'a', 'f'}}),
- ReturnValueCondition(WithinRange, SingleValue(0))}));
+ ReturnValueCondition(WithinRange, SingleValue(0))},
+ ErrnoIrrelevant,
+ "Assuming the character is not a hexadecimal digit"));
addToFunctionSummaryMap(
"toupper", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(EvalCallAsPure)
@@ -1355,18 +1587,21 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
{"getc", "fgetc"}, Signature(ArgTypes{FilePtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
.Case({ReturnValueCondition(WithinRange,
- {{EOFv, EOFv}, {0, UCharRangeMax}})}));
+ {{EOFv, EOFv}, {0, UCharRangeMax}})},
+ ErrnoIrrelevant));
addToFunctionSummaryMap(
"getchar", Signature(ArgTypes{}, RetType{IntTy}),
Summary(NoEvalCall)
.Case({ReturnValueCondition(WithinRange,
- {{EOFv, EOFv}, {0, UCharRangeMax}})}));
+ {{EOFv, EOFv}, {0, UCharRangeMax}})},
+ ErrnoIrrelevant));
// read()-like functions that never return more than buffer size.
auto FreadSummary =
Summary(NoEvalCall)
.Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
- ReturnValueCondition(WithinRange, Range(0, SizeMax))})
+ ReturnValueCondition(WithinRange, Range(0, SizeMax))},
+ ErrnoIrrelevant)
.ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(3)))
.ArgConstraint(BufferSize(/*Buffer=*/ArgNo(0), /*BufSize=*/ArgNo(1),
@@ -1393,7 +1628,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
auto ReadSummary =
Summary(NoEvalCall)
.Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
- ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))});
+ ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))},
+ ErrnoIrrelevant);
// FIXME: these are actually defined by POSIX and not by the C standard; we
// should handle them together with the rest of the POSIX functions.
@@ -1410,7 +1646,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
auto GetLineSummary =
Summary(NoEvalCall)
.Case({ReturnValueCondition(WithinRange,
- Range({-1, -1}, {1, Ssize_tMax}))});
+ Range({-1, -1}, {1, Ssize_tMax}))},
+ ErrnoIrrelevant);
QualType CharPtrPtrRestrictTy = getRestrictTy(getPointerTy(CharPtrTy));
@@ -1435,12 +1672,15 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
GetLineSummary);
{
- Summary GetenvSummary = Summary(NoEvalCall)
- .ArgConstraint(NotNull(ArgNo(0)))
- .Case({NotNull(Ret)});
+ Summary GetenvSummary =
+ Summary(NoEvalCall)
+ .ArgConstraint(NotNull(ArgNo(0)))
+ .Case({NotNull(Ret)}, ErrnoIrrelevant,
+ "Assuming the environment variable exists");
// In untrusted environments the envvar might not exist.
if (!ShouldAssumeControlledEnvironment)
- GetenvSummary.Case({NotNull(Ret)->negate()});
+ GetenvSummary.Case({NotNull(Ret)->negate()}, ErrnoIrrelevant,
+ "Assuming the environment variable does not exist");
// char *getenv(const char *name);
addToFunctionSummaryMap(
@@ -1464,14 +1704,22 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
const auto ReturnsZeroOrMinusOne =
ConstraintSet{ReturnValueCondition(WithinRange, Range(-1, 0))};
+ const auto ReturnsZero =
+ ConstraintSet{ReturnValueCondition(WithinRange, SingleValue(0))};
+ const auto ReturnsMinusOne =
+ ConstraintSet{ReturnValueCondition(WithinRange, SingleValue(-1))};
+ const auto ReturnsNonnegative =
+ ConstraintSet{ReturnValueCondition(WithinRange, Range(0, IntMax))};
const auto ReturnsFileDescriptor =
ConstraintSet{ReturnValueCondition(WithinRange, Range(-1, IntMax))};
+ const auto &ReturnsValidFileDescriptor = ReturnsNonnegative;
// int access(const char *pathname, int amode);
addToFunctionSummaryMap(
"access", Signature(ArgTypes{ConstCharPtrTy, IntTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(0))));
// int faccessat(int dirfd, const char *pathname, int mode, int flags);
@@ -1480,21 +1728,25 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstCharPtrTy, IntTy, IntTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(1))));
// int dup(int fildes);
- addToFunctionSummaryMap("dup", Signature(ArgTypes{IntTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsFileDescriptor)
- .ArgConstraint(ArgumentCondition(
- 0, WithinRange, Range(0, IntMax))));
+ addToFunctionSummaryMap(
+ "dup", Signature(ArgTypes{IntTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .ArgConstraint(
+ ArgumentCondition(0, WithinRange, Range(0, IntMax))));
// int dup2(int fildes1, int filedes2);
addToFunctionSummaryMap(
"dup2", Signature(ArgTypes{IntTy, IntTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsFileDescriptor)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(
ArgumentCondition(1, WithinRange, Range(0, IntMax))));
@@ -1503,7 +1755,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
addToFunctionSummaryMap("fdatasync",
Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(ArgumentCondition(
0, WithinRange, Range(0, IntMax))));
@@ -1512,14 +1765,15 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"fnmatch",
Signature(ArgTypes{ConstCharPtrTy, ConstCharPtrTy, IntTy},
RetType{IntTy}),
- Summary(EvalCallAsPure)
+ Summary(NoEvalCall)
.ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
// int fsync(int fildes);
addToFunctionSummaryMap("fsync", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(ArgumentCondition(
0, WithinRange, Range(0, IntMax))));
@@ -1530,7 +1784,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"truncate",
Signature(ArgTypes{ConstCharPtrTy, Off_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(0))));
// int symlink(const char *oldpath, const char *newpath);
@@ -1538,7 +1793,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"symlink",
Signature(ArgTypes{ConstCharPtrTy, ConstCharPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
@@ -1548,7 +1804,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{ConstCharPtrTy, IntTy, ConstCharPtrTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(ArgumentCondition(1, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(2))));
@@ -1557,7 +1814,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
addToFunctionSummaryMap(
"lockf", Signature(ArgTypes{IntTy, IntTy, Off_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
@@ -1567,7 +1825,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
addToFunctionSummaryMap(
"creat", Signature(ArgTypes{ConstCharPtrTy, Mode_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsFileDescriptor)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(0))));
// unsigned int sleep(unsigned int seconds);
@@ -1581,11 +1840,12 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Optional<QualType> DirPtrTy = getPointerTy(DirTy);
// int dirfd(DIR *dirp);
- addToFunctionSummaryMap("dirfd",
- Signature(ArgTypes{DirPtrTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsFileDescriptor)
- .ArgConstraint(NotNull(ArgNo(0))));
+ addToFunctionSummaryMap(
+ "dirfd", Signature(ArgTypes{DirPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .ArgConstraint(NotNull(ArgNo(0))));
// unsigned int alarm(unsigned int seconds);
addToFunctionSummaryMap(
@@ -1598,7 +1858,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
addToFunctionSummaryMap("closedir",
Signature(ArgTypes{DirPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(0))));
// char *strdup(const char *s);
@@ -1621,18 +1882,21 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
// int mkstemp(char *template);
- addToFunctionSummaryMap("mkstemp",
- Signature(ArgTypes{CharPtrTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsFileDescriptor)
- .ArgConstraint(NotNull(ArgNo(0))));
+ addToFunctionSummaryMap(
+ "mkstemp", Signature(ArgTypes{CharPtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .ArgConstraint(NotNull(ArgNo(0))));
// char *mkdtemp(char *template);
+ // FIXME: Improve for errno modeling.
addToFunctionSummaryMap(
"mkdtemp", Signature(ArgTypes{CharPtrTy}, RetType{CharPtrTy}),
Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
// char *getcwd(char *buf, size_t size);
+ // FIXME: Improve for errno modeling.
addToFunctionSummaryMap(
"getcwd", Signature(ArgTypes{CharPtrTy, SizeTy}, RetType{CharPtrTy}),
Summary(NoEvalCall)
@@ -1643,7 +1907,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
addToFunctionSummaryMap(
"mkdir", Signature(ArgTypes{ConstCharPtrTy, Mode_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(0))));
// int mkdirat(int dirfd, const char *pathname, mode_t mode);
@@ -1651,7 +1916,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"mkdirat",
Signature(ArgTypes{IntTy, ConstCharPtrTy, Mode_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(1))));
Optional<QualType> Dev_tTy = lookupTy("dev_t");
@@ -1661,7 +1927,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"mknod",
Signature(ArgTypes{ConstCharPtrTy, Mode_tTy, Dev_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(0))));
// int mknodat(int dirfd, const char *pathname, mode_t mode, dev_t dev);
@@ -1670,14 +1937,16 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstCharPtrTy, Mode_tTy, Dev_tTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(1))));
// int chmod(const char *path, mode_t mode);
addToFunctionSummaryMap(
"chmod", Signature(ArgTypes{ConstCharPtrTy, Mode_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(0))));
// int fchmodat(int dirfd, const char *pathname, mode_t mode, int flags);
@@ -1686,7 +1955,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstCharPtrTy, Mode_tTy, IntTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(1))));
@@ -1694,7 +1964,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
addToFunctionSummaryMap(
"fchmod", Signature(ArgTypes{IntTy, Mode_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
@@ -1708,7 +1979,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstCharPtrTy, Uid_tTy, Gid_tTy, IntTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(1))));
@@ -1717,7 +1989,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"chown",
Signature(ArgTypes{ConstCharPtrTy, Uid_tTy, Gid_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(0))));
// int lchown(const char *path, uid_t owner, gid_t group);
@@ -1725,14 +1998,16 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"lchown",
Signature(ArgTypes{ConstCharPtrTy, Uid_tTy, Gid_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(0))));
// int fchown(int fildes, uid_t owner, gid_t group);
addToFunctionSummaryMap(
"fchown", Signature(ArgTypes{IntTy, Uid_tTy, Gid_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
@@ -1740,14 +2015,16 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
addToFunctionSummaryMap("rmdir",
Signature(ArgTypes{ConstCharPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(0))));
// int chdir(const char *path);
addToFunctionSummaryMap("chdir",
Signature(ArgTypes{ConstCharPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(0))));
// int link(const char *oldpath, const char *newpath);
@@ -1755,7 +2032,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"link",
Signature(ArgTypes{ConstCharPtrTy, ConstCharPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
@@ -1766,7 +2044,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstCharPtrTy, IntTy, ConstCharPtrTy, IntTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(1)))
.ArgConstraint(ArgumentCondition(2, WithinRange, Range(0, IntMax)))
@@ -1776,7 +2055,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
addToFunctionSummaryMap("unlink",
Signature(ArgTypes{ConstCharPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(0))));
// int unlinkat(int fd, const char *path, int flag);
@@ -1784,7 +2064,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"unlinkat",
Signature(ArgTypes{IntTy, ConstCharPtrTy, IntTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(1))));
@@ -1796,7 +2077,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
addToFunctionSummaryMap(
"fstat", Signature(ArgTypes{IntTy, StructStatPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(1))));
@@ -1806,7 +2088,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{ConstCharPtrRestrictTy, StructStatPtrRestrictTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
@@ -1816,7 +2099,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{ConstCharPtrRestrictTy, StructStatPtrRestrictTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1))));
@@ -1828,17 +2112,20 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
StructStatPtrRestrictTy, IntTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(1)))
.ArgConstraint(NotNull(ArgNo(2))));
// DIR *opendir(const char *name);
+ // FIXME: Improve for errno modeling.
addToFunctionSummaryMap(
"opendir", Signature(ArgTypes{ConstCharPtrTy}, RetType{DirPtrTy}),
Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
// DIR *fdopendir(int fd);
+ // FIXME: Improve for errno modeling.
addToFunctionSummaryMap("fdopendir",
Signature(ArgTypes{IntTy}, RetType{DirPtrTy}),
Summary(NoEvalCall)
@@ -1849,11 +2136,13 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
addToFunctionSummaryMap(
"isatty", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case({ReturnValueCondition(WithinRange, Range(0, 1))})
+ .Case({ReturnValueCondition(WithinRange, Range(0, 1))},
+ ErrnoIrrelevant)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
// FILE *popen(const char *command, const char *type);
+ // FIXME: Improve for errno modeling.
addToFunctionSummaryMap(
"popen",
Signature(ArgTypes{ConstCharPtrTy, ConstCharPtrTy}, RetType{FilePtrTy}),
@@ -1862,6 +2151,7 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
.ArgConstraint(NotNull(ArgNo(1))));
// int pclose(FILE *stream);
+ // FIXME: Improve for errno modeling.
addToFunctionSummaryMap(
"pclose", Signature(ArgTypes{FilePtrTy}, RetType{IntTy}),
Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
@@ -1869,7 +2159,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
// int close(int fildes);
addToFunctionSummaryMap("close", Signature(ArgTypes{IntTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(ArgumentCondition(
0, WithinRange, Range(-1, IntMax))));
@@ -1886,6 +2177,7 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
// FILE *fdopen(int fd, const char *mode);
+ // FIXME: Improve for errno modeling.
addToFunctionSummaryMap(
"fdopen",
Signature(ArgTypes{IntTy, ConstCharPtrTy}, RetType{FilePtrTy}),
@@ -1909,18 +2201,19 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Summary(NoEvalCall).ArgConstraint(NotNull(ArgNo(0))));
// int fileno(FILE *stream);
- addToFunctionSummaryMap("fileno",
- Signature(ArgTypes{FilePtrTy}, RetType{IntTy}),
- Summary(NoEvalCall)
- .Case(ReturnsFileDescriptor)
- .ArgConstraint(NotNull(ArgNo(0))));
+ addToFunctionSummaryMap(
+ "fileno", Signature(ArgTypes{FilePtrTy}, RetType{IntTy}),
+ Summary(NoEvalCall)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
+ .ArgConstraint(NotNull(ArgNo(0))));
// int fseeko(FILE *stream, off_t offset, int whence);
addToFunctionSummaryMap(
"fseeko",
Signature(ArgTypes{FilePtrTy, Off_tTy, IntTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZeroOrMinusOne, ErrnoIrrelevant)
.ArgConstraint(NotNull(ArgNo(0))));
// off_t ftello(FILE *stream);
@@ -1930,6 +2223,7 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
// void *mmap(void *addr, size_t length, int prot, int flags, int fd,
// off_t offset);
+ // FIXME: Improve for errno modeling.
addToFunctionSummaryMap(
"mmap",
Signature(ArgTypes{VoidPtrTy, SizeTy, IntTy, IntTy, IntTy, Off_tTy},
@@ -1942,6 +2236,7 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Optional<QualType> Off64_tTy = lookupTy("off64_t");
// void *mmap64(void *addr, size_t length, int prot, int flags, int fd,
// off64_t offset);
+ // FIXME: Improve for errno modeling.
addToFunctionSummaryMap(
"mmap64",
Signature(ArgTypes{VoidPtrTy, SizeTy, IntTy, IntTy, IntTy, Off64_tTy},
@@ -1955,13 +2250,20 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
addToFunctionSummaryMap("pipe",
Signature(ArgTypes{IntPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(0))));
// off_t lseek(int fildes, off_t offset, int whence);
+ // In the first case we cannot tell for sure whether it failed or not.
+ // A return value different from the expected offset (which is unknown
+ // here) may indicate failure. For this reason we do not enforce the errno
+ // check (it can cause false positives).
addToFunctionSummaryMap(
"lseek", Signature(ArgTypes{IntTy, Off_tTy, IntTy}, RetType{Off_tTy}),
Summary(NoEvalCall)
+ .Case(ReturnsNonnegative, ErrnoIrrelevant)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
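The lseek caveat in concrete terms (illustrative): only -1 is an unambiguous failure; any nonnegative return is the new offset, which the analyzer usually cannot compare against the expected one, hence ErrnoIrrelevant on the nonnegative case:

  #include <cerrno>
  #include <unistd.h>

  long fileSize(int Fd) {
    off_t End = lseek(Fd, 0, SEEK_END);
    if (End == (off_t)-1)
      return -errno; // unambiguous failure: errno is set and nonzero
    // Nonnegative: success as far as the analyzer can tell; the errno
    // state is deliberately left unconstrained to avoid false positives.
    return (long)End;
  }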
@@ -1973,7 +2275,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
RetType{Ssize_tTy}),
Summary(NoEvalCall)
.Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
- ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))})
+ ReturnValueCondition(WithinRange, Range(0, Ssize_tMax))},
+ ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(0)))
.ArgConstraint(NotNull(ArgNo(1)))
.ArgConstraint(BufferSize(/*Buffer=*/ArgNo(1),
@@ -1990,7 +2294,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
RetType{Ssize_tTy}),
Summary(NoEvalCall)
.Case({ReturnValueCondition(LessThanOrEq, ArgNo(3)),
- ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))})
+ ReturnValueCondition(WithinRange, Range(0, Ssize_tMax))},
+ ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(1)))
.ArgConstraint(NotNull(ArgNo(2)))
@@ -2006,12 +2312,14 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstCharPtrTy, IntTy, ConstCharPtrTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(1)))
.ArgConstraint(NotNull(ArgNo(3))));
// char *realpath(const char *restrict file_name,
// char *restrict resolved_name);
+ // FIXME: Improve for errno modeling.
addToFunctionSummaryMap(
"realpath",
Signature(ArgTypes{ConstCharPtrRestrictTy, CharPtrRestrictTy},
@@ -2025,7 +2333,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"execv",
Signature(ArgTypes{ConstCharPtrTy, CharPtrConstPtr}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case({ReturnValueCondition(WithinRange, SingleValue(-1))})
+ .Case({ReturnValueCondition(WithinRange, SingleValue(-1))},
+ ErrnoIrrelevant)
.ArgConstraint(NotNull(ArgNo(0))));
// int execvp(const char *file, char *const argv[]);
@@ -2033,7 +2342,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"execvp",
Signature(ArgTypes{ConstCharPtrTy, CharPtrConstPtr}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case({ReturnValueCondition(WithinRange, SingleValue(-1))})
+ .Case({ReturnValueCondition(WithinRange, SingleValue(-1))},
+ ErrnoIrrelevant)
.ArgConstraint(NotNull(ArgNo(0))));
// int getopt(int argc, char * const argv[], const char *optstring);
@@ -2042,7 +2352,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, CharPtrConstPtr, ConstCharPtrTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case({ReturnValueCondition(WithinRange, Range(-1, UCharRangeMax))})
+ .Case({ReturnValueCondition(WithinRange, Range(-1, UCharRangeMax))},
+ ErrnoIrrelevant)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(1)))
.ArgConstraint(NotNull(ArgNo(2))));
@@ -2068,7 +2379,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
// constraints which require pointer types for the sockaddr param.
auto Accept =
Summary(NoEvalCall)
- .Case(ReturnsFileDescriptor)
+ .Case(ReturnsValidFileDescriptor, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)));
if (!addToFunctionSummaryMap(
"accept",
@@ -2091,7 +2403,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstStructSockaddrPtrTy, Socklen_tTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(1)))
@@ -2104,7 +2417,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"bind",
Signature(ArgTypes{IntTy, Irrelevant, Socklen_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(
@@ -2118,7 +2432,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Socklen_tPtrRestrictTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(1)))
@@ -2128,7 +2443,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, Irrelevant, Socklen_tPtrRestrictTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
@@ -2140,7 +2456,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Socklen_tPtrRestrictTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(1)))
@@ -2150,7 +2467,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, Irrelevant, Socklen_tPtrRestrictTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
@@ -2161,7 +2479,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstStructSockaddrPtrTy, Socklen_tTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(NotNull(ArgNo(1)))))
@@ -2169,14 +2488,17 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"connect",
Signature(ArgTypes{IntTy, Irrelevant, Socklen_tTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
auto Recvfrom =
Summary(NoEvalCall)
.Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
- ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))})
+ ReturnValueCondition(WithinRange, Range(0, Ssize_tMax))},
+ ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(BufferSize(/*Buffer=*/ArgNo(1),
/*BufSize=*/ArgNo(2)));
@@ -2201,7 +2523,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
auto Sendto =
Summary(NoEvalCall)
.Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
- ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))})
+ ReturnValueCondition(WithinRange, Range(0, Ssize_tMax))},
+ ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(BufferSize(/*Buffer=*/ArgNo(1),
/*BufSize=*/ArgNo(2)));
@@ -2225,7 +2549,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
addToFunctionSummaryMap("listen",
Signature(ArgTypes{IntTy, IntTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(ArgumentCondition(
0, WithinRange, Range(0, IntMax))));
@@ -2236,7 +2561,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
RetType{Ssize_tTy}),
Summary(NoEvalCall)
.Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
- ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))})
+ ReturnValueCondition(WithinRange, Range(0, Ssize_tMax))},
+ ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(BufferSize(/*Buffer=*/ArgNo(1),
/*BufSize=*/ArgNo(2))));
@@ -2252,7 +2579,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, StructMsghdrPtrTy, IntTy},
RetType{Ssize_tTy}),
Summary(NoEvalCall)
- .Case({ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))})
+ .Case({ReturnValueCondition(WithinRange, Range(0, Ssize_tMax))},
+ ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
@@ -2262,7 +2591,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, ConstStructMsghdrPtrTy, IntTy},
RetType{Ssize_tTy}),
Summary(NoEvalCall)
- .Case({ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))})
+ .Case({ReturnValueCondition(WithinRange, Range(0, Ssize_tMax))},
+ ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
@@ -2273,7 +2604,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{IntTy, IntTy, IntTy, ConstVoidPtrTy, Socklen_tTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(3)))
.ArgConstraint(
BufferSize(/*Buffer=*/ArgNo(3), /*BufSize=*/ArgNo(4)))
@@ -2289,7 +2621,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Socklen_tPtrRestrictTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(3)))
.ArgConstraint(NotNull(ArgNo(4))));
@@ -2300,7 +2633,9 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
RetType{Ssize_tTy}),
Summary(NoEvalCall)
.Case({ReturnValueCondition(LessThanOrEq, ArgNo(2)),
- ReturnValueCondition(WithinRange, Range(-1, Ssize_tMax))})
+ ReturnValueCondition(WithinRange, Range(0, Ssize_tMax))},
+ ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(ArgumentCondition(0, WithinRange, Range(0, IntMax)))
.ArgConstraint(BufferSize(/*Buffer=*/ArgNo(1),
/*BufSize=*/ArgNo(2))));
@@ -2310,7 +2645,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"socketpair",
Signature(ArgTypes{IntTy, IntTy, IntTy, IntPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(3))));
// int getnameinfo(const struct sockaddr *restrict sa, socklen_t salen,
@@ -2348,7 +2684,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"utime",
Signature(ArgTypes{ConstCharPtrTy, StructUtimbufPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(0))));
Optional<QualType> StructTimespecTy = lookupTy("timespec");
@@ -2361,7 +2698,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"futimens",
Signature(ArgTypes{IntTy, ConstStructTimespecPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(
ArgumentCondition(0, WithinRange, Range(0, IntMax))));
@@ -2372,7 +2710,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
ConstStructTimespecPtrTy, IntTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(1))));
Optional<QualType> StructTimevalTy = lookupTy("timeval");
@@ -2385,7 +2724,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{ConstCharPtrTy, ConstStructTimevalPtrTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(0))));
// int nanosleep(const struct timespec *rqtp, struct timespec *rmtp);
@@ -2394,7 +2734,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
Signature(ArgTypes{ConstStructTimespecPtrTy, StructTimespecPtrTy},
RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(0))));
Optional<QualType> Time_tTy = lookupTy("time_t");
@@ -2470,7 +2811,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"clock_gettime",
Signature(ArgTypes{Clockid_tTy, StructTimespecPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(1))));
Optional<QualType> StructItimervalTy = lookupTy("itimerval");
@@ -2481,7 +2823,8 @@ void StdLibraryFunctionsChecker::initFunctionSummaries(
"getitimer",
Signature(ArgTypes{IntTy, StructItimervalPtrTy}, RetType{IntTy}),
Summary(NoEvalCall)
- .Case(ReturnsZeroOrMinusOne)
+ .Case(ReturnsZero, ErrnoMustNotBeChecked)
+ .Case(ReturnsMinusOne, ErrnoNEZeroIrrelevant)
.ArgConstraint(NotNull(ArgNo(1))));
Optional<QualType> Pthread_cond_tTy = lookupTy("pthread_cond_t");
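The pattern repeated throughout this hunk splits the old combined ReturnsZeroOrMinusOne case into two cases so errno can be modeled per branch: on success errno is unspecified and reading it is a misuse, on failure it is known to be nonzero. A minimal caller-side sketch of the contract these summaries encode (the choice of listen() is illustrative):

#include <cerrno>
#include <sys/socket.h>

int checked_listen(int fd) {
  if (listen(fd, /*backlog=*/16) == -1)
    return errno; // failure branch: errno is known to be nonzero
  return 0;       // success branch: reading errno here would be flagged
}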
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
index 26218b8e0454..1aa665f0ef45 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StreamChecker.cpp
@@ -146,7 +146,7 @@ struct StreamState {
void Profile(llvm::FoldingSetNodeID &ID) const {
ID.AddPointer(LastOperation);
ID.AddInteger(State);
- ID.AddInteger(ErrorState);
+ ErrorState.Profile(ID);
ID.AddBoolean(FilePositionIndeterminate);
}
};
@@ -549,8 +549,9 @@ void StreamChecker::evalFreopen(const FnDescription *Desc,
State->BindExpr(CE, C.getLocationContext(), *StreamVal);
// Generate state for NULL return value.
// Stream switches to OpenFailed state.
- ProgramStateRef StateRetNull = State->BindExpr(CE, C.getLocationContext(),
- C.getSValBuilder().makeNull());
+ ProgramStateRef StateRetNull =
+ State->BindExpr(CE, C.getLocationContext(),
+ C.getSValBuilder().makeNullWithType(CE->getType()));
StateRetNotNull =
StateRetNotNull->set<StreamMap>(StreamSym, StreamState::getOpened(Desc));
@@ -671,24 +672,19 @@ void StreamChecker::evalFreadFwrite(const FnDescription *Desc,
if (!IsFread || (OldSS->ErrorState != ErrorFEof)) {
ProgramStateRef StateNotFailed =
State->BindExpr(CE, C.getLocationContext(), *NMembVal);
- if (StateNotFailed) {
- StateNotFailed = StateNotFailed->set<StreamMap>(
- StreamSym, StreamState::getOpened(Desc));
- C.addTransition(StateNotFailed);
- }
+ StateNotFailed =
+ StateNotFailed->set<StreamMap>(StreamSym, StreamState::getOpened(Desc));
+ C.addTransition(StateNotFailed);
}
// Add transition for the failed state.
- Optional<NonLoc> RetVal = makeRetVal(C, CE).castAs<NonLoc>();
- assert(RetVal && "Value should be NonLoc.");
+ NonLoc RetVal = makeRetVal(C, CE).castAs<NonLoc>();
ProgramStateRef StateFailed =
- State->BindExpr(CE, C.getLocationContext(), *RetVal);
- if (!StateFailed)
- return;
- auto Cond = C.getSValBuilder()
- .evalBinOpNN(State, BO_LT, *RetVal, *NMembVal,
- C.getASTContext().IntTy)
- .getAs<DefinedOrUnknownSVal>();
+ State->BindExpr(CE, C.getLocationContext(), RetVal);
+ auto Cond =
+ C.getSValBuilder()
+ .evalBinOpNN(State, BO_LT, RetVal, *NMembVal, C.getASTContext().IntTy)
+ .getAs<DefinedOrUnknownSVal>();
if (!Cond)
return;
StateFailed = StateFailed->assume(*Cond, true);
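The Profile fix above delegates to the member's own Profile() instead of truncating it through AddInteger(), so every bit of the error state participates in the folding-set identity. A minimal sketch of the idiom with stand-in types (not the checker's real ones):

#include "llvm/ADT/FoldingSet.h"

struct ErrorKind {
  unsigned Bits;
  void Profile(llvm::FoldingSetNodeID &ID) const { ID.AddInteger(Bits); }
};

struct MiniState {
  unsigned State;
  ErrorKind Error;
  void Profile(llvm::FoldingSetNodeID &ID) const {
    ID.AddInteger(State);
    Error.Profile(ID); // delegate: the member defines its own identity
  }
};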
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StringChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StringChecker.cpp
index 0d745f5d8d6f..2dc9e29ca906 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StringChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/StringChecker.cpp
@@ -69,7 +69,7 @@ void StringChecker::checkPreCall(const CallEvent &Call,
if (!isCharToStringCtor(Call, C.getASTContext()))
return;
const auto Param = Call.getArgSVal(0).getAs<Loc>();
- if (!Param.hasValue())
+ if (!Param)
return;
// We managed to constrain the parameter to non-null.
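This one-liner is part of a tree-wide modernization visible throughout this commit: Optional values convert contextually to bool, so !Param replaces !Param.hasValue(). A standalone equivalent using std::optional:

#include <optional>

std::optional<int> lookup(bool Found) {
  return Found ? std::optional<int>(42) : std::nullopt;
}

bool hasHit() {
  std::optional<int> V = lookup(true);
  return V && *V == 42; // contextual bool plus dereference, no hasValue()
}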
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.cpp
index 71b2ab834a07..44162094e49a 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.cpp
@@ -10,7 +10,7 @@
//
//===----------------------------------------------------------------------===//
-#include "Taint.h"
+#include "clang/StaticAnalyzer/Checkers/Taint.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugReporter.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramStateTrait.h"
@@ -37,7 +37,9 @@ void taint::printTaint(ProgramStateRef State, raw_ostream &Out, const char *NL,
Out << I.first << " : " << I.second << NL;
}
-void dumpTaint(ProgramStateRef State) { printTaint(State, llvm::errs()); }
+void taint::dumpTaint(ProgramStateRef State) {
+ printTaint(State, llvm::errs());
+}
ProgramStateRef taint::addTaint(ProgramStateRef State, const Stmt *S,
const LocationContext *LCtx,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.h b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.h
deleted file mode 100644
index 659a3c898d56..000000000000
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/Taint.h
+++ /dev/null
@@ -1,106 +0,0 @@
-//=== Taint.h - Taint tracking and basic propagation rules. --------*- C++ -*-//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// Defines basic, non-domain-specific mechanisms for tracking tainted values.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_TAINT_H
-#define LLVM_CLANG_LIB_STATICANALYZER_CHECKERS_TAINT_H
-
-#include "clang/StaticAnalyzer/Core/BugReporter/BugReporterVisitors.h"
-#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
-
-namespace clang {
-namespace ento {
-namespace taint {
-
-/// The type of taint, which helps to differentiate between different types of
-/// taint.
-using TaintTagType = unsigned;
-
-static constexpr TaintTagType TaintTagGeneric = 0;
-
-/// Create a new state in which the value of the statement is marked as tainted.
-LLVM_NODISCARD ProgramStateRef addTaint(ProgramStateRef State, const Stmt *S,
- const LocationContext *LCtx,
- TaintTagType Kind = TaintTagGeneric);
-
-/// Create a new state in which the value is marked as tainted.
-LLVM_NODISCARD ProgramStateRef addTaint(ProgramStateRef State, SVal V,
- TaintTagType Kind = TaintTagGeneric);
-
-/// Create a new state in which the symbol is marked as tainted.
-LLVM_NODISCARD ProgramStateRef addTaint(ProgramStateRef State, SymbolRef Sym,
- TaintTagType Kind = TaintTagGeneric);
-
-/// Create a new state in which the pointer represented by the region
-/// is marked as tainted.
-LLVM_NODISCARD ProgramStateRef addTaint(ProgramStateRef State,
- const MemRegion *R,
- TaintTagType Kind = TaintTagGeneric);
-
-LLVM_NODISCARD ProgramStateRef removeTaint(ProgramStateRef State, SVal V);
-
-LLVM_NODISCARD ProgramStateRef removeTaint(ProgramStateRef State,
- const MemRegion *R);
-
-LLVM_NODISCARD ProgramStateRef removeTaint(ProgramStateRef State,
- SymbolRef Sym);
-
-/// Create a new state in a which a sub-region of a given symbol is tainted.
-/// This might be necessary when referring to regions that can not have an
-/// individual symbol, e.g. if they are represented by the default binding of
-/// a LazyCompoundVal.
-LLVM_NODISCARD ProgramStateRef addPartialTaint(
- ProgramStateRef State, SymbolRef ParentSym, const SubRegion *SubRegion,
- TaintTagType Kind = TaintTagGeneric);
-
-/// Check if the statement has a tainted value in the given state.
-bool isTainted(ProgramStateRef State, const Stmt *S,
- const LocationContext *LCtx,
- TaintTagType Kind = TaintTagGeneric);
-
-/// Check if the value is tainted in the given state.
-bool isTainted(ProgramStateRef State, SVal V,
- TaintTagType Kind = TaintTagGeneric);
-
-/// Check if the symbol is tainted in the given state.
-bool isTainted(ProgramStateRef State, SymbolRef Sym,
- TaintTagType Kind = TaintTagGeneric);
-
-/// Check if the pointer represented by the region is tainted in the given
-/// state.
-bool isTainted(ProgramStateRef State, const MemRegion *Reg,
- TaintTagType Kind = TaintTagGeneric);
-
-void printTaint(ProgramStateRef State, raw_ostream &Out, const char *nl = "\n",
- const char *sep = "");
-
-LLVM_DUMP_METHOD void dumpTaint(ProgramStateRef State);
-
-/// The bug visitor prints a diagnostic message at the location where a given
-/// variable was tainted.
-class TaintBugVisitor final : public BugReporterVisitor {
-private:
- const SVal V;
-
-public:
- TaintBugVisitor(const SVal V) : V(V) {}
- void Profile(llvm::FoldingSetNodeID &ID) const override { ID.Add(V); }
-
- PathDiagnosticPieceRef VisitNode(const ExplodedNode *N,
- BugReporterContext &BRC,
- PathSensitiveBugReport &BR) override;
-};
-
-} // namespace taint
-} // namespace ento
-} // namespace clang
-
-#endif
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
index 916977c10c0c..614a2b2e4ec7 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TaintTesterChecker.cpp
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#include "Taint.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Checkers/Taint.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -22,27 +22,15 @@ using namespace ento;
using namespace taint;
namespace {
-class TaintTesterChecker : public Checker< check::PostStmt<Expr> > {
-
- mutable std::unique_ptr<BugType> BT;
- void initBugType() const;
-
- /// Given a pointer argument, get the symbol of the value it contains
- /// (points to).
- SymbolRef getPointedToSymbol(CheckerContext &C,
- const Expr* Arg,
- bool IssueWarning = true) const;
+class TaintTesterChecker : public Checker<check::PostStmt<Expr>> {
+ std::unique_ptr<BugType> BT =
+ std::make_unique<BugType>(this, "Tainted data", "General");
public:
void checkPostStmt(const Expr *E, CheckerContext &C) const;
};
}
-inline void TaintTesterChecker::initBugType() const {
- if (!BT)
- BT.reset(new BugType(this, "Tainted data", "General"));
-}
-
void TaintTesterChecker::checkPostStmt(const Expr *E,
CheckerContext &C) const {
ProgramStateRef State = C.getState();
@@ -51,7 +39,6 @@ void TaintTesterChecker::checkPostStmt(const Expr *E,
if (isTainted(State, E, C.getLocationContext())) {
if (ExplodedNode *N = C.generateNonFatalErrorNode()) {
- initBugType();
auto report = std::make_unique<PathSensitiveBugReport>(*BT, "tainted", N);
report->addRange(E->getSourceRange());
C.emitReport(std::move(report));
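The rewrite above replaces the mutable pointer plus lazy initBugType() pair with a default member initializer, so BT is valid from construction and checkPostStmt stays const without mutation. A standalone sketch of the pattern (the BugTypeStub stand-in is illustrative):

#include <memory>
#include <string>

struct BugTypeStub {
  BugTypeStub(const void *Checker, std::string Name, std::string Category) {}
};

class DemoChecker {
  // Initialized once, eagerly; no mutable state, no init-on-first-use.
  std::unique_ptr<BugTypeStub> BT =
      std::make_unique<BugTypeStub>(this, "Tainted data", "General");

public:
  const BugTypeStub &bugType() const { return *BT; }
};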
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TrustReturnsNonnullChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TrustReturnsNonnullChecker.cpp
new file mode 100644
index 000000000000..d80559c6a915
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/TrustReturnsNonnullChecker.cpp
@@ -0,0 +1,60 @@
+//== TrustReturnsNonnullChecker.cpp -- API nullability modeling -*- C++ -*--==//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This checker adds nullability-related assumptions to methods annotated with
+// returns_nonnull attribute.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/AST/Attr.h"
+#include "clang/AST/Decl.h"
+#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
+#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
+
+using namespace clang;
+using namespace ento;
+
+namespace {
+
+class TrustReturnsNonnullChecker : public Checker<check::PostCall> {
+
+public:
+ TrustReturnsNonnullChecker(ASTContext &Ctx) {}
+
+ void checkPostCall(const CallEvent &Call, CheckerContext &C) const {
+ ProgramStateRef State = C.getState();
+
+ if (isNonNullPtr(Call))
+ if (auto L = Call.getReturnValue().getAs<Loc>())
+ State = State->assume(*L, /*assumption=*/true);
+
+ C.addTransition(State);
+ }
+
+private:
+ /// \returns Whether the method declaration has the attribute returns_nonnull.
+ bool isNonNullPtr(const CallEvent &Call) const {
+ QualType ExprRetType = Call.getResultType();
+ const Decl *CallDeclaration = Call.getDecl();
+ if (!ExprRetType->isAnyPointerType() || !CallDeclaration)
+ return false;
+
+ return CallDeclaration->hasAttr<ReturnsNonNullAttr>();
+ }
+};
+
+} // namespace
+
+void ento::registerTrustReturnsNonnullChecker(CheckerManager &Mgr) {
+ Mgr.registerChecker<TrustReturnsNonnullChecker>(Mgr.getASTContext());
+}
+
+bool ento::shouldRegisterTrustReturnsNonnullChecker(const CheckerManager &mgr) {
+ return true;
+}
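A hypothetical input the new checker targets: once the callee carries the returns_nonnull attribute, the analyzer assumes the returned pointer is non-null and drops the null branch.

__attribute__((returns_nonnull)) int *get_buffer(void);

int read_first(void) {
  int *p = get_buffer();
  return *p; // no null-dereference path: p is assumed non-null
}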
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
index 477d910bc653..2658b473a477 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UndefResultChecker.cpp
@@ -53,8 +53,8 @@ static bool isArrayIndexOutOfBounds(CheckerContext &C, const Expr *Ex) {
DefinedOrUnknownSVal Idx = ER->getIndex().castAs<DefinedOrUnknownSVal>();
DefinedOrUnknownSVal ElementCount = getDynamicElementCount(
state, ER->getSuperRegion(), C.getSValBuilder(), ER->getValueType());
- ProgramStateRef StInBound = state->assumeInBound(Idx, ElementCount, true);
- ProgramStateRef StOutBound = state->assumeInBound(Idx, ElementCount, false);
+ ProgramStateRef StInBound, StOutBound;
+ std::tie(StInBound, StOutBound) = state->assumeInBoundDual(Idx, ElementCount);
return StOutBound && !StInBound;
}
@@ -145,7 +145,7 @@ void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
OS << '\'' << I->getSExtValue() << "\', which is";
OS << " greater or equal to the width of type '"
- << B->getLHS()->getType().getAsString() << "'.";
+ << B->getLHS()->getType() << "'.";
} else if (B->getOpcode() == BinaryOperatorKind::BO_Shl &&
C.isNegative(B->getLHS())) {
OS << "The result of the left shift is undefined because the left "
@@ -162,8 +162,7 @@ void UndefResultChecker::checkPostStmt(const BinaryOperator *B,
OS << "The result of the left shift is undefined due to shifting \'"
<< LHS->getSExtValue() << "\' by \'" << RHS->getZExtValue()
<< "\', which is unrepresentable in the unsigned version of "
- << "the return type \'" << B->getLHS()->getType().getAsString()
- << "\'";
+ << "the return type \'" << B->getLHS()->getType() << "\'";
Ex = B->getLHS();
} else {
OS << "The result of the '"
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp
index 4182b51c02b0..38e69e81d800 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedObjectChecker.cpp
@@ -330,7 +330,7 @@ bool FindUninitializedFields::isNonUnionUninit(const TypedValueRegion *R,
SVal V = State->getSVal(FieldVal);
- if (isDereferencableType(T) || V.getAs<nonloc::LocAsInteger>()) {
+ if (isDereferencableType(T) || isa<nonloc::LocAsInteger>(V)) {
if (isDereferencableUninit(FR, LocalChain))
ContainsUninitField = true;
continue;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp
index f0dd0bf813af..a6e81b3657a2 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UninitializedObject/UninitializedPointee.cpp
@@ -141,10 +141,10 @@ bool FindUninitializedFields::isDereferencableUninit(
SVal V = State->getSVal(FR);
assert((isDereferencableType(FR->getDecl()->getType()) ||
- V.getAs<nonloc::LocAsInteger>()) &&
+ isa<nonloc::LocAsInteger>(V)) &&
"This method only checks dereferenceable objects!");
- if (V.isUnknown() || V.getAs<loc::ConcreteInt>()) {
+ if (V.isUnknown() || isa<loc::ConcreteInt>(V)) {
IsAnyFieldInitialized = true;
return false;
}
@@ -230,8 +230,8 @@ static llvm::Optional<DereferenceInfo> dereference(ProgramStateRef State,
// If the static type of the field is a void pointer, or it is a
// nonloc::LocAsInteger, we need to cast it back to the dynamic type before
// dereferencing.
- bool NeedsCastBack = isVoidPointer(FR->getDecl()->getType()) ||
- V.getAs<nonloc::LocAsInteger>();
+ bool NeedsCastBack =
+ isVoidPointer(FR->getDecl()->getType()) || isa<nonloc::LocAsInteger>(V);
// The region we'd like to acquire.
const auto *R = V.getAsRegion()->getAs<TypedValueRegion>();
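The getAs<T>()-to-isa<T>(V) rewrites in this and the surrounding files rely on SVal supporting the LLVM-style casting interface, including the variadic form (isa<A, B>(V)) used a few hunks earlier. A standalone illustration of the same interface on a small hierarchy:

#include "llvm/Support/Casting.h"

struct Node {
  enum Kind { KA, KB, KC } K;
  Node(Kind K) : K(K) {}
};
struct A : Node {
  A() : Node(KA) {}
  static bool classof(const Node *N) { return N->K == KA; }
};
struct B : Node {
  B() : Node(KB) {}
  static bool classof(const Node *N) { return N->K == KB; }
};

bool isAOrB(const Node &N) { return llvm::isa<A, B>(N); } // variadic isa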
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
index 41adc36b021a..9da44d5c0d39 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/UnixAPIChecker.cpp
@@ -43,8 +43,6 @@ class UnixAPIMisuseChecker : public Checker< check::PreStmt<CallExpr> > {
mutable Optional<uint64_t> Val_O_CREAT;
public:
- DefaultBool CheckMisuse, CheckPortability;
-
void checkPreStmt(const CallExpr *CE, CheckerContext &C) const;
void CheckOpen(CheckerContext &C, const CallExpr *CE) const;
@@ -213,7 +211,7 @@ void UnixAPIMisuseChecker::CheckOpenVariant(CheckerContext &C,
// The definition of O_CREAT is platform specific. We need a better way
// of querying this information from the checking environment.
- if (!Val_O_CREAT.hasValue()) {
+ if (!Val_O_CREAT) {
if (C.getASTContext().getTargetInfo().getTriple().getVendor()
== llvm::Triple::Apple)
Val_O_CREAT = 0x0200;
@@ -229,7 +227,7 @@ void UnixAPIMisuseChecker::CheckOpenVariant(CheckerContext &C,
// Now check if oflags has O_CREAT set.
const Expr *oflagsEx = CE->getArg(FlagsArgIndex);
const SVal V = C.getSVal(oflagsEx);
- if (!V.getAs<NonLoc>()) {
+ if (!isa<NonLoc>(V)) {
// The case where 'V' can be a location can only be due to a bad header,
// so in this case bail out.
return;
@@ -503,7 +501,7 @@ void UnixAPIPortabilityChecker::checkPreStmt(const CallExpr *CE,
mgr.registerChecker<CHECKERNAME>(); \
} \
\
- bool ento::shouldRegister##CHECKERNAME(const CheckerManager &mgr) { \
+ bool ento::shouldRegister##CHECKERNAME(const CheckerManager &mgr) { \
return true; \
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
index 96501215c689..cf519b085892 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/VLASizeChecker.cpp
@@ -13,9 +13,9 @@
//
//===----------------------------------------------------------------------===//
-#include "Taint.h"
#include "clang/AST/CharUnits.h"
#include "clang/StaticAnalyzer/Checkers/BuiltinCheckerRegistration.h"
+#include "clang/StaticAnalyzer/Checkers/Taint.h"
#include "clang/StaticAnalyzer/Core/BugReporter/BugType.h"
#include "clang/StaticAnalyzer/Core/Checker.h"
#include "clang/StaticAnalyzer/Core/CheckerManager.h"
@@ -278,8 +278,7 @@ void VLASizeChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
if (!State)
return;
- auto ArraySizeNL = ArraySize.getAs<NonLoc>();
- if (!ArraySizeNL) {
+ if (!isa<NonLoc>(ArraySize)) {
// Array size could not be determined but state may contain new assumptions.
C.addTransition(State);
return;
@@ -289,7 +288,7 @@ void VLASizeChecker::checkPreStmt(const DeclStmt *DS, CheckerContext &C) const {
if (VD) {
State =
setDynamicExtent(State, State->getRegion(VD, C.getLocationContext()),
- ArraySize.castAs<DefinedOrUnknownSVal>(), SVB);
+ ArraySize.castAs<NonLoc>(), SVB);
}
// Remember our assumptions!
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
index 60da4fca12e6..fbefd5f9ffdc 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/ValistChecker.cpp
@@ -46,7 +46,7 @@ public:
CK_NumCheckKinds
};
- DefaultBool ChecksEnabled[CK_NumCheckKinds];
+ bool ChecksEnabled[CK_NumCheckKinds] = {false};
CheckerNameRef CheckNames[CK_NumCheckKinds];
void checkPreStmt(const VAArgExpr *VAA, CheckerContext &C) const;
@@ -178,7 +178,7 @@ const MemRegion *ValistChecker::getVAListAsRegion(SVal SV, const Expr *E,
if (isa<ParmVarDecl>(DeclReg->getDecl()))
Reg = C.getState()->getSVal(SV.castAs<Loc>()).getAsRegion();
}
- IsSymbolic = Reg && Reg->getAs<SymbolicRegion>();
+ IsSymbolic = Reg && Reg->getBaseRegion()->getAs<SymbolicRegion>();
// Some VarRegion based VA lists reach here as ElementRegions.
const auto *EReg = dyn_cast_or_null<ElementRegion>(Reg);
return (EReg && VaListModelledAsArray) ? EReg->getSuperRegion() : Reg;
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/InvalidPtrChecker.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/InvalidPtrChecker.cpp
index d7d573cd2d3b..084789509533 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/InvalidPtrChecker.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Checkers/cert/InvalidPtrChecker.cpp
@@ -82,9 +82,7 @@ public:
REGISTER_SET_WITH_PROGRAMSTATE(InvalidMemoryRegions, const MemRegion *)
// Stores the region of the environment pointer of 'main' (if present).
-// Note: This pointer has type 'const MemRegion *', however the trait is only
-// specialized to 'const void*' and 'void*'
-REGISTER_TRAIT_WITH_PROGRAMSTATE(EnvPtrRegion, const void *)
+REGISTER_TRAIT_WITH_PROGRAMSTATE(EnvPtrRegion, const MemRegion *)
// Stores key-value pairs, where key is function declaration and value is
// pointer to memory region returned by previous call of this function
@@ -95,11 +93,9 @@ void InvalidPtrChecker::EnvpInvalidatingCall(const CallEvent &Call,
CheckerContext &C) const {
StringRef FunctionName = Call.getCalleeIdentifier()->getName();
ProgramStateRef State = C.getState();
- const auto *Reg = State->get<EnvPtrRegion>();
- if (!Reg)
+ const MemRegion *SymbolicEnvPtrRegion = State->get<EnvPtrRegion>();
+ if (!SymbolicEnvPtrRegion)
return;
- const auto *SymbolicEnvPtrRegion =
- reinterpret_cast<const MemRegion *>(const_cast<const void *>(Reg));
State = State->add<InvalidMemoryRegions>(SymbolicEnvPtrRegion);
@@ -132,7 +128,7 @@ void InvalidPtrChecker::postPreviousReturnInvalidatingCall(
return;
Out << '\'';
FD->getNameForDiagnostic(Out, FD->getASTContext().getLangOpts(), true);
- Out << "' call may invalidate the the result of the previous " << '\'';
+ Out << "' call may invalidate the result of the previous " << '\'';
FD->getNameForDiagnostic(Out, FD->getASTContext().getLangOpts(), true);
Out << '\'';
});
@@ -245,9 +241,7 @@ void InvalidPtrChecker::checkBeginFunction(CheckerContext &C) const {
// Save the memory region pointed by the environment pointer parameter of
// 'main'.
- State = State->set<EnvPtrRegion>(
- reinterpret_cast<void *>(const_cast<MemRegion *>(EnvpReg)));
- C.addTransition(State);
+ C.addTransition(State->set<EnvPtrRegion>(EnvpReg));
}
// Check if invalidated region is being dereferenced.
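With the trait registered directly on const MemRegion *, both the store and the load are typed and the reinterpret_cast round-trips disappear. A sketch of the resulting usage, assuming the analyzer headers (the trait name is illustrative):

REGISTER_TRAIT_WITH_PROGRAMSTATE(DemoEnvRegion, const MemRegion *)

static void useEnvRegion(ProgramStateRef State) {
  if (const MemRegion *R = State->get<DemoEnvRegion>()) {
    // R is already a typed region; no casts are needed on either end.
    (void)R;
  }
}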
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
index 8cd7f75e4e38..79d19a3b99f2 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/AnalyzerOptions.cpp
@@ -77,7 +77,18 @@ AnalyzerOptions::getExplorationStrategy() const {
.Case("bfs_block_dfs_contents",
ExplorationStrategyKind::BFSBlockDFSContents)
.Default(None);
- assert(K.hasValue() && "User mode is invalid.");
+ assert(K && "User mode is invalid.");
+ return K.getValue();
+}
+
+CTUPhase1InliningKind AnalyzerOptions::getCTUPhase1Inlining() const {
+ auto K = llvm::StringSwitch<llvm::Optional<CTUPhase1InliningKind>>(
+ CTUPhase1InliningMode)
+ .Case("none", CTUPhase1InliningKind::None)
+ .Case("small", CTUPhase1InliningKind::Small)
+ .Case("all", CTUPhase1InliningKind::All)
+ .Default(None);
+ assert(K && "CTU inlining mode is invalid.");
return K.getValue();
}
@@ -89,7 +100,7 @@ IPAKind AnalyzerOptions::getIPAMode() const {
.Case("dynamic", IPAK_DynamicDispatch)
.Case("dynamic-bifurcate", IPAK_DynamicDispatchBifurcate)
.Default(None);
- assert(K.hasValue() && "IPA Mode is invalid.");
+ assert(K && "IPA Mode is invalid.");
return K.getValue();
}
@@ -109,7 +120,7 @@ AnalyzerOptions::mayInlineCXXMemberFunction(
.Case("none", CIMK_None)
.Default(None);
- assert(K.hasValue() && "Invalid c++ member function inlining mode.");
+ assert(K && "Invalid c++ member function inlining mode.");
return *K >= Param;
}
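The new getter parses the ctu-phase1-inlining analyzer-config string into CTUPhase1InliningKind using the same StringSwitch idiom as its neighbors. A standalone restatement of that parsing idiom (enum and function names are illustrative):

#include "llvm/ADT/StringSwitch.h"
#include <optional>

enum class InliningKind { None, Small, All };

std::optional<InliningKind> parseInlining(llvm::StringRef Mode) {
  return llvm::StringSwitch<std::optional<InliningKind>>(Mode)
      .Case("none", InliningKind::None)
      .Case("small", InliningKind::Small)
      .Case("all", InliningKind::All)
      .Default(std::nullopt); // caller asserts on invalid input
}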
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
index 771ed2578f6d..a2efe14f1045 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporter.cpp
@@ -1883,7 +1883,7 @@ static bool optimizeEdges(const PathDiagnosticConstruct &C, PathPieces &path,
lexicalContains(PM, s1Start, s1End)) {
SourceRange EdgeRange(PieceI->getEndLocation().asLocation(),
PieceI->getStartLocation().asLocation());
- if (!getLengthOnSingleLine(SM, EdgeRange).hasValue())
+ if (!getLengthOnSingleLine(SM, EdgeRange))
removeEdge = true;
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
index e13387fb1fc8..5b72c91ccd74 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/BugReporterVisitors.cpp
@@ -82,6 +82,10 @@ static const Expr *peelOffPointerArithmetic(const BinaryOperator *B) {
return nullptr;
}
+/// \return A subexpression of @c Ex which represents the
+/// expression-of-interest.
+static const Expr *peelOffOuterExpr(const Expr *Ex, const ExplodedNode *N);
+
/// Given that expression S represents a pointer that would be dereferenced,
/// try to find a sub-expression from which the pointer came from.
/// This is used for tracking down origins of a null or undefined value:
@@ -254,7 +258,7 @@ static bool isVarAnInterestingCondition(const Expr *CondVarExpr,
static bool isInterestingExpr(const Expr *E, const ExplodedNode *N,
const PathSensitiveBugReport *B) {
if (Optional<SVal> V = getSValForVar(E, N))
- return B->getInterestingnessKind(*V).hasValue();
+ return B->getInterestingnessKind(*V).has_value();
return false;
}
@@ -523,11 +527,6 @@ public:
ID.AddPointer(RegionOfInterest);
}
- void *getTag() const {
- static int Tag = 0;
- return static_cast<void *>(&Tag);
- }
-
private:
/// \return Whether \c RegionOfInterest was modified at \p CurrN compared to
/// the value it holds in \p CallExitBeginN.
@@ -915,7 +914,7 @@ public:
const SVal V) {
AnalyzerOptions &Options = N->getState()->getAnalysisManager().options;
if (EnableNullFPSuppression && Options.ShouldSuppressNullReturnPaths &&
- V.getAs<Loc>())
+ isa<Loc>(V))
BR.addVisitor<MacroNullReturnSuppressionVisitor>(R->getAs<SubRegion>(),
V);
}
@@ -1031,14 +1030,13 @@ public:
if (RetE->isGLValue()) {
if ((LValue = V.getAs<Loc>())) {
SVal RValue = State->getRawSVal(*LValue, RetE->getType());
- if (RValue.getAs<DefinedSVal>())
+ if (isa<DefinedSVal>(RValue))
V = RValue;
}
}
// Ignore aggregate rvalues.
- if (V.getAs<nonloc::LazyCompoundVal>() ||
- V.getAs<nonloc::CompoundVal>())
+ if (isa<nonloc::LazyCompoundVal, nonloc::CompoundVal>(V))
return nullptr;
RetE = RetE->IgnoreParenCasts();
@@ -1053,7 +1051,7 @@ public:
bool WouldEventBeMeaningless = false;
if (State->isNull(V).isConstrainedTrue()) {
- if (V.getAs<Loc>()) {
+ if (isa<Loc>(V)) {
// If we have counter-suppression enabled, make sure we keep visiting
// future nodes. We want to emit a path note as well, in case
@@ -1083,10 +1081,7 @@ public:
if (N->getCFG().size() == 3)
WouldEventBeMeaningless = true;
- if (V.getAs<Loc>())
- Out << "Returning pointer";
- else
- Out << "Returning value";
+ Out << (isa<Loc>(V) ? "Returning pointer" : "Returning value");
}
}
@@ -1309,7 +1304,7 @@ static void showBRDiagnostics(llvm::raw_svector_ostream &OS, StoreInfo SI) {
llvm_unreachable("Unexpected store kind");
}
- if (SI.Value.getAs<loc::ConcreteInt>()) {
+ if (isa<loc::ConcreteInt>(SI.Value)) {
OS << Action << (isObjCPointer(SI.Dest) ? "nil" : "a null pointer value");
} else if (auto CVal = SI.Value.getAs<nonloc::ConcreteInt>()) {
@@ -1352,7 +1347,7 @@ static void showBRParamDiagnostics(llvm::raw_svector_ostream &OS,
OS << "Passing ";
- if (SI.Value.getAs<loc::ConcreteInt>()) {
+ if (isa<loc::ConcreteInt>(SI.Value)) {
OS << (isObjCPointer(Param) ? "nil object reference"
: "null pointer value");
@@ -1383,7 +1378,7 @@ static void showBRDefaultDiagnostics(llvm::raw_svector_ostream &OS,
StoreInfo SI) {
const bool HasSuffix = SI.Dest->canPrintPretty();
- if (SI.Value.getAs<loc::ConcreteInt>()) {
+ if (isa<loc::ConcreteInt>(SI.Value)) {
OS << (isObjCPointer(SI.Dest) ? "nil object reference stored"
: (HasSuffix ? "Null pointer value stored"
: "Storing null pointer value"));
@@ -1681,7 +1676,7 @@ PathDiagnosticPieceRef TrackConstraintBRVisitor::VisitNode(
SmallString<64> sbuf;
llvm::raw_svector_ostream os(sbuf);
- if (Constraint.getAs<Loc>()) {
+ if (isa<Loc>(Constraint)) {
os << "Assuming pointer value is ";
os << (Assumption ? "non-null" : "null");
}
@@ -1691,6 +1686,13 @@ PathDiagnosticPieceRef TrackConstraintBRVisitor::VisitNode(
// Construct a new PathDiagnosticPiece.
ProgramPoint P = N->getLocation();
+
+ // If this node already has a specialized note, it's probably better

+ // than our generic note.
+ // FIXME: This only looks for note tags, not for other ways to add a note.
+ if (isa_and_nonnull<NoteTag>(P.getTag()))
+ return nullptr;
+
PathDiagnosticLocation L =
PathDiagnosticLocation::create(P, BRC.getSourceManager());
if (!L.isValid())
@@ -1919,11 +1921,33 @@ TrackControlDependencyCondBRVisitor::VisitNode(const ExplodedNode *N,
return nullptr;
if (const Expr *Condition = NB->getLastCondition()) {
+
+ // If we can't retrieve a sensible condition, just bail out.
+ const Expr *InnerExpr = peelOffOuterExpr(Condition, N);
+ if (!InnerExpr)
+ return nullptr;
+
+ // If the condition was a function call, we likely won't gain much from
+ // tracking it either. Evidence suggests that it will mostly trigger in
+ // scenarios like this:
+ //
+ // void f(int *x) {
+ // x = nullptr;
+ // if (alwaysTrue()) // We don't need a whole lot of explanation
+ // // here, the function name is good enough.
+ // *x = 5;
+ // }
+ //
+ // It's easy to create a counterexample where this heuristic would make us
+ // lose valuable information, but we've never really seen one in practice.
+ if (isa<CallExpr>(InnerExpr))
+ return nullptr;
+
// Keeping track of the already tracked conditions on a visitor level
// isn't sufficient, because a new visitor is created for each tracked
// expression, hence the BugReport level set.
if (BR.addTrackedCondition(N)) {
- getParentTracker().track(Condition, N,
+ getParentTracker().track(InnerExpr, N,
{bugreporter::TrackingKind::Condition,
/*EnableNullFPSuppression=*/false});
return constructDebugPieceForTrackedCondition(Condition, N, BRC);
@@ -1938,10 +1962,8 @@ TrackControlDependencyCondBRVisitor::VisitNode(const ExplodedNode *N,
// Implementation of trackExpressionValue.
//===----------------------------------------------------------------------===//
-/// \return A subexpression of @c Ex which represents the
-/// expression-of-interest.
-static const Expr *peelOffOuterExpr(const Expr *Ex,
- const ExplodedNode *N) {
+static const Expr *peelOffOuterExpr(const Expr *Ex, const ExplodedNode *N) {
+
Ex = Ex->IgnoreParenCasts();
if (const auto *FE = dyn_cast<FullExpr>(Ex))
return peelOffOuterExpr(FE->getSubExpr(), N);
@@ -2333,7 +2355,7 @@ public:
// well. Try to use the correct type when looking up the value.
SVal RVal;
if (ExplodedGraph::isInterestingLValueExpr(Inner))
- RVal = LVState->getRawSVal(L.getValue(), Inner->getType());
+ RVal = LVState->getRawSVal(*L, Inner->getType());
else if (CanDereference)
RVal = LVState->getSVal(L->getRegion());
@@ -2927,7 +2949,7 @@ PathDiagnosticPieceRef ConditionBRVisitor::VisitTrueTest(
PathDiagnosticLocation Loc(Cond, SM, LCtx);
auto event = std::make_shared<PathDiagnosticEventPiece>(Loc, Message);
- if (shouldPrune.hasValue())
+ if (shouldPrune)
event->setPrunable(shouldPrune.getValue());
return event;
}
@@ -3055,7 +3077,7 @@ bool ConditionBRVisitor::printValue(const Expr *CondVarExpr, raw_ostream &Out,
if (!IsAssuming)
IntValue = getConcreteIntegerValue(CondVarExpr, N);
- if (IsAssuming || !IntValue.hasValue()) {
+ if (IsAssuming || !IntValue) {
if (Ty->isBooleanType())
Out << (TookTrue ? "true" : "false");
else
@@ -3257,7 +3279,7 @@ void FalsePositiveRefutationBRVisitor::finalizeVisitor(
// And check for satisfiability
Optional<bool> IsSAT = RefutationSolver->check();
- if (!IsSAT.hasValue())
+ if (!IsSAT)
return;
if (!IsSAT.getValue())
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallDescription.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallDescription.cpp
index 810fe365d021..bb8b7492e248 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallDescription.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallDescription.cpp
@@ -13,6 +13,7 @@
//===----------------------------------------------------------------------===//
#include "clang/StaticAnalyzer/Core/PathSensitive/CallDescription.h"
+#include "clang/AST/Decl.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CallEvent.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/CheckerContext.h"
#include "llvm/ADT/ArrayRef.h"
@@ -61,22 +62,39 @@ bool ento::CallDescription::matches(const CallEvent &Call) const {
if (!FD)
return false;
+ return matchesImpl(FD, Call.getNumArgs(), Call.parameters().size());
+}
+
+bool ento::CallDescription::matchesAsWritten(const CallExpr &CE) const {
+ const auto *FD = dyn_cast_or_null<FunctionDecl>(CE.getCalleeDecl());
+ if (!FD)
+ return false;
+
+ return matchesImpl(FD, CE.getNumArgs(), FD->param_size());
+}
+
+bool ento::CallDescription::matchesImpl(const FunctionDecl *Callee,
+ size_t ArgCount,
+ size_t ParamCount) const {
+ const auto *FD = Callee;
+ if (!FD)
+ return false;
+
if (Flags & CDF_MaybeBuiltin) {
return CheckerContext::isCLibraryFunction(FD, getFunctionName()) &&
- (!RequiredArgs || *RequiredArgs <= Call.getNumArgs()) &&
- (!RequiredParams || *RequiredParams <= Call.parameters().size());
+ (!RequiredArgs || *RequiredArgs <= ArgCount) &&
+ (!RequiredParams || *RequiredParams <= ParamCount);
}
- if (!II.hasValue()) {
- II = &Call.getState()->getStateManager().getContext().Idents.get(
- getFunctionName());
+ if (!II) {
+ II = &FD->getASTContext().Idents.get(getFunctionName());
}
const auto MatchNameOnly = [](const CallDescription &CD,
const NamedDecl *ND) -> bool {
DeclarationName Name = ND->getDeclName();
if (const auto *II = Name.getAsIdentifierInfo())
- return II == CD.II.getValue(); // Fast case.
+ return II == *CD.II; // Fast case.
// Fallback to the slow stringification and comparison for:
// C++ overloaded operators, constructors, destructors, etc.
@@ -86,11 +104,11 @@ bool ento::CallDescription::matches(const CallEvent &Call) const {
};
const auto ExactMatchArgAndParamCounts =
- [](const CallEvent &Call, const CallDescription &CD) -> bool {
- const bool ArgsMatch =
- !CD.RequiredArgs || *CD.RequiredArgs == Call.getNumArgs();
+ [](size_t ArgCount, size_t ParamCount,
+ const CallDescription &CD) -> bool {
+ const bool ArgsMatch = !CD.RequiredArgs || *CD.RequiredArgs == ArgCount;
const bool ParamsMatch =
- !CD.RequiredParams || *CD.RequiredParams == Call.parameters().size();
+ !CD.RequiredParams || *CD.RequiredParams == ParamCount;
return ArgsMatch && ParamsMatch;
};
@@ -122,7 +140,7 @@ bool ento::CallDescription::matches(const CallEvent &Call) const {
};
// Let's start matching...
- if (!ExactMatchArgAndParamCounts(Call, *this))
+ if (!ExactMatchArgAndParamCounts(ArgCount, ParamCount, *this))
return false;
if (!MatchNameOnly(*this, FD))
@@ -144,3 +162,7 @@ ento::CallDescriptionSet::CallDescriptionSet(
bool ento::CallDescriptionSet::contains(const CallEvent &Call) const {
return static_cast<bool>(Impl.lookup(Call));
}
+
+bool ento::CallDescriptionSet::containsAsWritten(const CallExpr &CE) const {
+ return static_cast<bool>(Impl.lookupAsWritten(CE));
+}
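The new matchesAsWritten entry point lets purely syntactic (AST-based) checkers reuse a CallDescription without a CallEvent or program state. A sketch, assuming the analyzer headers and the constructor shape of this revision (the memcpy description is illustrative):

const CallDescription MemcpyDesc({"memcpy"}, /*RequiredArgs=*/3);

static bool isMemcpyCall(const CallExpr &CE) {
  // Matches against the call as written in the source, no path state needed.
  return MemcpyDesc.matchesAsWritten(CE);
}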
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
index ae46709340d3..3a8d69df7a64 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CallEvent.cpp
@@ -515,20 +515,28 @@ RuntimeDefinition AnyFunctionCall::getRuntimeDefinition() const {
llvm::dbgs() << "Using autosynthesized body for " << FD->getName()
<< "\n";
});
- if (Body) {
- const Decl* Decl = AD->getDecl();
- return RuntimeDefinition(Decl);
- }
ExprEngine &Engine = getState()->getStateManager().getOwningEngine();
+ cross_tu::CrossTranslationUnitContext &CTUCtx =
+ *Engine.getCrossTranslationUnitContext();
+
AnalyzerOptions &Opts = Engine.getAnalysisManager().options;
+ if (Body) {
+ const Decl* Decl = AD->getDecl();
+ if (Opts.IsNaiveCTUEnabled && CTUCtx.isImportedAsNew(Decl)) {
+ // A newly created definition, but we had error(s) during the import.
+ if (CTUCtx.hasError(Decl))
+ return {};
+ return RuntimeDefinition(Decl, /*Foreign=*/true);
+ }
+ return RuntimeDefinition(Decl, /*Foreign=*/false);
+ }
+
// Try to get CTU definition only if CTUDir is provided.
if (!Opts.IsNaiveCTUEnabled)
return {};
- cross_tu::CrossTranslationUnitContext &CTUCtx =
- *Engine.getCrossTranslationUnitContext();
llvm::Expected<const FunctionDecl *> CTUDeclOrError =
CTUCtx.getCrossTUDefinition(FD, Opts.CTUDir, Opts.CTUIndexName,
Opts.DisplayCTUProgress);
@@ -541,7 +549,7 @@ RuntimeDefinition AnyFunctionCall::getRuntimeDefinition() const {
return {};
}
- return RuntimeDefinition(*CTUDeclOrError);
+ return RuntimeDefinition(*CTUDeclOrError, /*Foreign=*/true);
}
void AnyFunctionCall::getInitialStackFrameContents(
@@ -672,7 +680,7 @@ SVal CXXInstanceCall::getCXXThisVal() const {
return UnknownVal();
SVal ThisVal = getSVal(Base);
- assert(ThisVal.isUnknownOrUndef() || ThisVal.getAs<Loc>());
+ assert(ThisVal.isUnknownOrUndef() || isa<Loc>(ThisVal));
return ThisVal;
}
@@ -764,7 +772,7 @@ void CXXInstanceCall::getInitialStackFrameContents(
// FIXME: CallEvent maybe shouldn't be directly accessing StoreManager.
Optional<SVal> V =
StateMgr.getStoreManager().evalBaseToDerived(ThisVal, Ty);
- if (!V.hasValue()) {
+ if (!V) {
// We might have suffered some sort of placement new earlier, so
// we're constructing in a completely unexpected storage.
// Fall back to a generic pointer cast for this-value.
@@ -1184,7 +1192,7 @@ lookupRuntimeDefinition(const ObjCInterfaceDecl *Interface,
PMC[{Interface, LookupSelector, InstanceMethod}];
// Query lookupPrivateMethod() if the cache does not hit.
- if (!Val.hasValue()) {
+ if (!Val) {
Val = Interface->lookupPrivateMethod(LookupSelector, InstanceMethod);
if (!*Val) {
@@ -1193,7 +1201,7 @@ lookupRuntimeDefinition(const ObjCInterfaceDecl *Interface,
}
}
- return Val.getValue();
+ return *Val;
}
RuntimeDefinition ObjCMethodCall::getRuntimeDefinition() const {
@@ -1398,7 +1406,7 @@ CallEventManager::getCaller(const StackFrameContext *CalleeCtx,
Trigger = Dtor->getBody();
return getCXXDestructorCall(Dtor, Trigger, ThisVal.getAsRegion(),
- E.getAs<CFGBaseDtor>().hasValue(), State,
+ E.getAs<CFGBaseDtor>().has_value(), State,
CallerCtx);
}
@@ -1408,6 +1416,8 @@ CallEventRef<> CallEventManager::getCall(const Stmt *S, ProgramStateRef State,
return getSimpleCall(CE, State, LC);
} else if (const auto *NE = dyn_cast<CXXNewExpr>(S)) {
return getCXXAllocatorCall(NE, State, LC);
+ } else if (const auto *DE = dyn_cast<CXXDeleteExpr>(S)) {
+ return getCXXDeallocatorCall(DE, State, LC);
} else if (const auto *ME = dyn_cast<ObjCMessageExpr>(S)) {
return getObjCMethodCall(ME, State, LC);
} else {
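With the added branch, a delete expression is now modeled through a dedicated deallocator call event instead of falling through to the generic case. The affected source pattern is simply:

struct S { int X; };
void destroy(S *P) { delete P; } // CXXDeleteExpr -> getCXXDeallocatorCall()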
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
index 4c684c3ffd9b..1e2532d27633 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CheckerContext.cpp
@@ -55,8 +55,29 @@ bool CheckerContext::isCLibraryFunction(const FunctionDecl *FD,
if (Name.empty())
return true;
StringRef BName = FD->getASTContext().BuiltinInfo.getName(BId);
- if (BName.contains(Name))
- return true;
+ size_t start = BName.find(Name);
+ if (start != StringRef::npos) {
+ // Accept exact match.
+ if (BName.size() == Name.size())
+ return true;
+
+ // v-- match starts here
+ // ...xxxxx...
+ // _xxxxx_
+ // ^ ^ lookbehind and lookahead characters
+
+ const auto MatchPredecessor = [=]() -> bool {
+ return start <= 0 || !llvm::isAlpha(BName[start - 1]);
+ };
+ const auto MatchSuccessor = [=]() -> bool {
+ std::size_t LookaheadPlace = start + Name.size();
+ return LookaheadPlace >= BName.size() ||
+ !llvm::isAlpha(BName[LookaheadPlace]);
+ };
+
+ if (MatchPredecessor() && MatchSuccessor())
+ return true;
+ }
}
const IdentifierInfo *II = FD->getIdentifier();
@@ -106,10 +127,10 @@ static bool evalComparison(SVal LHSVal, BinaryOperatorKind ComparisonOp,
if (LHSVal.isUnknownOrUndef())
return false;
ProgramStateManager &Mgr = State->getStateManager();
- if (!LHSVal.getAs<NonLoc>()) {
+ if (!isa<NonLoc>(LHSVal)) {
LHSVal = Mgr.getStoreManager().getBinding(State->getStore(),
LHSVal.castAs<Loc>());
- if (LHSVal.isUnknownOrUndef() || !LHSVal.getAs<NonLoc>())
+ if (LHSVal.isUnknownOrUndef() || !isa<NonLoc>(LHSVal))
return false;
}
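The stricter builtin-name matching accepts a substring hit only when it is not flanked by alphabetic characters, so "memcpy" still matches "__builtin_memcpy" but no longer matches "wmemcpy". A standalone restatement of the rule (like the original, it only examines the first occurrence):

#include <cctype>
#include <string>

bool matchesBuiltinName(const std::string &BName, const std::string &Name) {
  std::size_t Start = BName.find(Name);
  if (Start == std::string::npos)
    return false;
  if (BName.size() == Name.size())
    return true; // exact match
  bool OkBefore = Start == 0 ||
      !std::isalpha(static_cast<unsigned char>(BName[Start - 1]));
  std::size_t After = Start + Name.size();
  bool OkAfter = After >= BName.size() ||
      !std::isalpha(static_cast<unsigned char>(BName[After]));
  return OkBefore && OkAfter;
}
// matchesBuiltinName("__builtin_memcpy", "memcpy") -> true
// matchesBuiltinName("wmemcpy", "memcpy")          -> false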
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ConstraintManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ConstraintManager.cpp
index d642c3530268..9ef3455a110a 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ConstraintManager.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ConstraintManager.cpp
@@ -16,6 +16,7 @@
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/ProgramState_Fwd.h"
#include "clang/StaticAnalyzer/Core/PathSensitive/SVals.h"
+#include "llvm/ADT/ScopeExit.h"
using namespace clang;
using namespace ento;
@@ -41,3 +42,82 @@ ConditionTruthVal ConstraintManager::checkNull(ProgramStateRef State,
return ConditionTruthVal(true);
return {};
}
+
+template <typename AssumeFunction>
+ConstraintManager::ProgramStatePair
+ConstraintManager::assumeDualImpl(ProgramStateRef &State,
+ AssumeFunction &Assume) {
+ if (LLVM_UNLIKELY(State->isPosteriorlyOverconstrained()))
+ return {State, State};
+
+ // Assume functions might recurse (see `reAssume` or `tryRearrange`). During
+ // the recursion the State might not change anymore, which means we have
+ // reached a fixpoint.
+ // We avoid infinite recursion of assume calls by checking already visited
+ // States on the stack of assume function calls.
+ const ProgramState *RawSt = State.get();
+ if (LLVM_UNLIKELY(AssumeStack.contains(RawSt)))
+ return {State, State};
+ AssumeStack.push(RawSt);
+ auto AssumeStackBuilder =
+ llvm::make_scope_exit([this]() { AssumeStack.pop(); });
+
+ ProgramStateRef StTrue = Assume(true);
+
+ if (!StTrue) {
+ ProgramStateRef StFalse = Assume(false);
+ if (LLVM_UNLIKELY(!StFalse)) { // both infeasible
+ ProgramStateRef StInfeasible = State->cloneAsPosteriorlyOverconstrained();
+ assert(StInfeasible->isPosteriorlyOverconstrained());
+ // Checkers might rely on the API contract that the two returned states
+ // cannot both be null. Thus, we return StInfeasible for both branches
+ // because a Checker might unconditionally use one of them if the other
+ // is a nullptr. This may also happen with the non-dual and adjacent
+ // `assume(true)` and `assume(false)` calls. By implementing assume in
+ // terms of assumeDual, we can keep our API contract there as well.
+ return ProgramStatePair(StInfeasible, StInfeasible);
+ }
+ return ProgramStatePair(nullptr, StFalse);
+ }
+
+ ProgramStateRef StFalse = Assume(false);
+ if (!StFalse) {
+ return ProgramStatePair(StTrue, nullptr);
+ }
+
+ return ProgramStatePair(StTrue, StFalse);
+}
+
+ConstraintManager::ProgramStatePair
+ConstraintManager::assumeDual(ProgramStateRef State, DefinedSVal Cond) {
+ auto AssumeFun = [&](bool Assumption) {
+ return assumeInternal(State, Cond, Assumption);
+ };
+ return assumeDualImpl(State, AssumeFun);
+}
+
+ConstraintManager::ProgramStatePair
+ConstraintManager::assumeInclusiveRangeDual(ProgramStateRef State, NonLoc Value,
+ const llvm::APSInt &From,
+ const llvm::APSInt &To) {
+ auto AssumeFun = [&](bool Assumption) {
+ return assumeInclusiveRangeInternal(State, Value, From, To, Assumption);
+ };
+ return assumeDualImpl(State, AssumeFun);
+}
+
+ProgramStateRef ConstraintManager::assume(ProgramStateRef State,
+ DefinedSVal Cond, bool Assumption) {
+ ConstraintManager::ProgramStatePair R = assumeDual(State, Cond);
+ return Assumption ? R.first : R.second;
+}
+
+ProgramStateRef
+ConstraintManager::assumeInclusiveRange(ProgramStateRef State, NonLoc Value,
+ const llvm::APSInt &From,
+ const llvm::APSInt &To, bool InBound) {
+ ConstraintManager::ProgramStatePair R =
+ assumeInclusiveRangeDual(State, Value, From, To);
+ return InBound ? R.first : R.second;
+}
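Caller-side shape of the dual API, assuming the analyzer headers: a single query yields both branch states, and per the comment above, when both are infeasible the same overconstrained state is returned twice so the old at-least-one-non-null contract still holds.

static void splitOnCondition(ConstraintManager &CM, ProgramStateRef State,
                             DefinedSVal Cond) {
  ConstraintManager::ProgramStatePair P = CM.assumeDual(State, Cond);
  if (P.first && !P.second) {
    // Cond is definitely true on this path.
  } else if (!P.first && P.second) {
    // Cond is definitely false.
  } else {
    // Feasible both ways (or overconstrained): handle both states.
  }
}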
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
index d57bab154b61..de90f4a71be0 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/CoreEngine.cpp
@@ -43,6 +43,8 @@ using namespace ento;
STATISTIC(NumSteps,
"The # of steps executed.");
+STATISTIC(NumSTUSteps, "The # of STU steps executed.");
+STATISTIC(NumCTUSteps, "The # of CTU steps executed.");
STATISTIC(NumReachedMaxSteps,
"The # of times we reached the max number of steps.");
STATISTIC(NumPathsExplored,
@@ -73,11 +75,18 @@ static std::unique_ptr<WorkList> generateWorkList(AnalyzerOptions &Opts) {
CoreEngine::CoreEngine(ExprEngine &exprengine, FunctionSummariesTy *FS,
AnalyzerOptions &Opts)
: ExprEng(exprengine), WList(generateWorkList(Opts)),
+ CTUWList(Opts.IsNaiveCTUEnabled ? generateWorkList(Opts) : nullptr),
BCounterFactory(G.getAllocator()), FunctionSummaries(FS) {}
+void CoreEngine::setBlockCounter(BlockCounter C) {
+ WList->setBlockCounter(C);
+ if (CTUWList)
+ CTUWList->setBlockCounter(C);
+}
+
/// ExecuteWorkList - Run the worklist algorithm for a maximum number of steps.
-bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
- ProgramStateRef InitState) {
+bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned MaxSteps,
+ ProgramStateRef InitState) {
if (G.num_roots() == 0) { // Initialize the analysis by constructing
// the root if none exists.
@@ -100,7 +109,7 @@ bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
BlockEdge StartLoc(Entry, Succ, L);
// Set the current block counter to being empty.
- WList->setBlockCounter(BCounterFactory.GetEmptyCounter());
+ setBlockCounter(BCounterFactory.GetEmptyCounter());
if (!InitState)
InitState = ExprEng.getInitialState(L);
@@ -118,34 +127,54 @@ bool CoreEngine::ExecuteWorkList(const LocationContext *L, unsigned Steps,
}
// Check if we have a steps limit
- bool UnlimitedSteps = Steps == 0;
+ bool UnlimitedSteps = MaxSteps == 0;
+
// Cap our pre-reservation in the event that the user specifies
// a very large number of maximum steps.
const unsigned PreReservationCap = 4000000;
if(!UnlimitedSteps)
- G.reserve(std::min(Steps,PreReservationCap));
-
- while (WList->hasWork()) {
- if (!UnlimitedSteps) {
- if (Steps == 0) {
- NumReachedMaxSteps++;
- break;
+ G.reserve(std::min(MaxSteps, PreReservationCap));
+
+ auto ProcessWList = [this, UnlimitedSteps](unsigned MaxSteps) {
+ unsigned Steps = MaxSteps;
+ while (WList->hasWork()) {
+ if (!UnlimitedSteps) {
+ if (Steps == 0) {
+ NumReachedMaxSteps++;
+ break;
+ }
+ --Steps;
}
- --Steps;
- }
- NumSteps++;
+ NumSteps++;
- const WorkListUnit& WU = WList->dequeue();
+ const WorkListUnit &WU = WList->dequeue();
- // Set the current block counter.
- WList->setBlockCounter(WU.getBlockCounter());
+ // Set the current block counter.
+ setBlockCounter(WU.getBlockCounter());
- // Retrieve the node.
- ExplodedNode *Node = WU.getNode();
+ // Retrieve the node.
+ ExplodedNode *Node = WU.getNode();
- dispatchWorkItem(Node, Node->getLocation(), WU);
+ dispatchWorkItem(Node, Node->getLocation(), WU);
+ }
+ return MaxSteps - Steps;
+ };
+ const unsigned STUSteps = ProcessWList(MaxSteps);
+
+ if (CTUWList) {
+ NumSTUSteps += STUSteps;
+ const unsigned MinCTUSteps =
+ this->ExprEng.getAnalysisManager().options.CTUMaxNodesMin;
+ const unsigned Pct =
+ this->ExprEng.getAnalysisManager().options.CTUMaxNodesPercentage;
+ unsigned MaxCTUSteps = std::max(STUSteps * Pct / 100, MinCTUSteps);
+
+ WList = std::move(CTUWList);
+ const unsigned CTUSteps = ProcessWList(MaxCTUSteps);
+ NumCTUSteps += CTUSteps;
}
+
ExprEng.processEndWorklist();
return WList->hasWork();
}
@@ -282,7 +311,7 @@ void CoreEngine::HandleBlockEntrance(const BlockEntrance &L,
BlockCounter Counter = WList->getBlockCounter();
Counter = BCounterFactory.IncrementCount(Counter, LC->getStackFrame(),
BlockId);
- WList->setBlockCounter(Counter);
+ setBlockCounter(Counter);
// Process the entrance of the block.
if (Optional<CFGElement> E = L.getFirstElement()) {
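The reworked ExecuteWorkList above runs the single-TU worklist first, then gives the CTU phase a budget derived from the steps actually consumed: max(STUSteps * CTUMaxNodesPercentage / 100, CTUMaxNodesMin). A standalone restatement of the budget rule:

unsigned ctuBudget(unsigned STUSteps, unsigned Percentage, unsigned MinSteps) {
  unsigned Scaled = STUSteps * Percentage / 100; // share of phase-1 work
  return Scaled > MinSteps ? Scaled : MinSteps;  // never below the floor
}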
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/DynamicType.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/DynamicType.cpp
index 9ed915aafcab..06052cb99fd1 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/DynamicType.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/DynamicType.cpp
@@ -209,7 +209,7 @@ static raw_ostream &printJson(const DynamicTypeInfo &DTI, raw_ostream &Out,
if (ToPrint->isAnyPointerType())
ToPrint = ToPrint->getPointeeType();
- Out << '\"' << ToPrint.getAsString() << "\", \"sub_classable\": "
+ Out << '\"' << ToPrint << "\", \"sub_classable\": "
<< (DTI.canBeASubClass() ? "true" : "false");
}
return Out;
@@ -217,9 +217,9 @@ static raw_ostream &printJson(const DynamicTypeInfo &DTI, raw_ostream &Out,
static raw_ostream &printJson(const DynamicCastInfo &DCI, raw_ostream &Out,
const char *NL, unsigned int Space, bool IsDot) {
- return Out << "\"from\": \"" << DCI.from().getAsString() << "\", \"to\": \""
- << DCI.to().getAsString() << "\", \"kind\": \""
- << (DCI.succeeds() ? "success" : "fail") << "\"";
+ return Out << "\"from\": \"" << DCI.from() << "\", \"to\": \"" << DCI.to()
+ << "\", \"kind\": \"" << (DCI.succeeds() ? "success" : "fail")
+ << "\"";
}
template <class T, class U>
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Environment.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Environment.cpp
index 64e915a09ceb..719793fa224c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Environment.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Environment.cpp
@@ -118,7 +118,7 @@ SVal Environment::getSVal(const EnvironmentEntry &Entry,
case Stmt::SizeOfPackExprClass:
case Stmt::PredefinedExprClass:
// Known constants; defer to SValBuilder.
- return svalBuilder.getConstantVal(cast<Expr>(S)).getValue();
+ return *svalBuilder.getConstantVal(cast<Expr>(S));
case Stmt::ReturnStmtClass: {
const auto *RS = cast<ReturnStmt>(S);
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
index a170ef3885b2..45af22de50ae 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngine.cpp
@@ -7,7 +7,7 @@
//===----------------------------------------------------------------------===//
//
// This file defines a meta-engine for path-sensitive dataflow analysis that
-// is built on GREngine, but provides the boilerplate to execute transfer
+// is built on CoreEngine, but provides the boilerplate to execute transfer
// functions and build the ExplodedGraph at the expression level.
//
//===----------------------------------------------------------------------===//
@@ -118,18 +118,10 @@ namespace {
/// the construction context was present and contained references to these
/// AST nodes.
class ConstructedObjectKey {
- typedef std::pair<ConstructionContextItem, const LocationContext *>
- ConstructedObjectKeyImpl;
-
+ using ConstructedObjectKeyImpl =
+ std::pair<ConstructionContextItem, const LocationContext *>;
const ConstructedObjectKeyImpl Impl;
- const void *getAnyASTNodePtr() const {
- if (const Stmt *S = getItem().getStmtOrNull())
- return S;
- else
- return getItem().getCXXCtorInitializer();
- }
-
public:
explicit ConstructedObjectKey(const ConstructionContextItem &Item,
const LocationContext *LC)
@@ -200,24 +192,17 @@ REGISTER_TRAIT_WITH_PROGRAMSTATE(ObjectsUnderConstruction,
static const char* TagProviderName = "ExprEngine";
ExprEngine::ExprEngine(cross_tu::CrossTranslationUnitContext &CTU,
- AnalysisManager &mgr,
- SetOfConstDecls *VisitedCalleesIn,
- FunctionSummariesTy *FS,
- InliningModes HowToInlineIn)
- : CTU(CTU), AMgr(mgr),
- AnalysisDeclContexts(mgr.getAnalysisDeclContextManager()),
+ AnalysisManager &mgr, SetOfConstDecls *VisitedCalleesIn,
+ FunctionSummariesTy *FS, InliningModes HowToInlineIn)
+ : CTU(CTU), IsCTUEnabled(mgr.getAnalyzerOptions().IsNaiveCTUEnabled),
+ AMgr(mgr), AnalysisDeclContexts(mgr.getAnalysisDeclContextManager()),
Engine(*this, FS, mgr.getAnalyzerOptions()), G(Engine.getGraph()),
StateMgr(getContext(), mgr.getStoreManagerCreator(),
- mgr.getConstraintManagerCreator(), G.getAllocator(),
- this),
- SymMgr(StateMgr.getSymbolManager()),
- MRMgr(StateMgr.getRegionManager()),
- svalBuilder(StateMgr.getSValBuilder()),
- ObjCNoRet(mgr.getASTContext()),
- BR(mgr, *this),
- VisitedCallees(VisitedCalleesIn),
- HowToInline(HowToInlineIn)
- {
+ mgr.getConstraintManagerCreator(), G.getAllocator(), this),
+ SymMgr(StateMgr.getSymbolManager()), MRMgr(StateMgr.getRegionManager()),
+ svalBuilder(StateMgr.getSValBuilder()), ObjCNoRet(mgr.getASTContext()),
+ BR(mgr, *this), VisitedCallees(VisitedCalleesIn),
+ HowToInline(HowToInlineIn) {
unsigned TrimInterval = mgr.options.GraphTrimInterval;
if (TrimInterval != 0) {
// Enable eager node reclamation when constructing the ExplodedGraph.
@@ -319,7 +304,7 @@ ProgramStateRef ExprEngine::createTemporaryRegionIfNeeded(
if (!Result) {
// If we don't have an explicit result expression, we're in "if needed"
// mode. Only create a region if the current value is a NonLoc.
- if (!InitValWithAdjustments.getAs<NonLoc>()) {
+ if (!isa<NonLoc>(InitValWithAdjustments)) {
if (OutRegionWithAdjustments)
*OutRegionWithAdjustments = nullptr;
return State;
@@ -328,7 +313,7 @@ ProgramStateRef ExprEngine::createTemporaryRegionIfNeeded(
} else {
// We need to create a region no matter what. Make sure we don't try to
// stuff a Loc into a non-pointer temporary region.
- assert(!InitValWithAdjustments.getAs<Loc>() ||
+ assert(!isa<Loc>(InitValWithAdjustments) ||
Loc::isLocType(Result->getType()) ||
Result->getType()->isMemberPointerType());
}
@@ -620,6 +605,8 @@ void ExprEngine::printJson(raw_ostream &Out, ProgramStateRef State,
}
void ExprEngine::processEndWorklist() {
+ // This prints the name of the top-level function if we crash.
+ PrettyStackTraceLocationContext CrashInfo(getRootLocationContext());
getCheckerManager().runCheckersForEndAnalysis(G, BR, *this);
}
@@ -1251,6 +1238,7 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::OMPParallelForSimdDirectiveClass:
case Stmt::OMPParallelSectionsDirectiveClass:
case Stmt::OMPParallelMasterDirectiveClass:
+ case Stmt::OMPParallelMaskedDirectiveClass:
case Stmt::OMPTaskDirectiveClass:
case Stmt::OMPTaskyieldDirectiveClass:
case Stmt::OMPBarrierDirectiveClass:
@@ -1274,9 +1262,13 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::OMPTaskLoopDirectiveClass:
case Stmt::OMPTaskLoopSimdDirectiveClass:
case Stmt::OMPMasterTaskLoopDirectiveClass:
+ case Stmt::OMPMaskedTaskLoopDirectiveClass:
case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
+ case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
+ case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
+ case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
case Stmt::OMPDistributeDirectiveClass:
case Stmt::OMPDistributeParallelForDirectiveClass:
case Stmt::OMPDistributeParallelForSimdDirectiveClass:
@@ -1297,6 +1289,10 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::OMPDispatchDirectiveClass:
case Stmt::OMPMaskedDirectiveClass:
case Stmt::OMPGenericLoopDirectiveClass:
+ case Stmt::OMPTeamsGenericLoopDirectiveClass:
+ case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
+ case Stmt::OMPParallelGenericLoopDirectiveClass:
+ case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
case Stmt::CapturedStmtClass:
case Stmt::OMPUnrollDirectiveClass:
case Stmt::OMPMetaDirectiveClass: {
@@ -1342,8 +1338,9 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
case Stmt::GNUNullExprClass: {
// GNU __null is a pointer-width integer, not an actual pointer.
ProgramStateRef state = Pred->getState();
- state = state->BindExpr(S, Pred->getLocationContext(),
- svalBuilder.makeIntValWithPtrWidth(0, false));
+ state = state->BindExpr(
+ S, Pred->getLocationContext(),
+ svalBuilder.makeIntValWithWidth(getContext().VoidPtrTy, 0));
Bldr.generateNode(S, Pred, state);
break;
}
@@ -1370,10 +1367,14 @@ void ExprEngine::Visit(const Stmt *S, ExplodedNode *Pred,
break;
}
+ case Stmt::ArrayInitLoopExprClass:
+ Bldr.takeNodes(Pred);
+ VisitArrayInitLoopExpr(cast<ArrayInitLoopExpr>(S), Pred, Dst);
+ Bldr.addNodes(Dst);
+ break;
// Cases not handled yet; but will handle some day.
case Stmt::DesignatedInitExprClass:
case Stmt::DesignatedInitUpdateExprClass:
- case Stmt::ArrayInitLoopExprClass:
case Stmt::ArrayInitIndexExprClass:
case Stmt::ExtVectorElementExprClass:
case Stmt::ImaginaryLiteralClass:
@@ -2343,7 +2344,7 @@ void ExprEngine::processIndirectGoto(IndirectGotoNodeBuilder &builder) {
llvm_unreachable("No block with label.");
}
- if (V.getAs<loc::ConcreteInt>() || V.getAs<UndefinedVal>()) {
+ if (isa<UndefinedVal, loc::ConcreteInt>(V)) {
// Dispatch to the first target and mark it as a sink.
//ExplodedNode* N = builder.generateNode(builder.begin(), state, true);
// FIXME: add checker visit.
@@ -2598,15 +2599,144 @@ void ExprEngine::VisitCommonDeclRefExpr(const Expr *Ex, const NamedDecl *D,
// operator&.
return;
}
- if (isa<BindingDecl>(D)) {
- // FIXME: proper support for bound declarations.
- // For now, let's just prevent crashing.
+ if (const auto *BD = dyn_cast<BindingDecl>(D)) {
+ const auto *DD = cast<DecompositionDecl>(BD->getDecomposedDecl());
+
+ SVal Base = state->getLValue(DD, LCtx);
+ if (DD->getType()->isReferenceType()) {
+ Base = state->getSVal(Base.getAsRegion());
+ }
+
+ SVal V = UnknownVal();
+
+ // Handle binding to data members
+ if (const auto *ME = dyn_cast<MemberExpr>(BD->getBinding())) {
+ const auto *Field = cast<FieldDecl>(ME->getMemberDecl());
+ V = state->getLValue(Field, Base);
+ }
+ // Handle binding to arrays
+ else if (const auto *ASE = dyn_cast<ArraySubscriptExpr>(BD->getBinding())) {
+ SVal Idx = state->getSVal(ASE->getIdx(), LCtx);
+
+ // Note: the index of an element in a structured binding is automatically
+ // created and it is a unique identifier of the specific element. Thus it
+ // cannot be a value that varies at runtime.
+ assert(Idx.isConstant() && "BindingDecl array index is not a constant!");
+
+ V = state->getLValue(BD->getType(), Idx, Base);
+ }
+    // Handle binding to tuple-like structures
+ else if (BD->getHoldingVar()) {
+ // FIXME: handle tuples
+ return;
+ } else
+ llvm_unreachable("An unknown case of structured binding encountered!");
+
+ if (BD->getType()->isReferenceType())
+ V = state->getSVal(V.getAsRegion());
+
+ Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, V), nullptr,
+ ProgramPoint::PostLValueKind);
+
return;
}
llvm_unreachable("Support for this Decl not implemented.");
}
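
For reference, the three BindingDecl shapes distinguished by the new branch look like this at the source level (a hedged illustration; the declarations are hypothetical, not from the patch):

    #include <utility>

    struct Point { int x, y; };

    void bindings() {
      Point p{1, 2};
      auto [x, y] = p;             // data members -> MemberExpr binding
      int arr[2] = {3, 4};
      auto [a, b] = arr;           // array elements -> ArraySubscriptExpr binding
      std::pair<int, int> t{5, 6};
      auto [f, s] = t;             // tuple-like -> holding variable (still a FIXME)
    }
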
+/// VisitArrayInitLoopExpr - Transfer function for array init loop.
+void ExprEngine::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *Ex,
+ ExplodedNode *Pred,
+ ExplodedNodeSet &Dst) {
+ ExplodedNodeSet CheckerPreStmt;
+ getCheckerManager().runCheckersForPreStmt(CheckerPreStmt, Pred, Ex, *this);
+
+ ExplodedNodeSet EvalSet;
+ StmtNodeBuilder Bldr(CheckerPreStmt, EvalSet, *currBldrCtx);
+
+ const Expr *Arr = Ex->getCommonExpr()->getSourceExpr();
+
+ for (auto *Node : CheckerPreStmt) {
+ const LocationContext *LCtx = Node->getLocationContext();
+ ProgramStateRef state = Node->getState();
+
+ SVal Base = UnknownVal();
+
+    // The sub-expressions of this expression are not visited by any other
+    // transfer function, so they are handled here by matching their AST.
+
+ // Case of implicit copy or move ctor of object with array member
+ //
+ // Note: ExprEngine::VisitMemberExpr is not able to bind the array to the
+ // environment.
+ //
+ // struct S {
+ // int arr[2];
+ // };
+ //
+ //
+ // S a;
+ // S b = a;
+ //
+ // The AST in case of a *copy constructor* looks like this:
+ // ArrayInitLoopExpr
+ // |-OpaqueValueExpr
+ // | `-MemberExpr <-- match this
+ // | `-DeclRefExpr
+ // ` ...
+ //
+ //
+ // S c;
+    //  S d = std::move(c);
+ //
+ // In case of a *move constructor* the resulting AST looks like:
+ // ArrayInitLoopExpr
+ // |-OpaqueValueExpr
+ // | `-MemberExpr <-- match this first
+ // | `-CXXStaticCastExpr <-- match this after
+ // | `-DeclRefExpr
+ // ` ...
+ if (const auto *ME = dyn_cast<MemberExpr>(Arr)) {
+ Expr *MEBase = ME->getBase();
+
+ // Move ctor
+ if (auto CXXSCE = dyn_cast<CXXStaticCastExpr>(MEBase)) {
+ MEBase = CXXSCE->getSubExpr();
+ }
+
+ auto ObjDeclExpr = cast<DeclRefExpr>(MEBase);
+ SVal Obj = state->getLValue(cast<VarDecl>(ObjDeclExpr->getDecl()), LCtx);
+
+ Base = state->getLValue(cast<FieldDecl>(ME->getMemberDecl()), Obj);
+ }
+
+ // Case of lambda capture and decomposition declaration
+ //
+ // int arr[2];
+ //
+ // [arr]{ int a = arr[0]; }();
+ // auto[a, b] = arr;
+ //
+ // In both of these cases the AST looks like the following:
+ // ArrayInitLoopExpr
+ // |-OpaqueValueExpr
+ // | `-DeclRefExpr <-- match this
+ // ` ...
+ if (const DeclRefExpr *DRE = dyn_cast<DeclRefExpr>(Arr))
+ Base = state->getLValue(cast<VarDecl>(DRE->getDecl()), LCtx);
+
+ // Create a lazy compound value to the original array
+ if (const MemRegion *R = Base.getAsRegion())
+ Base = state->getSVal(R);
+ else
+ Base = UnknownVal();
+
+ Bldr.generateNode(Ex, Pred, state->BindExpr(Ex, LCtx, Base));
+ }
+
+ getCheckerManager().runCheckersForPostStmt(Dst, EvalSet, Ex, *this);
+}
+
/// VisitArraySubscriptExpr - Transfer function for array accesses
void ExprEngine::VisitArraySubscriptExpr(const ArraySubscriptExpr *A,
ExplodedNode *Pred,
@@ -2894,7 +3024,7 @@ void ExprEngine::evalBind(ExplodedNodeSet &Dst, const Stmt *StoreE,
// If the location is not a 'Loc', it will already be handled by
// the checkers. There is nothing left to do.
- if (!location.getAs<Loc>()) {
+ if (!isa<Loc>(location)) {
const ProgramPoint L = PostStore(StoreE, LC, /*Loc*/nullptr,
/*tag*/nullptr);
ProgramStateRef state = Pred->getState();
@@ -2964,7 +3094,7 @@ void ExprEngine::evalLoad(ExplodedNodeSet &Dst,
SVal location,
const ProgramPointTag *tag,
QualType LoadTy) {
- assert(!location.getAs<NonLoc>() && "location cannot be a NonLoc.");
+ assert(!isa<NonLoc>(location) && "location cannot be a NonLoc.");
assert(NodeEx);
assert(BoundEx);
// Evaluate the location (checks for bad dereferences).
@@ -3095,7 +3225,7 @@ void ExprEngine::VisitGCCAsmStmt(const GCCAsmStmt *A, ExplodedNode *Pred,
for (const Expr *O : A->outputs()) {
SVal X = state->getSVal(O, Pred->getLocationContext());
- assert(!X.getAs<NonLoc>()); // Should be an Lval, or unknown, undef.
+ assert(!isa<NonLoc>(X)); // Should be an Lval, or unknown, undef.
if (Optional<Loc> LV = X.getAs<Loc>())
state = state->bindLoc(*LV, UnknownVal(), Pred->getLocationContext());
@@ -3114,7 +3244,6 @@ void ExprEngine::VisitMSAsmStmt(const MSAsmStmt *A, ExplodedNode *Pred,
// Visualization.
//===----------------------------------------------------------------------===//
-#ifndef NDEBUG
namespace llvm {
template<>
@@ -3212,29 +3341,18 @@ struct DOTGraphTraits<ExplodedGraph*> : public DefaultDOTGraphTraits {
};
} // namespace llvm
-#endif
void ExprEngine::ViewGraph(bool trim) {
-#ifndef NDEBUG
std::string Filename = DumpGraph(trim);
llvm::DisplayGraph(Filename, false, llvm::GraphProgram::DOT);
-#else
- llvm::errs() << "Warning: viewing graph requires assertions" << "\n";
-#endif
}
-
-void ExprEngine::ViewGraph(ArrayRef<const ExplodedNode*> Nodes) {
-#ifndef NDEBUG
+void ExprEngine::ViewGraph(ArrayRef<const ExplodedNode *> Nodes) {
std::string Filename = DumpGraph(Nodes);
llvm::DisplayGraph(Filename, false, llvm::GraphProgram::DOT);
-#else
- llvm::errs() << "Warning: viewing graph requires assertions" << "\n";
-#endif
}
std::string ExprEngine::DumpGraph(bool trim, StringRef Filename) {
-#ifndef NDEBUG
if (trim) {
std::vector<const ExplodedNode *> Src;
@@ -3249,35 +3367,26 @@ std::string ExprEngine::DumpGraph(bool trim, StringRef Filename) {
Src.push_back(N);
}
return DumpGraph(Src, Filename);
- } else {
- return llvm::WriteGraph(&G, "ExprEngine", /*ShortNames=*/false,
- /*Title=*/"Exploded Graph",
- /*Filename=*/std::string(Filename));
}
-#else
- llvm::errs() << "Warning: dumping graph requires assertions" << "\n";
- return "";
-#endif
+
+ return llvm::WriteGraph(&G, "ExprEngine", /*ShortNames=*/false,
+ /*Title=*/"Exploded Graph",
+ /*Filename=*/std::string(Filename));
}
-std::string ExprEngine::DumpGraph(ArrayRef<const ExplodedNode*> Nodes,
+std::string ExprEngine::DumpGraph(ArrayRef<const ExplodedNode *> Nodes,
StringRef Filename) {
-#ifndef NDEBUG
std::unique_ptr<ExplodedGraph> TrimmedG(G.trim(Nodes));
if (!TrimmedG.get()) {
llvm::errs() << "warning: Trimmed ExplodedGraph is empty.\n";
return "";
- } else {
- return llvm::WriteGraph(TrimmedG.get(), "TrimmedExprEngine",
- /*ShortNames=*/false,
- /*Title=*/"Trimmed Exploded Graph",
- /*Filename=*/std::string(Filename));
- }
-#else
- llvm::errs() << "Warning: dumping graph requires assertions" << "\n";
- return "";
-#endif
+ }
+
+ return llvm::WriteGraph(TrimmedG.get(), "TrimmedExprEngine",
+ /*ShortNames=*/false,
+ /*Title=*/"Trimmed Exploded Graph",
+ /*Filename=*/std::string(Filename));
}
void *ProgramStateTrait<ReplayWithoutInlining>::GDMIndex() {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
index 302a971a15f2..43e298f3de08 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineC.cpp
@@ -29,8 +29,7 @@ static SVal conjureOffsetSymbolOnLocation(
SVal Symbol, SVal Other, Expr* Expression, SValBuilder &svalBuilder,
unsigned Count, const LocationContext *LCtx) {
QualType Ty = Expression->getType();
- if (Other.getAs<Loc>() &&
- Ty->isIntegralOrEnumerationType() &&
+ if (isa<Loc>(Other) && Ty->isIntegralOrEnumerationType() &&
Symbol.isUnknown()) {
return svalBuilder.conjureSymbolVal(Expression, LCtx, Ty, Count);
}
@@ -271,8 +270,9 @@ ProgramStateRef ExprEngine::handleLValueBitCast(
SVal OrigV = state->getSVal(Ex, LCtx);
SVal V = svalBuilder.evalCast(OrigV, T, ExTy);
// Negate the result if we're treating the boolean as a signed i1
- if (CastE->getCastKind() == CK_BooleanToSignedIntegral)
- V = evalMinus(V);
+ if (CastE->getCastKind() == CK_BooleanToSignedIntegral && V.isValid())
+ V = svalBuilder.evalMinus(V.castAs<NonLoc>());
+
state = state->BindExpr(CastE, LCtx, V);
if (V.isUnknown() && !OrigV.isUnknown()) {
state = escapeValues(state, OrigV, PSK_EscapeOther);
@@ -371,7 +371,7 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
case CK_IntegralToPointer:
case CK_PointerToIntegral: {
SVal V = state->getSVal(Ex, LCtx);
- if (V.getAs<nonloc::PointerToMember>()) {
+ if (isa<nonloc::PointerToMember>(V)) {
state = state->BindExpr(CastE, LCtx, UnknownVal());
Bldr.generateNode(CastE, Pred, state);
continue;
@@ -460,7 +460,8 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
continue;
} else {
// If the cast fails on a pointer, bind to 0.
- state = state->BindExpr(CastE, LCtx, svalBuilder.makeNull());
+ state = state->BindExpr(CastE, LCtx,
+ svalBuilder.makeNullWithType(resultType));
}
} else {
// If we don't know if the cast succeeded, conjure a new symbol.
@@ -498,7 +499,7 @@ void ExprEngine::VisitCast(const CastExpr *CastE, const Expr *Ex,
continue;
}
case CK_NullToPointer: {
- SVal V = svalBuilder.makeNull();
+ SVal V = svalBuilder.makeNullWithType(CastE->getType());
state = state->BindExpr(CastE, LCtx, V);
Bldr.generateNode(CastE, Pred, state);
continue;
@@ -1028,11 +1029,13 @@ void ExprEngine::VisitUnaryOperator(const UnaryOperator* U, ExplodedNode *Pred,
llvm_unreachable("Invalid Opcode.");
case UO_Not:
// FIXME: Do we need to handle promotions?
- state = state->BindExpr(U, LCtx, evalComplement(V.castAs<NonLoc>()));
+ state = state->BindExpr(
+ U, LCtx, svalBuilder.evalComplement(V.castAs<NonLoc>()));
break;
case UO_Minus:
// FIXME: Do we need to handle promotions?
- state = state->BindExpr(U, LCtx, evalMinus(V.castAs<NonLoc>()));
+ state = state->BindExpr(U, LCtx,
+ svalBuilder.evalMinus(V.castAs<NonLoc>()));
break;
case UO_LNot:
// C99 6.5.3.3: "The expression !E is equivalent to (0==E)."
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
index ba105f34a915..6d979da2755f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCXX.cpp
@@ -883,13 +883,10 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
// where new can return NULL. If we end up supporting that option, we can
// consider adding a check for it here.
// C++11 [basic.stc.dynamic.allocation]p3.
- if (FD) {
- QualType Ty = FD->getType();
- if (const auto *ProtoType = Ty->getAs<FunctionProtoType>())
- if (!ProtoType->isNothrow())
- if (auto dSymVal = symVal.getAs<DefinedOrUnknownSVal>())
- State = State->assume(*dSymVal, true);
- }
+ if (const auto *ProtoType = FD->getType()->getAs<FunctionProtoType>())
+ if (!ProtoType->isNothrow())
+ if (auto dSymVal = symVal.getAs<DefinedOrUnknownSVal>())
+ State = State->assume(*dSymVal, true);
}
StmtNodeBuilder Bldr(Pred, Dst, *currBldrCtx);
@@ -914,7 +911,7 @@ void ExprEngine::VisitCXXNewExpr(const CXXNewExpr *CNE, ExplodedNode *Pred,
// CXXNewExpr, we need to make sure that the constructed object is not
// immediately invalidated here. (The placement call should happen before
// the constructor call anyway.)
- if (FD && FD->isReservedGlobalPlacementOperator()) {
+ if (FD->isReservedGlobalPlacementOperator()) {
// Non-array placement new should always return the placement location.
SVal PlacementLoc = State->getSVal(CNE->getPlacementArg(0), LCtx);
Result = svalBuilder.evalCast(PlacementLoc, CNE->getType(),
@@ -948,8 +945,17 @@ void ExprEngine::VisitCXXDeleteExpr(const CXXDeleteExpr *CDE,
ExplodedNodeSet DstPreCall;
getCheckerManager().runCheckersForPreCall(DstPreCall, Pred, *Call, *this);
+ ExplodedNodeSet DstPostCall;
- getCheckerManager().runCheckersForPostCall(Dst, DstPreCall, *Call, *this);
+ if (AMgr.getAnalyzerOptions().MayInlineCXXAllocator) {
+ StmtNodeBuilder Bldr(DstPreCall, DstPostCall, *currBldrCtx);
+ for (ExplodedNode *I : DstPreCall) {
+ defaultEvalCall(Bldr, I, *Call);
+ }
+ } else {
+ DstPostCall = DstPreCall;
+ }
+ getCheckerManager().runCheckersForPostCall(Dst, DstPostCall, *Call, *this);
}
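
The gate above reuses the existing allocator-inlining knob. Assuming the usual analyzer-config spelling for MayInlineCXXAllocator, it can be toggled like this (hypothetical invocation):

    clang --analyze -Xclang -analyzer-config \
          -Xclang c++-allocator-inlining=true test.cpp
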
void ExprEngine::VisitCXXCatchStmt(const CXXCatchStmt *CS, ExplodedNode *Pred,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
index e6918e071a4f..5f8a84591b2a 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ExprEngineCallAndReturn.cpp
@@ -129,7 +129,7 @@ static std::pair<const Stmt*,
static SVal adjustReturnValue(SVal V, QualType ExpectedTy, QualType ActualTy,
StoreManager &StoreMgr) {
// For now, the only adjustments we handle apply only to locations.
- if (!V.getAs<Loc>())
+ if (!isa<Loc>(V))
return V;
// If the types already match, don't do any unnecessary work.
@@ -427,10 +427,39 @@ namespace {
REGISTER_MAP_WITH_PROGRAMSTATE(DynamicDispatchBifurcationMap,
const MemRegion *, unsigned)
+REGISTER_TRAIT_WITH_PROGRAMSTATE(CTUDispatchBifurcation, bool)
+
+void ExprEngine::ctuBifurcate(const CallEvent &Call, const Decl *D,
+ NodeBuilder &Bldr, ExplodedNode *Pred,
+ ProgramStateRef State) {
+ ProgramStateRef ConservativeEvalState = nullptr;
+ if (Call.isForeign() && !isSecondPhaseCTU()) {
+ const auto IK = AMgr.options.getCTUPhase1Inlining();
+ const bool DoInline = IK == CTUPhase1InliningKind::All ||
+ (IK == CTUPhase1InliningKind::Small &&
+ isSmall(AMgr.getAnalysisDeclContext(D)));
+ if (DoInline) {
+ inlineCall(Engine.getWorkList(), Call, D, Bldr, Pred, State);
+ return;
+ }
+ const bool BState = State->get<CTUDispatchBifurcation>();
+ if (!BState) { // This is the first time we see this foreign function.
+ // Enqueue it to be analyzed in the second (ctu) phase.
+ inlineCall(Engine.getCTUWorkList(), Call, D, Bldr, Pred, State);
+ // Conservatively evaluate in the first phase.
+ ConservativeEvalState = State->set<CTUDispatchBifurcation>(true);
+ conservativeEvalCall(Call, Bldr, Pred, ConservativeEvalState);
+ } else {
+ conservativeEvalCall(Call, Bldr, Pred, State);
+ }
+ return;
+ }
+ inlineCall(Engine.getWorkList(), Call, D, Bldr, Pred, State);
+}
-bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
- NodeBuilder &Bldr, ExplodedNode *Pred,
- ProgramStateRef State) {
+void ExprEngine::inlineCall(WorkList *WList, const CallEvent &Call,
+ const Decl *D, NodeBuilder &Bldr,
+ ExplodedNode *Pred, ProgramStateRef State) {
assert(D);
const LocationContext *CurLC = Pred->getLocationContext();
@@ -465,7 +494,7 @@ bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
if (ExplodedNode *N = G.getNode(Loc, State, false, &isNew)) {
N->addPredecessor(Pred, G);
if (isNew)
- Engine.getWorkList()->enqueue(N);
+ WList->enqueue(N);
}
// If we decided to inline the call, the successor has been manually
@@ -475,11 +504,17 @@ bool ExprEngine::inlineCall(const CallEvent &Call, const Decl *D,
NumInlinedCalls++;
Engine.FunctionSummaries->bumpNumTimesInlined(D);
- // Mark the decl as visited.
- if (VisitedCallees)
- VisitedCallees->insert(D);
-
- return true;
+  // Do not mark the decl as visited in the 2nd run (CTUWList), so that the
+  // function is still visited as a top-level function; this way we won't lose
+  // reports in non-CTU mode. Consider the case when a function in a foreign
+  // TU calls back into the main TU.
+  // Note: during the 1st run it doesn't matter whether we mark the foreign
+  // functions as visited or not, because they can never appear as top-level
+  // functions in the main TU.
+ if (!isSecondPhaseCTU())
+ // Mark the decl as visited.
+ if (VisitedCallees)
+ VisitedCallees->insert(D);
}
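
The phase-1 decision in ctuBifurcate above reduces to a small predicate; a hedged restatement (the CTUPhase1InliningKind values are taken from the hunk, the helper name is hypothetical):

    // Inline a foreign function during phase 1 only when the mode allows it;
    // otherwise it is evaluated conservatively now and enqueued once for the
    // second (CTU) phase via the CTUDispatchBifurcation flag.
    static bool inlineForeignInPhase1(CTUPhase1InliningKind IK, bool IsSmall) {
      return IK == CTUPhase1InliningKind::All ||
             (IK == CTUPhase1InliningKind::Small && IsSmall);
    }
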
static ProgramStateRef getInlineFailedState(ProgramStateRef State,
@@ -697,7 +732,7 @@ ProgramStateRef ExprEngine::bindReturnValue(const CallEvent &Call,
// Store the extent of the allocated object(s).
SVal ElementCount;
- if (const Expr *SizeExpr = CNE->getArraySize().getValueOr(nullptr)) {
+ if (const Expr *SizeExpr = CNE->getArraySize().value_or(nullptr)) {
ElementCount = State->getSVal(SizeExpr, LCtx);
} else {
ElementCount = svalBuilder.makeIntVal(1, /*IsUnsigned=*/true);
@@ -980,7 +1015,7 @@ bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
// Check if this function has been marked as non-inlinable.
Optional<bool> MayInline = Engine.FunctionSummaries->mayInline(D);
- if (MayInline.hasValue()) {
+ if (MayInline) {
if (!MayInline.getValue())
return false;
@@ -1002,7 +1037,7 @@ bool ExprEngine::shouldInlineCall(const CallEvent &Call, const Decl *D,
CallInlinePolicy CIP = mayInlineCallKind(Call, Pred, Opts, CallOpts);
if (CIP != CIP_Allowed) {
if (CIP == CIP_DisallowedAlways) {
- assert(!MayInline.hasValue() || MayInline.getValue());
+ assert(!MayInline || *MayInline);
Engine.FunctionSummaries->markShouldNotInline(D);
}
return false;
@@ -1068,6 +1103,7 @@ void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
State = InlinedFailedState;
} else {
RuntimeDefinition RD = Call->getRuntimeDefinition();
+ Call->setForeign(RD.isForeign());
const Decl *D = RD.getDecl();
if (shouldInlineCall(*Call, D, Pred, CallOpts)) {
if (RD.mayHaveOtherDefinitions()) {
@@ -1085,10 +1121,8 @@ void ExprEngine::defaultEvalCall(NodeBuilder &Bldr, ExplodedNode *Pred,
return;
}
}
-
- // We are not bifurcating and we do have a Decl, so just inline.
- if (inlineCall(*Call, D, Bldr, Pred, State))
- return;
+ ctuBifurcate(*Call, D, Bldr, Pred, State);
+ return;
}
}
@@ -1110,8 +1144,7 @@ void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
if (BState) {
// If we are on "inline path", keep inlining if possible.
if (*BState == DynamicDispatchModeInlined)
- if (inlineCall(Call, D, Bldr, Pred, State))
- return;
+ ctuBifurcate(Call, D, Bldr, Pred, State);
// If inline failed, or we are on the path where we assume we
// don't have enough info about the receiver to inline, conjure the
// return value and invalidate the regions.
@@ -1124,7 +1157,7 @@ void ExprEngine::BifurcateCall(const MemRegion *BifurReg,
ProgramStateRef IState =
State->set<DynamicDispatchBifurcationMap>(BifurReg,
DynamicDispatchModeInlined);
- inlineCall(Call, D, Bldr, Pred, IState);
+ ctuBifurcate(Call, D, Bldr, Pred, IState);
ProgramStateRef NoIState =
State->set<DynamicDispatchBifurcationMap>(BifurReg,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
index 8bf6fc085c6a..506d61d94d5f 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/LoopUnrolling.cpp
@@ -264,8 +264,8 @@ bool shouldCompletelyUnroll(const Stmt *LoopStmt, ASTContext &ASTCtx,
Matches[0].getNodeAs<IntegerLiteral>("initNum")->getValue();
auto CondOp = Matches[0].getNodeAs<BinaryOperator>("conditionOperator");
if (InitNum.getBitWidth() != BoundNum.getBitWidth()) {
- InitNum = InitNum.zextOrSelf(BoundNum.getBitWidth());
- BoundNum = BoundNum.zextOrSelf(InitNum.getBitWidth());
+ InitNum = InitNum.zext(BoundNum.getBitWidth());
+ BoundNum = BoundNum.zext(InitNum.getBitWidth());
}
if (CondOp->getOpcode() == BO_GE || CondOp->getOpcode() == BO_LE)
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
index 58bea12f8783..f0cda835e07c 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/MemRegion.cpp
@@ -162,7 +162,9 @@ const StackFrameContext *VarRegion::getStackFrame() const {
}
ObjCIvarRegion::ObjCIvarRegion(const ObjCIvarDecl *ivd, const SubRegion *sReg)
- : DeclRegion(sReg, ObjCIvarRegionKind), IVD(ivd) {}
+ : DeclRegion(sReg, ObjCIvarRegionKind), IVD(ivd) {
+ assert(IVD);
+}
const ObjCIvarDecl *ObjCIvarRegion::getDecl() const { return IVD; }
@@ -480,7 +482,7 @@ void CompoundLiteralRegion::dumpToStream(raw_ostream &os) const {
}
void CXXTempObjectRegion::dumpToStream(raw_ostream &os) const {
- os << "temp_object{" << getValueType().getAsString() << ", "
+ os << "temp_object{" << getValueType() << ", "
<< "S" << Ex->getID(getContext()) << '}';
}
@@ -497,8 +499,8 @@ void CXXThisRegion::dumpToStream(raw_ostream &os) const {
}
void ElementRegion::dumpToStream(raw_ostream &os) const {
- os << "Element{" << superRegion << ','
- << Index << ',' << getElementType().getAsString() << '}';
+ os << "Element{" << superRegion << ',' << Index << ',' << getElementType()
+ << '}';
}
void FieldRegion::dumpToStream(raw_ostream &os) const {
@@ -971,26 +973,14 @@ const VarRegion *MemRegionManager::getVarRegion(const VarDecl *D,
const MemRegion *sReg = nullptr;
if (D->hasGlobalStorage() && !D->isStaticLocal()) {
-
- // First handle the globals defined in system headers.
- if (Ctx.getSourceManager().isInSystemHeader(D->getLocation())) {
- // Allow the system globals which often DO GET modified, assume the
- // rest are immutable.
- if (D->getName().contains("errno"))
- sReg = getGlobalsRegion(MemRegion::GlobalSystemSpaceRegionKind);
- else
- sReg = getGlobalsRegion(MemRegion::GlobalImmutableSpaceRegionKind);
-
- // Treat other globals as GlobalInternal unless they are constants.
+ QualType Ty = D->getType();
+ assert(!Ty.isNull());
+ if (Ty.isConstQualified()) {
+ sReg = getGlobalsRegion(MemRegion::GlobalImmutableSpaceRegionKind);
+ } else if (Ctx.getSourceManager().isInSystemHeader(D->getLocation())) {
+ sReg = getGlobalsRegion(MemRegion::GlobalSystemSpaceRegionKind);
} else {
- QualType GQT = D->getType();
- const Type *GT = GQT.getTypePtrOrNull();
- // TODO: We could walk the complex types here and see if everything is
- // constified.
- if (GT && GQT.isConstQualified() && GT->isArithmeticType())
- sReg = getGlobalsRegion(MemRegion::GlobalImmutableSpaceRegionKind);
- else
- sReg = getGlobalsRegion();
+ sReg = getGlobalsRegion(MemRegion::GlobalInternalSpaceRegionKind);
}
// Finally handle static locals.
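
The new classification above can be summarized with three hypothetical globals (illustration only, not from the patch):

    const int Table[] = {1, 2, 3}; // const-qualified  -> GlobalImmutableSpaceRegion
    int FromSystemHeader;          // in a system header -> GlobalSystemSpaceRegion
    int Counter;                   // everything else    -> GlobalInternalSpaceRegion
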
@@ -1033,8 +1023,10 @@ const VarRegion *MemRegionManager::getVarRegion(const VarDecl *D,
T = TSI->getType();
if (T.isNull())
T = getContext().VoidTy;
- if (!T->getAs<FunctionType>())
- T = getContext().getFunctionNoProtoType(T);
+ if (!T->getAs<FunctionType>()) {
+ FunctionProtoType::ExtProtoInfo Ext;
+ T = getContext().getFunctionType(T, None, Ext);
+ }
T = getContext().getBlockPointerType(T);
const BlockCodeRegion *BTR =
@@ -1153,9 +1145,12 @@ MemRegionManager::getBlockCodeRegion(const BlockDecl *BD, CanQualType locTy,
return getSubRegion<BlockCodeRegion>(BD, locTy, AC, getCodeRegion());
}
-/// getSymbolicRegion - Retrieve or create a "symbolic" memory region.
-const SymbolicRegion *MemRegionManager::getSymbolicRegion(SymbolRef sym) {
- return getSubRegion<SymbolicRegion>(sym, getUnknownRegion());
+const SymbolicRegion *
+MemRegionManager::getSymbolicRegion(SymbolRef sym,
+ const MemSpaceRegion *MemSpace) {
+ if (MemSpace == nullptr)
+ MemSpace = getUnknownRegion();
+ return getSubRegion<SymbolicRegion>(sym, MemSpace);
}
const SymbolicRegion *MemRegionManager::getSymbolicHeapRegion(SymbolRef Sym) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
index 92104d628711..93c19a688b9a 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/PlistDiagnostics.cpp
@@ -389,7 +389,7 @@ void PlistPrinter::ReportMacroExpansions(raw_ostream &o, unsigned indent) {
const Optional<StringRef> ExpansionText =
getExpandedMacro(MacroExpansionLoc, CTU, MacroExpansions, SM);
- if (!MacroName.hasValue() || !ExpansionText.hasValue())
+ if (!MacroName || !ExpansionText)
continue;
Indent(o, indent) << "<dict>\n";
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
index 8d4e0bbb7dec..a6d0e242924b 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/ProgramState.cpp
@@ -55,7 +55,7 @@ ProgramState::ProgramState(ProgramStateManager *mgr, const Environment& env,
ProgramState::ProgramState(const ProgramState &RHS)
: stateMgr(RHS.stateMgr), Env(RHS.Env), store(RHS.store), GDM(RHS.GDM),
- refCount(0) {
+ PosteriorlyOverconstrained(RHS.PosteriorlyOverconstrained), refCount(0) {
stateMgr->getStoreManager().incrementReferenceCount(store);
}
@@ -216,7 +216,7 @@ ProgramState::invalidateRegionsImpl(ValueList Values,
}
ProgramStateRef ProgramState::killBinding(Loc LV) const {
- assert(!LV.getAs<loc::MemRegionVal>() && "Use invalidateRegion instead.");
+ assert(!isa<loc::MemRegionVal>(LV) && "Use invalidateRegion instead.");
Store OldStore = getStore();
const StoreRef &newStore =
@@ -314,12 +314,12 @@ ProgramStateRef ProgramState::BindExpr(const Stmt *S,
return getStateManager().getPersistentState(NewSt);
}
-ProgramStateRef ProgramState::assumeInBound(DefinedOrUnknownSVal Idx,
- DefinedOrUnknownSVal UpperBound,
- bool Assumption,
- QualType indexTy) const {
+LLVM_NODISCARD std::pair<ProgramStateRef, ProgramStateRef>
+ProgramState::assumeInBoundDual(DefinedOrUnknownSVal Idx,
+ DefinedOrUnknownSVal UpperBound,
+ QualType indexTy) const {
if (Idx.isUnknown() || UpperBound.isUnknown())
- return this;
+ return {this, this};
// Build an expression for 0 <= Idx < UpperBound.
// This is the same as Idx + MIN < UpperBound + MIN, if overflow is allowed.
@@ -338,7 +338,7 @@ ProgramStateRef ProgramState::assumeInBound(DefinedOrUnknownSVal Idx,
SVal newIdx = svalBuilder.evalBinOpNN(this, BO_Add,
Idx.castAs<NonLoc>(), Min, indexTy);
if (newIdx.isUnknownOrUndef())
- return this;
+ return {this, this};
// Adjust the upper bound.
SVal newBound =
@@ -346,17 +346,26 @@ ProgramStateRef ProgramState::assumeInBound(DefinedOrUnknownSVal Idx,
Min, indexTy);
if (newBound.isUnknownOrUndef())
- return this;
+ return {this, this};
// Build the actual comparison.
SVal inBound = svalBuilder.evalBinOpNN(this, BO_LT, newIdx.castAs<NonLoc>(),
newBound.castAs<NonLoc>(), Ctx.IntTy);
if (inBound.isUnknownOrUndef())
- return this;
+ return {this, this};
// Finally, let the constraint manager take care of it.
ConstraintManager &CM = SM.getConstraintManager();
- return CM.assume(this, inBound.castAs<DefinedSVal>(), Assumption);
+ return CM.assumeDual(this, inBound.castAs<DefinedSVal>());
+}
+
+ProgramStateRef ProgramState::assumeInBound(DefinedOrUnknownSVal Idx,
+ DefinedOrUnknownSVal UpperBound,
+ bool Assumption,
+ QualType indexTy) const {
+ std::pair<ProgramStateRef, ProgramStateRef> R =
+ assumeInBoundDual(Idx, UpperBound, indexTy);
+ return Assumption ? R.first : R.second;
}
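
A short usage sketch of the new dual API (a hypothetical caller; Idx, Size and IndexTy stand for values the caller already has):

    // Both successor states come back from one query; each side may equal the
    // original state when the corresponding assumption adds no information.
    std::pair<ProgramStateRef, ProgramStateRef> R =
        State->assumeInBoundDual(Idx, Size, IndexTy);
    ProgramStateRef InBound = R.first;     // state where 0 <= Idx < Size
    ProgramStateRef OutOfBound = R.second; // state where the negation holds
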
ConditionTruthVal ProgramState::isNonNull(SVal V) const {
@@ -429,6 +438,12 @@ ProgramStateRef ProgramState::makeWithStore(const StoreRef &store) const {
return getStateManager().getPersistentState(NewSt);
}
+ProgramStateRef ProgramState::cloneAsPosteriorlyOverconstrained() const {
+ ProgramState NewSt(*this);
+ NewSt.PosteriorlyOverconstrained = true;
+ return getStateManager().getPersistentState(NewSt);
+}
+
void ProgramState::setStore(const StoreRef &newStore) {
Store newStoreStore = newStore.getStore();
if (newStoreStore)
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
index 4b0d4942e528..e788a7a60830 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangeConstraintManager.cpp
@@ -20,8 +20,8 @@
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/ImmutableSet.h"
#include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
@@ -353,6 +353,21 @@ const llvm::APSInt &RangeSet::getMaxValue() const {
return std::prev(end())->To();
}
+bool clang::ento::RangeSet::isUnsigned() const {
+ assert(!isEmpty());
+ return begin()->From().isUnsigned();
+}
+
+uint32_t clang::ento::RangeSet::getBitWidth() const {
+ assert(!isEmpty());
+ return begin()->From().getBitWidth();
+}
+
+APSIntType clang::ento::RangeSet::getAPSIntType() const {
+ assert(!isEmpty());
+ return APSIntType(begin()->From());
+}
+
bool RangeSet::containsImpl(llvm::APSInt &Point) const {
if (isEmpty() || !pin(Point))
return false;
@@ -655,6 +670,181 @@ RangeSet RangeSet::Factory::negate(RangeSet What) {
return makePersistent(std::move(Result));
}
+// Convert a range set to the given integral type using truncation and
+// promotion. This works similarly to the APSIntType::apply function, but for
+// a whole range set.
+RangeSet RangeSet::Factory::castTo(RangeSet What, APSIntType Ty) {
+ // Set is empty or NOOP (aka cast to the same type).
+ if (What.isEmpty() || What.getAPSIntType() == Ty)
+ return What;
+
+ const bool IsConversion = What.isUnsigned() != Ty.isUnsigned();
+ const bool IsTruncation = What.getBitWidth() > Ty.getBitWidth();
+ const bool IsPromotion = What.getBitWidth() < Ty.getBitWidth();
+
+ if (IsTruncation)
+ return makePersistent(truncateTo(What, Ty));
+
+  // Here we handle 2 cases:
+  // - IsConversion && !IsPromotion.
+  //   In this case we handle a change of sign at the same bitwidth:
+  //   char -> uchar, uint -> int. Negatives become positives, and positives
+  //   that are out of range become negatives. We use the convertTo function
+  //   for that.
+  // - IsConversion && IsPromotion && !What.isUnsigned().
+  //   In this case we handle a change of sign from signed to unsigned with a
+  //   higher bitwidth: char -> uint, int -> uint64. We also need to convert
+  //   negatives to positives here, so convertTo is used as well. By contrast,
+  //   no such conversion is needed when promoting an unsigned type to a wider
+  //   signed type, because every unsigned value is valid for such a signed
+  //   type.
+ if (IsConversion && (!IsPromotion || !What.isUnsigned()))
+ return makePersistent(convertTo(What, Ty));
+
+ assert(IsPromotion && "Only promotion operation from unsigneds left.");
+ return makePersistent(promoteTo(What, Ty));
+}
+
+RangeSet RangeSet::Factory::castTo(RangeSet What, QualType T) {
+ assert(T->isIntegralOrEnumerationType() && "T shall be an integral type.");
+ return castTo(What, ValueFactory.getAPSIntType(T));
+}
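
Worked examples for the two non-truncating paths, assuming 8-bit char/uchar (illustrative values, not from the patch):

    // Conversion (same width, sign change):
    //   char{[-1, 1]}     -> uchar{[0, 1], [255, 255]}   // -1 wraps to 255
    // Promotion (unsigned to a wider type):
    //   uchar{[200, 255]} -> int{[200, 255]}             // values unchanged
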
+
+RangeSet::ContainerType RangeSet::Factory::truncateTo(RangeSet What,
+ APSIntType Ty) {
+ using llvm::APInt;
+ using llvm::APSInt;
+ ContainerType Result;
+ ContainerType Dummy;
+  // CastRangeSize is the number of all possible values of the cast type.
+  // Example: `char` has 256 values; `short` has 65536 values.
+  // In fact we use `number of values` - 1, because the number of values of
+  // UINT64 cannot itself be stored in a uint64_t (just as 256, the number of
+  // values of `char`, cannot be stored in a `char`).
+  // That is fine: one less is still enough to do the calculations correctly.
+ uint64_t CastRangeSize = APInt::getMaxValue(Ty.getBitWidth()).getZExtValue();
+ for (const Range &R : What) {
+ // Get bounds of the given range.
+ APSInt FromInt = R.From();
+ APSInt ToInt = R.To();
+    // CurrentRangeSize is the number of all possible values of the current
+    // range minus one.
+ uint64_t CurrentRangeSize = (ToInt - FromInt).getZExtValue();
+ // This is an optimization for a specific case when this Range covers
+ // the whole range of the target type.
+ Dummy.clear();
+ if (CurrentRangeSize >= CastRangeSize) {
+ Dummy.emplace_back(ValueFactory.getMinValue(Ty),
+ ValueFactory.getMaxValue(Ty));
+ Result = std::move(Dummy);
+ break;
+ }
+ // Cast the bounds.
+ Ty.apply(FromInt);
+ Ty.apply(ToInt);
+ const APSInt &PersistentFrom = ValueFactory.getValue(FromInt);
+ const APSInt &PersistentTo = ValueFactory.getValue(ToInt);
+ if (FromInt > ToInt) {
+ Dummy.emplace_back(ValueFactory.getMinValue(Ty), PersistentTo);
+ Dummy.emplace_back(PersistentFrom, ValueFactory.getMaxValue(Ty));
+ } else
+ Dummy.emplace_back(PersistentFrom, PersistentTo);
+    // Every range retrieved after truncation potentially contains garbage
+    // values, so we have to unite each new range with the previous ones.
+ Result = unite(Result, Dummy);
+ }
+
+ return Result;
+}
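
Two hedged traces of truncateTo, assuming a 16-bit source and an 8-bit target (illustrative values):

    // Wide range hits the early exit: 300 - 0 >= 255, so the whole target
    // space is produced at once.
    //   short{[0, 300]}   -> uchar{[0, 255]}
    // Narrow range wraps: 250 -> 250, 260 -> 4, so From > To and the range
    // is split before uniting.
    //   short{[250, 260]} -> uchar{[0, 4], [250, 255]}
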
+
+// Divide the conversion into two phases (presented as loops here).
+// The first phase (loop) works while the casted values go in ascending order.
+// E.g. char{1,3,5,127} -> uint{1,3,5,127}
+// Interrupt the first phase and go to the second one when the casted values
+// start to go in descending order. That means that we crossed over the middle
+// of the type value set (aka 0 for signeds and MAX/2+1 for unsigneds).
+// For instance:
+// 1: uchar{1,3,5,128,255} -> char{1,3,5,-128,-1}
+//    Here we put {1,3,5} into one array and {-128, -1} into another.
+// 2: char{-128,-127,-1,0,1,2} -> uchar{128,129,255,0,1,2}
+//    Here we put {128,129,255} into one array and {0,1,2} into another.
+// After that we unite both arrays.
+// NOTE: We don't just concatenate the arrays, because they may have
+// adjacent ranges, e.g.:
+// 1: char(-128, 127) -> uchar -> arr1(128, 255), arr2(0, 127) ->
+//    unite -> uchar(0, 255)
+// 2: uchar(0, 1)U(254, 255) -> char -> arr1(0, 1), arr2(-2, -1) ->
+//    unite -> char(-2, 1)
+RangeSet::ContainerType RangeSet::Factory::convertTo(RangeSet What,
+ APSIntType Ty) {
+ using llvm::APInt;
+ using llvm::APSInt;
+ using Bounds = std::pair<const APSInt &, const APSInt &>;
+ ContainerType AscendArray;
+ ContainerType DescendArray;
+ auto CastRange = [Ty, &VF = ValueFactory](const Range &R) -> Bounds {
+ // Get bounds of the given range.
+ APSInt FromInt = R.From();
+ APSInt ToInt = R.To();
+ // Cast the bounds.
+ Ty.apply(FromInt);
+ Ty.apply(ToInt);
+ return {VF.getValue(FromInt), VF.getValue(ToInt)};
+ };
+ // Phase 1. Fill the first array.
+ APSInt LastConvertedInt = Ty.getMinValue();
+ const auto *It = What.begin();
+ const auto *E = What.end();
+ while (It != E) {
+ Bounds NewBounds = CastRange(*(It++));
+    // If the values stop going in ascending order, go to the second phase (loop).
+ if (NewBounds.first < LastConvertedInt) {
+ DescendArray.emplace_back(NewBounds.first, NewBounds.second);
+ break;
+ }
+ // If the range contains a midpoint, then split the range.
+ // E.g. char(-5, 5) -> uchar(251, 5)
+ // Here we shall add a range (251, 255) to the first array and (0, 5) to the
+ // second one.
+ if (NewBounds.first > NewBounds.second) {
+ DescendArray.emplace_back(ValueFactory.getMinValue(Ty), NewBounds.second);
+ AscendArray.emplace_back(NewBounds.first, ValueFactory.getMaxValue(Ty));
+ } else
+      // Values are going in ascending order.
+ AscendArray.emplace_back(NewBounds.first, NewBounds.second);
+ LastConvertedInt = NewBounds.first;
+ }
+ // Phase 2. Fill the second array.
+ while (It != E) {
+ Bounds NewBounds = CastRange(*(It++));
+ DescendArray.emplace_back(NewBounds.first, NewBounds.second);
+ }
+ // Unite both arrays.
+ return unite(AscendArray, DescendArray);
+}
+
+/// Handles the only remaining case: promotion from an unsigned type to a
+/// wider signed or unsigned type.
+RangeSet::ContainerType RangeSet::Factory::promoteTo(RangeSet What,
+ APSIntType Ty) {
+ ContainerType Result;
+ // We definitely know the size of the result set.
+ Result.reserve(What.size());
+
+ // Each unsigned value fits every larger type without any changes,
+ // whether the larger type is signed or unsigned. So just promote and push
+ // back each range one by one.
+ for (const Range &R : What) {
+ // Get bounds of the given range.
+ llvm::APSInt FromInt = R.From();
+ llvm::APSInt ToInt = R.To();
+ // Cast the bounds.
+ Ty.apply(FromInt);
+ Ty.apply(ToInt);
+ Result.emplace_back(ValueFactory.getValue(FromInt),
+ ValueFactory.getValue(ToInt));
+ }
+ return Result;
+}
+
RangeSet RangeSet::Factory::deletePoint(RangeSet From,
const llvm::APSInt &Point) {
if (!From.contains(Point))
@@ -1253,30 +1443,35 @@ private:
return RangeFactory.deletePoint(Domain, IntType.getZeroValue());
}
- // FIXME: Once SValBuilder supports unary minus, we should use SValBuilder to
- // obtain the negated symbolic expression instead of constructing the
- // symbol manually. This will allow us to support finding ranges of not
- // only negated SymSymExpr-type expressions, but also of other, simpler
- // expressions which we currently do not know how to negate.
Optional<RangeSet> getRangeForNegatedSub(SymbolRef Sym) {
- if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(Sym)) {
+ // Do not negate if the type cannot be meaningfully negated.
+ if (!Sym->getType()->isUnsignedIntegerOrEnumerationType() &&
+ !Sym->getType()->isSignedIntegerOrEnumerationType())
+ return llvm::None;
+
+ const RangeSet *NegatedRange = nullptr;
+ SymbolManager &SymMgr = State->getSymbolManager();
+ if (const auto *USE = dyn_cast<UnarySymExpr>(Sym)) {
+ if (USE->getOpcode() == UO_Minus) {
+ // Just get the operand when we negate a symbol that is already negated.
+ // -(-a) == a
+ NegatedRange = getConstraint(State, USE->getOperand());
+ }
+ } else if (const SymSymExpr *SSE = dyn_cast<SymSymExpr>(Sym)) {
if (SSE->getOpcode() == BO_Sub) {
QualType T = Sym->getType();
-
- // Do not negate unsigned ranges
- if (!T->isUnsignedIntegerOrEnumerationType() &&
- !T->isSignedIntegerOrEnumerationType())
- return llvm::None;
-
- SymbolManager &SymMgr = State->getSymbolManager();
SymbolRef NegatedSym =
SymMgr.getSymSymExpr(SSE->getRHS(), BO_Sub, SSE->getLHS(), T);
-
- if (const RangeSet *NegatedRange = getConstraint(State, NegatedSym)) {
- return RangeFactory.negate(*NegatedRange);
- }
+ NegatedRange = getConstraint(State, NegatedSym);
}
+ } else {
+ SymbolRef NegatedSym =
+ SymMgr.getUnarySymExpr(Sym, UO_Minus, Sym->getType());
+ NegatedRange = getConstraint(State, NegatedSym);
}
+
+ if (NegatedRange)
+ return RangeFactory.negate(*NegatedRange);
return llvm::None;
}
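
The rewritten lookup now covers three shapes of negation; a hedged summary (symbol names hypothetical):

    // -(-a)    : UnarySymExpr with UO_Minus -> reuse the constraint on `a`.
    // -(a - b) : SymSymExpr with BO_Sub     -> look up the constraint on `b - a`.
    // -a       : any other symbol           -> look up the constraint on the
    //            UnarySymExpr built via SymMgr.getUnarySymExpr(Sym, UO_Minus, ...).
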
@@ -2318,12 +2513,6 @@ EquivalenceClass::removeMember(ProgramStateRef State, const SymbolRef Old) {
SymbolSet ClsMembers = getClassMembers(State);
assert(ClsMembers.contains(Old));
- // We don't remove `Old`'s Sym->Class relation for two reasons:
- // 1) This way constraints for the old symbol can still be found via it's
- // equivalence class that it used to be the member of.
- // 2) Performance and resource reasons. We can spare one removal and thus one
- // additional tree in the forest of `ClassMap`.
-
// Remove `Old`'s Class->Sym relation.
SymbolSet::Factory &F = getMembersFactory(State);
ClassMembersTy::Factory &EMFactory = State->get_context<ClassMembers>();
@@ -2337,6 +2526,12 @@ EquivalenceClass::removeMember(ProgramStateRef State, const SymbolRef Old) {
ClassMembersMap = EMFactory.add(ClassMembersMap, *this, ClsMembers);
State = State->set<ClassMembers>(ClassMembersMap);
+ // Remove `Old`'s Sym->Class relation.
+ ClassMapTy Classes = State->get<ClassMap>();
+ ClassMapTy::Factory &CMF = State->get_context<ClassMap>();
+ Classes = CMF.remove(Classes, Old);
+ State = State->set<ClassMap>(Classes);
+
return State;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp
index 892d64ea4e4e..4bbe933be212 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RangedConstraintManager.cpp
@@ -48,47 +48,48 @@ ProgramStateRef RangedConstraintManager::assumeSym(ProgramStateRef State,
if (const auto *SSE = dyn_cast<SymSymExpr>(Sym)) {
BinaryOperator::Opcode Op = SSE->getOpcode();
- assert(BinaryOperator::isComparisonOp(Op));
-
- // We convert equality operations for pointers only.
- if (Loc::isLocType(SSE->getLHS()->getType()) &&
- Loc::isLocType(SSE->getRHS()->getType())) {
- // Translate "a != b" to "(b - a) != 0".
- // We invert the order of the operands as a heuristic for how loop
- // conditions are usually written ("begin != end") as compared to length
- // calculations ("end - begin"). The more correct thing to do would be to
- // canonicalize "a - b" and "b - a", which would allow us to treat
- // "a != b" and "b != a" the same.
-
- SymbolManager &SymMgr = getSymbolManager();
- QualType DiffTy = SymMgr.getContext().getPointerDiffType();
- SymbolRef Subtraction =
- SymMgr.getSymSymExpr(SSE->getRHS(), BO_Sub, SSE->getLHS(), DiffTy);
-
- const llvm::APSInt &Zero = getBasicVals().getValue(0, DiffTy);
- Op = BinaryOperator::reverseComparisonOp(Op);
- if (!Assumption)
- Op = BinaryOperator::negateComparisonOp(Op);
- return assumeSymRel(State, Subtraction, Op, Zero);
- }
+ if (BinaryOperator::isComparisonOp(Op)) {
+
+ // We convert equality operations for pointers only.
+ if (Loc::isLocType(SSE->getLHS()->getType()) &&
+ Loc::isLocType(SSE->getRHS()->getType())) {
+ // Translate "a != b" to "(b - a) != 0".
+ // We invert the order of the operands as a heuristic for how loop
+ // conditions are usually written ("begin != end") as compared to length
+ // calculations ("end - begin"). The more correct thing to do would be
+ // to canonicalize "a - b" and "b - a", which would allow us to treat
+ // "a != b" and "b != a" the same.
+
+ SymbolManager &SymMgr = getSymbolManager();
+ QualType DiffTy = SymMgr.getContext().getPointerDiffType();
+ SymbolRef Subtraction =
+ SymMgr.getSymSymExpr(SSE->getRHS(), BO_Sub, SSE->getLHS(), DiffTy);
+
+ const llvm::APSInt &Zero = getBasicVals().getValue(0, DiffTy);
+ Op = BinaryOperator::reverseComparisonOp(Op);
+ if (!Assumption)
+ Op = BinaryOperator::negateComparisonOp(Op);
+ return assumeSymRel(State, Subtraction, Op, Zero);
+ }
- if (BinaryOperator::isEqualityOp(Op)) {
- SymbolManager &SymMgr = getSymbolManager();
+ if (BinaryOperator::isEqualityOp(Op)) {
+ SymbolManager &SymMgr = getSymbolManager();
- QualType ExprType = SSE->getType();
- SymbolRef CanonicalEquality =
- SymMgr.getSymSymExpr(SSE->getLHS(), BO_EQ, SSE->getRHS(), ExprType);
+ QualType ExprType = SSE->getType();
+ SymbolRef CanonicalEquality =
+ SymMgr.getSymSymExpr(SSE->getLHS(), BO_EQ, SSE->getRHS(), ExprType);
- bool WasEqual = SSE->getOpcode() == BO_EQ;
- bool IsExpectedEqual = WasEqual == Assumption;
+ bool WasEqual = SSE->getOpcode() == BO_EQ;
+ bool IsExpectedEqual = WasEqual == Assumption;
- const llvm::APSInt &Zero = getBasicVals().getValue(0, ExprType);
+ const llvm::APSInt &Zero = getBasicVals().getValue(0, ExprType);
- if (IsExpectedEqual) {
- return assumeSymNE(State, CanonicalEquality, Zero, Zero);
- }
+ if (IsExpectedEqual) {
+ return assumeSymNE(State, CanonicalEquality, Zero, Zero);
+ }
- return assumeSymEQ(State, CanonicalEquality, Zero, Zero);
+ return assumeSymEQ(State, CanonicalEquality, Zero, Zero);
+ }
}
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
index 135130b35ba7..5e946483a93d 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/RegionStore.cpp
@@ -318,29 +318,6 @@ RegionBindingsRef RegionBindingsRef::removeBinding(const MemRegion *R,
}
//===----------------------------------------------------------------------===//
-// Fine-grained control of RegionStoreManager.
-//===----------------------------------------------------------------------===//
-
-namespace {
-struct minimal_features_tag {};
-struct maximal_features_tag {};
-
-class RegionStoreFeatures {
- bool SupportsFields;
-public:
- RegionStoreFeatures(minimal_features_tag) :
- SupportsFields(false) {}
-
- RegionStoreFeatures(maximal_features_tag) :
- SupportsFields(true) {}
-
- void enableFields(bool t) { SupportsFields = t; }
-
- bool supportsFields() const { return SupportsFields; }
-};
-}
-
-//===----------------------------------------------------------------------===//
// Main RegionStore logic.
//===----------------------------------------------------------------------===//
@@ -349,8 +326,6 @@ class InvalidateRegionsWorker;
class RegionStoreManager : public StoreManager {
public:
- const RegionStoreFeatures Features;
-
RegionBindings::Factory RBFactory;
mutable ClusterBindings::Factory CBFactory;
@@ -370,6 +345,16 @@ private:
/// To disable all small-struct-dependent behavior, set the option to "0".
unsigned SmallStructLimit;
+  /// The largest number of elements an array can have and still be
+  /// considered "small".
+  ///
+  /// This is currently used to decide whether or not it is worth "forcing" a
+  /// LazyCompoundVal on bind.
+  ///
+  /// This is controlled by the 'region-store-small-array-limit' option.
+  /// To disable all small-array-dependent behavior, set the option to "0".
+  unsigned SmallArrayLimit;
+
/// A helper used to populate the work list with the given set of
/// regions.
void populateWorkList(InvalidateRegionsWorker &W,
@@ -377,16 +362,15 @@ private:
InvalidatedRegions *TopLevelRegions);
public:
- RegionStoreManager(ProgramStateManager& mgr, const RegionStoreFeatures &f)
- : StoreManager(mgr), Features(f),
- RBFactory(mgr.getAllocator()), CBFactory(mgr.getAllocator()),
- SmallStructLimit(0) {
+ RegionStoreManager(ProgramStateManager &mgr)
+ : StoreManager(mgr), RBFactory(mgr.getAllocator()),
+ CBFactory(mgr.getAllocator()), SmallStructLimit(0), SmallArrayLimit(0) {
ExprEngine &Eng = StateMgr.getOwningEngine();
AnalyzerOptions &Options = Eng.getAnalysisManager().options;
SmallStructLimit = Options.RegionStoreSmallStructLimit;
+ SmallArrayLimit = Options.RegionStoreSmallArrayLimit;
}
-
/// setImplicitDefaultValue - Set the default binding for the provided
/// MemRegion to the value implicitly defined for compound literals when
/// the value is not specified.
@@ -514,6 +498,11 @@ public: // Part of public interface to class.
RegionBindingsRef bindVector(RegionBindingsConstRef B,
const TypedValueRegion* R, SVal V);
+ Optional<RegionBindingsRef> tryBindSmallArray(RegionBindingsConstRef B,
+ const TypedValueRegion *R,
+ const ArrayType *AT,
+ nonloc::LazyCompoundVal LCV);
+
RegionBindingsRef bindArray(RegionBindingsConstRef B,
const TypedValueRegion* R,
SVal V);
@@ -674,18 +663,9 @@ public: // Part of public interface to class.
std::unique_ptr<StoreManager>
ento::CreateRegionStoreManager(ProgramStateManager &StMgr) {
- RegionStoreFeatures F = maximal_features_tag();
- return std::make_unique<RegionStoreManager>(StMgr, F);
+ return std::make_unique<RegionStoreManager>(StMgr);
}
-std::unique_ptr<StoreManager>
-ento::CreateFieldsOnlyRegionStoreManager(ProgramStateManager &StMgr) {
- RegionStoreFeatures F = minimal_features_tag();
- F.enableFields(true);
- return std::make_unique<RegionStoreManager>(StMgr, F);
-}
-
-
//===----------------------------------------------------------------------===//
// Region Cluster analysis.
//===----------------------------------------------------------------------===//
@@ -1397,10 +1377,10 @@ RegionStoreManager::invalidateRegions(Store store,
/// the array). This is called by ExprEngine when evaluating casts
/// from arrays to pointers.
SVal RegionStoreManager::ArrayToPointer(Loc Array, QualType T) {
- if (Array.getAs<loc::ConcreteInt>())
+ if (isa<loc::ConcreteInt>(Array))
return Array;
- if (!Array.getAs<loc::MemRegionVal>())
+ if (!isa<loc::MemRegionVal>(Array))
return UnknownVal();
const SubRegion *R =
@@ -1414,8 +1394,8 @@ SVal RegionStoreManager::ArrayToPointer(Loc Array, QualType T) {
//===----------------------------------------------------------------------===//
SVal RegionStoreManager::getBinding(RegionBindingsConstRef B, Loc L, QualType T) {
- assert(!L.getAs<UnknownVal>() && "location unknown");
- assert(!L.getAs<UndefinedVal>() && "location undefined");
+ assert(!isa<UnknownVal>(L) && "location unknown");
+ assert(!isa<UndefinedVal>(L) && "location undefined");
// For access to concrete addresses, return UnknownVal. Checks
// for null dereferences (and similar errors) are done by checkers, not
@@ -1789,7 +1769,7 @@ Optional<SVal> RegionStoreManager::getConstantValFromConstArrayInitializer(
SmallVector<uint64_t, 2> Extents = getConstantArrayExtents(CAT);
// The number of offsets should equal the number of extents,
- // otherwise wrong type punning occured. For instance:
+ // otherwise wrong type punning occurred. For instance:
// int arr[1][2][3];
// auto ptr = (int(*)[42])arr;
// auto x = ptr[4][2]; // UB
@@ -1983,15 +1963,9 @@ SVal RegionStoreManager::getBindingForField(RegionBindingsConstRef B,
if (const Optional<SVal> &V = B.getDirectBinding(R))
return *V;
- // Is the field declared constant and has an in-class initializer?
+ // If the containing record was initialized, try to get its constant value.
const FieldDecl *FD = R->getDecl();
QualType Ty = FD->getType();
- if (Ty.isConstQualified())
- if (const Expr *Init = FD->getInClassInitializer())
- if (Optional<SVal> V = svalBuilder.getConstantVal(Init))
- return *V;
-
- // If the containing record was initialized, try to get its constant value.
const MemRegion* superR = R->getSuperRegion();
if (const auto *VR = dyn_cast<VarRegion>(superR)) {
const VarDecl *VD = VR->getDecl();
@@ -2024,7 +1998,7 @@ RegionStoreManager::getBindingForDerivedDefaultValue(RegionBindingsConstRef B,
QualType Ty) {
if (const Optional<SVal> &D = B.getDefaultBinding(superR)) {
- const SVal &val = D.getValue();
+ const SVal &val = *D;
if (SymbolRef parentSym = val.getAsSymbol())
return svalBuilder.getDerivedRegionValueSymbolVal(parentSym, R);
@@ -2036,8 +2010,7 @@ RegionStoreManager::getBindingForDerivedDefaultValue(RegionBindingsConstRef B,
// Lazy bindings are usually handled through getExistingLazyBinding().
// We should unify these two code paths at some point.
- if (val.getAs<nonloc::LazyCompoundVal>() ||
- val.getAs<nonloc::CompoundVal>())
+ if (isa<nonloc::LazyCompoundVal, nonloc::CompoundVal>(val))
return val;
llvm_unreachable("Unknown default value");
@@ -2153,8 +2126,13 @@ RegionStoreManager::getBindingForFieldOrElementCommon(RegionBindingsConstRef B,
return UnknownVal();
// Additionally allow introspection of a block's internal layout.
- if (!hasPartialLazyBinding && !isa<BlockDataRegion>(R->getBaseRegion()))
+ // Try to get the default binding if all other attempts failed thus far.
+ // Otherwise, return UndefinedVal().
+ if (!hasPartialLazyBinding && !isa<BlockDataRegion>(R->getBaseRegion())) {
+ if (const Optional<SVal> &V = B.getDefaultBinding(R))
+ return *V;
return UndefinedVal();
+ }
}
// All other values are symbolic.
@@ -2235,7 +2213,7 @@ SVal RegionStoreManager::getBindingForVar(RegionBindingsConstRef B,
if (Optional<SVal> V = getBindingForDerivedDefaultValue(B, MS, R, T)) {
assert(!V->getAs<nonloc::LazyCompoundVal>());
- return V.getValue();
+ return *V;
}
return svalBuilder.getRegionValueSymbolVal(R);
@@ -2410,7 +2388,7 @@ RegionStoreManager::setImplicitDefaultValue(RegionBindingsConstRef B,
SVal V;
if (Loc::isLocType(T))
- V = svalBuilder.makeNull();
+ V = svalBuilder.makeNullWithType(T);
else if (T->isIntegralOrEnumerationType())
V = svalBuilder.makeZeroVal(T);
else if (T->isStructureOrClassType() || T->isArrayType()) {
@@ -2430,6 +2408,40 @@ RegionStoreManager::setImplicitDefaultValue(RegionBindingsConstRef B,
return B.addBinding(R, BindingKey::Default, V);
}
+Optional<RegionBindingsRef> RegionStoreManager::tryBindSmallArray(
+ RegionBindingsConstRef B, const TypedValueRegion *R, const ArrayType *AT,
+ nonloc::LazyCompoundVal LCV) {
+
+ auto CAT = dyn_cast<ConstantArrayType>(AT);
+
+ // If we don't know the size, create a LazyCompoundVal instead.
+ if (!CAT)
+ return None;
+
+ QualType Ty = CAT->getElementType();
+ if (!(Ty->isScalarType() || Ty->isReferenceType()))
+ return None;
+
+ // If the array is too big, create a LazyCompoundVal instead.
+ uint64_t ArrSize = CAT->getSize().getLimitedValue();
+ if (ArrSize > SmallArrayLimit)
+ return None;
+
+ RegionBindingsRef NewB = B;
+
+ for (uint64_t i = 0; i < ArrSize; ++i) {
+ auto Idx = svalBuilder.makeArrayIndex(i);
+ const ElementRegion *SrcER =
+ MRMgr.getElementRegion(Ty, Idx, LCV.getRegion(), Ctx);
+ SVal V = getBindingForElement(getRegionBindings(LCV.getStore()), SrcER);
+
+ const ElementRegion *DstER = MRMgr.getElementRegion(Ty, Idx, R, Ctx);
+ NewB = bind(NewB, loc::MemRegionVal(DstER), V);
+ }
+
+ return NewB;
+}
+
RegionBindingsRef
RegionStoreManager::bindArray(RegionBindingsConstRef B,
const TypedValueRegion* R,
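
Reduced to a standalone sketch (hypothetical names; std containers standing in
for the analyzer's store and regions), the decision tryBindSmallArray
implements is the following; std::nullopt corresponds to falling back to
bindAggregate with a LazyCompoundVal:

  #include <cstdint>
  #include <optional>
  #include <vector>

  // Copy element-by-element only when the element count is statically known
  // and does not exceed the configured limit; otherwise report "keep it lazy".
  static std::optional<std::vector<int>>
  tryCopySmallArray(const std::vector<int> &Src,
                    std::optional<uint64_t> KnownSize, uint64_t Limit) {
    if (!KnownSize || *KnownSize > Limit)
      return std::nullopt;                 // unknown size or too large
    std::vector<int> Dst;
    for (uint64_t I = 0; I < *KnownSize; ++I)
      Dst.push_back(Src[I]);               // one direct binding per element
    return Dst;
  }
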
@@ -2451,8 +2463,13 @@ RegionStoreManager::bindArray(RegionBindingsConstRef B,
}
// Handle lazy compound values.
- if (Init.getAs<nonloc::LazyCompoundVal>())
+ if (Optional<nonloc::LazyCompoundVal> LCV =
+ Init.getAs<nonloc::LazyCompoundVal>()) {
+ if (Optional<RegionBindingsRef> NewB = tryBindSmallArray(B, R, AT, *LCV))
+ return *NewB;
+
return bindAggregate(B, R, Init);
+ }
if (Init.isUnknown())
return bindAggregate(B, R, UnknownVal());
@@ -2464,7 +2481,7 @@ RegionStoreManager::bindArray(RegionBindingsConstRef B,
RegionBindingsRef NewB(B);
- for (; Size.hasValue() ? i < Size.getValue() : true ; ++i, ++VI) {
+ for (; Size ? i < *Size : true; ++i, ++VI) {
// The init list might be shorter than the array length.
if (VI == VE)
break;
@@ -2483,7 +2500,7 @@ RegionStoreManager::bindArray(RegionBindingsConstRef B,
// If the init list is shorter than the array length (or the array has
// variable length), set the array default value. Values that are already set
// are not overwritten.
- if (!Size.hasValue() || i < Size.getValue())
+ if (!Size || i < *Size)
NewB = setImplicitDefaultValue(NewB, R, ElementTy);
return NewB;
@@ -2496,13 +2513,13 @@ RegionBindingsRef RegionStoreManager::bindVector(RegionBindingsConstRef B,
const VectorType *VT = T->castAs<VectorType>(); // Use castAs for typedefs.
// Handle lazy compound values and symbolic values.
- if (V.getAs<nonloc::LazyCompoundVal>() || V.getAs<nonloc::SymbolVal>())
+ if (isa<nonloc::LazyCompoundVal, nonloc::SymbolVal>(V))
return bindAggregate(B, R, V);
// We may get a non-CompoundVal accidentally due to imprecise cast logic or
// because we are binding a symbolic struct value. Kill the field values, and
// if the value is symbolic, go and bind it as a "default" binding.
- if (!V.getAs<nonloc::CompoundVal>()) {
+ if (!isa<nonloc::CompoundVal>(V)) {
return bindAggregate(B, R, UnknownVal());
}
@@ -2570,11 +2587,8 @@ RegionStoreManager::tryBindSmallStruct(RegionBindingsConstRef B,
}
RegionBindingsRef RegionStoreManager::bindStruct(RegionBindingsConstRef B,
- const TypedValueRegion* R,
+ const TypedValueRegion *R,
SVal V) {
- if (!Features.supportsFields())
- return B;
-
QualType T = R->getValueType();
assert(T->isStructureOrClassType());
@@ -2591,13 +2605,13 @@ RegionBindingsRef RegionStoreManager::bindStruct(RegionBindingsConstRef B,
return *NewB;
return bindAggregate(B, R, V);
}
- if (V.getAs<nonloc::SymbolVal>())
+ if (isa<nonloc::SymbolVal>(V))
return bindAggregate(B, R, V);
// We may get a non-CompoundVal accidentally due to imprecise cast logic or
// because we are binding a symbolic struct value. Kill the field values, and
// if the value is symbolic, go and bind it as a "default" binding.
- if (V.isUnknown() || !V.getAs<nonloc::CompoundVal>())
+ if (V.isUnknown() || !isa<nonloc::CompoundVal>(V))
return bindAggregate(B, R, UnknownVal());
// The raw CompoundVal is essentially a symbolic InitListExpr: an (immutable)
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
index bb3261bae3bf..13fac37899cd 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SValBuilder.cpp
@@ -61,7 +61,7 @@ SValBuilder::SValBuilder(llvm::BumpPtrAllocator &alloc, ASTContext &context,
DefinedOrUnknownSVal SValBuilder::makeZeroVal(QualType type) {
if (Loc::isLocType(type))
- return makeNull();
+ return makeNullWithType(type);
if (type->isIntegralOrEnumerationType())
return makeIntVal(0, type);
@@ -74,8 +74,10 @@ DefinedOrUnknownSVal SValBuilder::makeZeroVal(QualType type) {
return UnknownVal();
}
-NonLoc SValBuilder::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
- const llvm::APSInt& rhs, QualType type) {
+nonloc::SymbolVal SValBuilder::makeNonLoc(const SymExpr *lhs,
+ BinaryOperator::Opcode op,
+ const llvm::APSInt &rhs,
+ QualType type) {
// The Environment ensures we always get a persistent APSInt in
// BasicValueFactory, so we don't need to get the APSInt from
// BasicValueFactory again.
@@ -84,23 +86,31 @@ NonLoc SValBuilder::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
return nonloc::SymbolVal(SymMgr.getSymIntExpr(lhs, op, rhs, type));
}
-NonLoc SValBuilder::makeNonLoc(const llvm::APSInt& lhs,
- BinaryOperator::Opcode op, const SymExpr *rhs,
- QualType type) {
+nonloc::SymbolVal SValBuilder::makeNonLoc(const llvm::APSInt &lhs,
+ BinaryOperator::Opcode op,
+ const SymExpr *rhs, QualType type) {
assert(rhs);
assert(!Loc::isLocType(type));
return nonloc::SymbolVal(SymMgr.getIntSymExpr(lhs, op, rhs, type));
}
-NonLoc SValBuilder::makeNonLoc(const SymExpr *lhs, BinaryOperator::Opcode op,
- const SymExpr *rhs, QualType type) {
+nonloc::SymbolVal SValBuilder::makeNonLoc(const SymExpr *lhs,
+ BinaryOperator::Opcode op,
+ const SymExpr *rhs, QualType type) {
assert(lhs && rhs);
assert(!Loc::isLocType(type));
return nonloc::SymbolVal(SymMgr.getSymSymExpr(lhs, op, rhs, type));
}
-NonLoc SValBuilder::makeNonLoc(const SymExpr *operand,
- QualType fromTy, QualType toTy) {
+NonLoc SValBuilder::makeNonLoc(const SymExpr *operand, UnaryOperator::Opcode op,
+ QualType type) {
+ assert(operand);
+ assert(!Loc::isLocType(type));
+ return nonloc::SymbolVal(SymMgr.getUnarySymExpr(operand, op, type));
+}
+
+nonloc::SymbolVal SValBuilder::makeNonLoc(const SymExpr *operand,
+ QualType fromTy, QualType toTy) {
assert(operand);
assert(!Loc::isLocType(toTy));
return nonloc::SymbolVal(SymMgr.getCastSymbol(operand, fromTy, toTy));
@@ -359,7 +369,7 @@ Optional<SVal> SValBuilder::getConstantVal(const Expr *E) {
return makeBoolVal(cast<ObjCBoolLiteralExpr>(E));
case Stmt::CXXNullPtrLiteralExprClass:
- return makeNull();
+ return makeNullWithType(E->getType());
case Stmt::CStyleCastExprClass:
case Stmt::CXXFunctionalCastExprClass:
@@ -399,7 +409,7 @@ Optional<SVal> SValBuilder::getConstantVal(const Expr *E) {
if (Loc::isLocType(E->getType()))
if (E->isNullPointerConstant(Ctx, Expr::NPC_ValueDependentIsNotNull))
- return makeNull();
+ return makeNullWithType(E->getType());
return None;
}
@@ -431,6 +441,43 @@ SVal SValBuilder::makeSymExprValNN(BinaryOperator::Opcode Op,
return UnknownVal();
}
+SVal SValBuilder::evalMinus(NonLoc X) {
+ switch (X.getSubKind()) {
+ case nonloc::ConcreteIntKind:
+ return makeIntVal(-X.castAs<nonloc::ConcreteInt>().getValue());
+ case nonloc::SymbolValKind:
+ return makeNonLoc(X.castAs<nonloc::SymbolVal>().getSymbol(), UO_Minus,
+ X.getType(Context));
+ default:
+ return UnknownVal();
+ }
+}
+
+SVal SValBuilder::evalComplement(NonLoc X) {
+ switch (X.getSubKind()) {
+ case nonloc::ConcreteIntKind:
+ return makeIntVal(~X.castAs<nonloc::ConcreteInt>().getValue());
+ case nonloc::SymbolValKind:
+ return makeNonLoc(X.castAs<nonloc::SymbolVal>().getSymbol(), UO_Not,
+ X.getType(Context));
+ default:
+ return UnknownVal();
+ }
+}
+
+SVal SValBuilder::evalUnaryOp(ProgramStateRef state, UnaryOperator::Opcode opc,
+ SVal operand, QualType type) {
+ auto OpN = operand.getAs<NonLoc>();
+ if (!OpN)
+ return UnknownVal();
+
+ if (opc == UO_Minus)
+ return evalMinus(*OpN);
+ if (opc == UO_Not)
+ return evalComplement(*OpN);
+ llvm_unreachable("Unexpected unary operator");
+}
+
SVal SValBuilder::evalBinOp(ProgramStateRef state, BinaryOperator::Opcode op,
SVal lhs, SVal rhs, QualType type) {
if (lhs.isUndef() || rhs.isUndef())
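
The split between constant folding and symbol construction in the new
evalMinus/evalComplement can be approximated standalone (std types standing in
for the SVal machinery; all names hypothetical):

  #include <cstdint>
  #include <string>
  #include <variant>

  struct Sym { std::string Name; };             // stands in for a SymbolRef
  struct UnarySym { char Op; Sym Operand; };    // stands in for UnarySymExpr
  using Val = std::variant<std::monostate, int64_t, Sym, UnarySym>;

  static Val evalMinusSketch(const Val &X) {
    if (const auto *C = std::get_if<int64_t>(&X))
      return -*C;                               // concrete: fold immediately
    if (const auto *S = std::get_if<Sym>(&X))
      return UnarySym{'-', *S};                 // symbolic: build "-x"
    return std::monostate{};                    // anything else: unknown
  }
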
@@ -439,8 +486,7 @@ SVal SValBuilder::evalBinOp(ProgramStateRef state, BinaryOperator::Opcode op,
if (lhs.isUnknown() || rhs.isUnknown())
return UnknownVal();
- if (lhs.getAs<nonloc::LazyCompoundVal>() ||
- rhs.getAs<nonloc::LazyCompoundVal>()) {
+ if (isa<nonloc::LazyCompoundVal>(lhs) || isa<nonloc::LazyCompoundVal>(rhs)) {
return UnknownVal();
}
@@ -563,8 +609,7 @@ SVal SValBuilder::evalIntegralCast(ProgramStateRef state, SVal val,
std::tie(IsNotTruncated, IsTruncated) = state->assume(CompVal);
if (!IsNotTruncated && IsTruncated) {
// Symbol is truncated so we evaluate it as a cast.
- NonLoc CastVal = makeNonLoc(se, originalTy, castTy);
- return CastVal;
+ return makeNonLoc(se, originalTy, castTy);
}
return evalCast(val, castTy, originalTy);
}
@@ -682,8 +727,11 @@ SVal SValBuilder::evalCastSubKind(loc::ConcreteInt V, QualType CastTy,
}
// Pointer to any pointer.
- if (Loc::isLocType(CastTy))
- return V;
+ if (Loc::isLocType(CastTy)) {
+ llvm::APSInt Value = V.getValue();
+ BasicVals.getAPSIntType(CastTy).apply(Value);
+ return loc::ConcreteInt(BasicVals.getValue(Value));
+ }
// Pointer to whatever else.
return UnknownVal();
@@ -742,9 +790,6 @@ SVal SValBuilder::evalCastSubKind(loc::MemRegionVal V, QualType CastTy,
// This change is needed for architectures with varying
// pointer widths. See the amdgcn opencl reproducer with
// this change as an example: solver-sym-simplification-ptr-bool.cl
- // FIXME: Cleanup remainder of `getZeroWithPtrWidth ()`
- // and `getIntWithPtrWidth()` functions to prevent future
- // confusion
if (!Ty->isReferenceType())
return makeNonLoc(Sym, BO_NE, BasicVals.getZeroWithTypeSize(Ty),
CastTy);
@@ -983,8 +1028,8 @@ SVal SValBuilder::evalCastSubKind(nonloc::SymbolVal V, QualType CastTy,
// Produce SymbolCast if CastTy and T are different integers.
// NOTE: In the end the type of SymbolCast shall be equal to CastTy.
- if (T->isIntegralOrEnumerationType() &&
- CastTy->isIntegralOrEnumerationType()) {
+ if (T->isIntegralOrUnscopedEnumerationType() &&
+ CastTy->isIntegralOrUnscopedEnumerationType()) {
AnalyzerOptions &Opts =
StateMgr.getOwningEngine().getAnalysisManager().getAnalyzerOptions();
// If appropriate option is disabled, ignore the cast.
@@ -1009,7 +1054,7 @@ SVal SValBuilder::evalCastSubKind(nonloc::PointerToMember V, QualType CastTy,
return V;
}
-SVal clang::ento::SValBuilder::simplifySymbolCast(nonloc::SymbolVal V,
+nonloc::SymbolVal SValBuilder::simplifySymbolCast(nonloc::SymbolVal V,
QualType CastTy) {
// We use seven conditions to recognize a simplification case.
// For the clarity let `CastTy` be `C`, SE->getType() - `T`, root type - `R`,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SVals.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SVals.cpp
index 117546e43b1a..67913a55b3dc 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SVals.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SVals.cpp
@@ -43,25 +43,6 @@ using namespace ento;
// Utility methods.
//===----------------------------------------------------------------------===//
-bool SVal::hasConjuredSymbol() const {
- if (Optional<nonloc::SymbolVal> SV = getAs<nonloc::SymbolVal>()) {
- SymbolRef sym = SV->getSymbol();
- if (isa<SymbolConjured>(sym))
- return true;
- }
-
- if (Optional<loc::MemRegionVal> RV = getAs<loc::MemRegionVal>()) {
- const MemRegion *R = RV->getRegion();
- if (const auto *SR = dyn_cast<SymbolicRegion>(R)) {
- SymbolRef sym = SR->getSymbol();
- if (isa<SymbolConjured>(sym))
- return true;
- }
- }
-
- return false;
-}
-
const FunctionDecl *SVal::getAsFunctionDecl() const {
if (Optional<loc::MemRegionVal> X = getAs<loc::MemRegionVal>()) {
const MemRegion* R = X->getRegion();
@@ -196,8 +177,7 @@ QualType SVal::getType(const ASTContext &Context) const {
}
const MemRegion *loc::MemRegionVal::stripCasts(bool StripBaseCasts) const {
- const MemRegion *R = getRegion();
- return R ? R->StripCasts(StripBaseCasts) : nullptr;
+ return getRegion()->StripCasts(StripBaseCasts);
}
const void *nonloc::LazyCompoundVal::getStore() const {
@@ -273,49 +253,6 @@ bool SVal::isZeroConstant() const {
}
//===----------------------------------------------------------------------===//
-// Transfer function dispatch for Non-Locs.
-//===----------------------------------------------------------------------===//
-
-SVal nonloc::ConcreteInt::evalBinOp(SValBuilder &svalBuilder,
- BinaryOperator::Opcode Op,
- const nonloc::ConcreteInt& R) const {
- const llvm::APSInt* X =
- svalBuilder.getBasicValueFactory().evalAPSInt(Op, getValue(), R.getValue());
-
- if (X)
- return nonloc::ConcreteInt(*X);
- else
- return UndefinedVal();
-}
-
-nonloc::ConcreteInt
-nonloc::ConcreteInt::evalComplement(SValBuilder &svalBuilder) const {
- return svalBuilder.makeIntVal(~getValue());
-}
-
-nonloc::ConcreteInt
-nonloc::ConcreteInt::evalMinus(SValBuilder &svalBuilder) const {
- return svalBuilder.makeIntVal(-getValue());
-}
-
-//===----------------------------------------------------------------------===//
-// Transfer function dispatch for Locs.
-//===----------------------------------------------------------------------===//
-
-SVal loc::ConcreteInt::evalBinOp(BasicValueFactory& BasicVals,
- BinaryOperator::Opcode Op,
- const loc::ConcreteInt& R) const {
- assert(BinaryOperator::isComparisonOp(Op) || Op == BO_Sub);
-
- const llvm::APSInt *X = BasicVals.evalAPSInt(Op, getValue(), R.getValue());
-
- if (X)
- return nonloc::ConcreteInt(*X);
- else
- return UndefinedVal();
-}
-
-//===----------------------------------------------------------------------===//
// Pretty-Printing.
//===----------------------------------------------------------------------===//
@@ -401,7 +338,7 @@ void NonLoc::dumpToStream(raw_ostream &os) const {
else
os << ", ";
- os << (*I).getType().getAsString();
+ os << I->getType();
}
os << '}';
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
index e1319a4c2e41..ad3110792592 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SarifDiagnostics.cpp
@@ -97,12 +97,12 @@ static std::string fileNameToURI(StringRef Filename) {
assert(Iter != End && "Expected there to be a non-root path component.");
// Add the rest of the path components, encoding any reserved characters;
// we skip past the first path component, as it was handled above.
- std::for_each(++Iter, End, [&Ret](StringRef Component) {
+ for (StringRef Component : llvm::make_range(++Iter, End)) {
// For reasons unknown to me, we may get a backslash with Windows native
// paths for the initial backslash following the drive component, which
// we need to ignore as a URI path part.
if (Component == "\\")
- return;
+ continue;
// Add the separator between the previous path part and the one being
// currently processed.
@@ -112,7 +112,7 @@ static std::string fileNameToURI(StringRef Filename) {
for (char C : Component) {
Ret += percentEncodeURICharacter(C);
}
- });
+ }
return std::string(Ret);
}
@@ -341,14 +341,14 @@ static json::Array createRules(std::vector<const PathDiagnostic *> &Diags,
json::Array Rules;
llvm::StringSet<> Seen;
- llvm::for_each(Diags, [&](const PathDiagnostic *D) {
+ for (const PathDiagnostic *D : Diags) {
StringRef RuleID = D->getCheckerName();
std::pair<llvm::StringSet<>::iterator, bool> P = Seen.insert(RuleID);
if (P.second) {
RuleMapping[RuleID] = Rules.size(); // Maps RuleID to an Array Index.
Rules.push_back(createRule(*D));
}
- });
+ }
return Rules;
}
@@ -368,10 +368,9 @@ static json::Object createRun(const LangOptions &LO,
json::Array Results, Artifacts;
StringMap<unsigned> RuleMapping;
json::Object Tool = createTool(Diags, RuleMapping);
-
- llvm::for_each(Diags, [&](const PathDiagnostic *D) {
+
+ for (const PathDiagnostic *D : Diags)
Results.push_back(createResult(LO, *D, Artifacts, RuleMapping));
- });
return json::Object{{"tool", std::move(Tool)},
{"results", std::move(Results)},
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
index f96974f97dcc..dcb6043e9df3 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleConstraintManager.cpp
@@ -22,9 +22,9 @@ namespace ento {
SimpleConstraintManager::~SimpleConstraintManager() {}
-ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef State,
- DefinedSVal Cond,
- bool Assumption) {
+ProgramStateRef SimpleConstraintManager::assumeInternal(ProgramStateRef State,
+ DefinedSVal Cond,
+ bool Assumption) {
// If we have a Loc value, cast it to a bool NonLoc first.
if (Optional<Loc> LV = Cond.getAs<Loc>()) {
SValBuilder &SVB = State->getStateManager().getSValBuilder();
@@ -44,7 +44,7 @@ ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef State,
ProgramStateRef SimpleConstraintManager::assume(ProgramStateRef State,
NonLoc Cond, bool Assumption) {
State = assumeAux(State, Cond, Assumption);
- if (NotifyAssumeClients && EE)
+ if (EE)
return EE->processAssume(State, Cond, Assumption);
return State;
}
@@ -86,12 +86,12 @@ ProgramStateRef SimpleConstraintManager::assumeAux(ProgramStateRef State,
}
case nonloc::LocAsIntegerKind:
- return assume(State, Cond.castAs<nonloc::LocAsInteger>().getLoc(),
- Assumption);
+ return assumeInternal(State, Cond.castAs<nonloc::LocAsInteger>().getLoc(),
+ Assumption);
} // end switch
}
-ProgramStateRef SimpleConstraintManager::assumeInclusiveRange(
+ProgramStateRef SimpleConstraintManager::assumeInclusiveRangeInternal(
ProgramStateRef State, NonLoc Value, const llvm::APSInt &From,
const llvm::APSInt &To, bool InRange) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
index 0bd47ced15a5..762ecc18ecea 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SimpleSValBuilder.cpp
@@ -22,6 +22,13 @@ using namespace ento;
namespace {
class SimpleSValBuilder : public SValBuilder {
+ // Query the constraint manager whether the SVal has only one possible
+ // (integer) value. If that is the case, the value is returned. Otherwise,
+ // returns NULL.
+ // This is an implementation detail. Checkers should use `getKnownValue()`
+ // instead.
+ const llvm::APSInt *getConstValue(ProgramStateRef state, SVal V);
+
// With one `simplifySValOnce` call, a compound symbol might collapse to a
// simpler symbol tree that is still possible to simplify further. Thus, we
// do the simplification on a new symbol tree until we reach the simplest
@@ -45,7 +52,7 @@ class SimpleSValBuilder : public SValBuilder {
SVal simplifyUntilFixpoint(ProgramStateRef State, SVal Val);
// Recursively descends into symbolic expressions and replaces symbols
- // with their known values (in the sense of the getKnownValue() method).
+ // with their known values (in the sense of the getConstValue() method).
// We traverse the symbol tree and query the constraint values for the
// sub-trees and if a value is a constant we do the constant folding.
SVal simplifySValOnce(ProgramStateRef State, SVal V);
@@ -56,8 +63,6 @@ public:
: SValBuilder(alloc, context, stateMgr) {}
~SimpleSValBuilder() override {}
- SVal evalMinus(NonLoc val) override;
- SVal evalComplement(NonLoc val) override;
SVal evalBinOpNN(ProgramStateRef state, BinaryOperator::Opcode op,
NonLoc lhs, NonLoc rhs, QualType resultTy) override;
SVal evalBinOpLL(ProgramStateRef state, BinaryOperator::Opcode op,
@@ -65,8 +70,9 @@ public:
SVal evalBinOpLN(ProgramStateRef state, BinaryOperator::Opcode op,
Loc lhs, NonLoc rhs, QualType resultTy) override;
- /// getKnownValue - evaluates a given SVal. If the SVal has only one possible
- /// (integer) value, that value is returned. Otherwise, returns NULL.
+ /// Evaluates a given SVal by recursively evaluating and
+ /// simplifying the children SVals. If the SVal has only one possible
+ /// (integer) value, that value is returned. Otherwise, returns NULL.
const llvm::APSInt *getKnownValue(ProgramStateRef state, SVal V) override;
SVal simplifySVal(ProgramStateRef State, SVal V) override;
@@ -82,26 +88,21 @@ SValBuilder *ento::createSimpleSValBuilder(llvm::BumpPtrAllocator &alloc,
return new SimpleSValBuilder(alloc, context, stateMgr);
}
-//===----------------------------------------------------------------------===//
-// Transfer function for unary operators.
-//===----------------------------------------------------------------------===//
-
-SVal SimpleSValBuilder::evalMinus(NonLoc val) {
- switch (val.getSubKind()) {
- case nonloc::ConcreteIntKind:
- return val.castAs<nonloc::ConcreteInt>().evalMinus(*this);
- default:
- return UnknownVal();
+// Checks whether negating the value and flipping the operation's sign
+// preserve the semantics of the operation in the result type.
+static bool isNegationValuePreserving(const llvm::APSInt &Value,
+ APSIntType ResultType) {
+ const unsigned ValueBits = Value.getSignificantBits();
+ if (ValueBits == ResultType.getBitWidth()) {
+ // The value is the lowest negative value representable in a signed
+ // integer of the result type's bit width; its negation is representable
+ // only if the result type is unsigned.
+ return ResultType.isUnsigned();
}
-}
-SVal SimpleSValBuilder::evalComplement(NonLoc X) {
- switch (X.getSubKind()) {
- case nonloc::ConcreteIntKind:
- return X.castAs<nonloc::ConcreteInt>().evalComplement(*this);
- default:
- return UnknownVal();
- }
+ // If the result type's bit width is greater than the number of bits
+ // required to represent RHS, the sign flip produces the same value.
+ return ValueBits < ResultType.getBitWidth();
}
//===----------------------------------------------------------------------===//
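
A small self-checking example of the rule the helper encodes, using plain
8-bit arithmetic (independent of the analyzer):

  #include <cassert>
  #include <cstdint>

  int main() {
    // INT8_MIN (-128) uses all 8 significant bits; its negation (+128)
    // overflows int8_t but fits uint8_t, matching the isUnsigned() branch.
    int16_t Negated = -static_cast<int16_t>(INT8_MIN); // +128, widened first
    assert(Negated > INT8_MAX);   // not value-preserving for a signed result
    assert(Negated <= UINT8_MAX); // value-preserving for an unsigned result
    // A smaller magnitude such as -5 needs fewer bits than the type offers,
    // so its negation is value-preserving for either signedness.
    return 0;
  }
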
@@ -197,6 +198,17 @@ SVal SimpleSValBuilder::MakeSymIntVal(const SymExpr *LHS,
if (RHS.isSigned() && !SymbolType->isSignedIntegerOrEnumerationType())
ConvertedRHS = &BasicVals.Convert(SymbolType, RHS);
}
+ } else if (BinaryOperator::isAdditiveOp(op) && RHS.isNegative()) {
+ // Change a+(-N) into a-N, and a-(-N) into a+N: addition/subtraction of
+ // a negative constant becomes subtraction/addition of the negated value.
+ APSIntType resultIntTy = BasicVals.getAPSIntType(resultTy);
+ if (isNegationValuePreserving(RHS, resultIntTy)) {
+ ConvertedRHS = &BasicVals.getValue(-resultIntTy.convert(RHS));
+ op = (op == BO_Add) ? BO_Sub : BO_Add;
+ } else {
+ ConvertedRHS = &BasicVals.Convert(resultTy, RHS);
+ }
} else
ConvertedRHS = &BasicVals.Convert(resultTy, RHS);
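
Once the preservation check passes, the rewrite itself is only an opcode and
sign swap; a hypothetical standalone rendering:

  #include <cstdint>
  #include <utility>

  enum class Op { Add, Sub };

  // a+(-N) -> a-N and a-(-N) -> a+N, assuming -Rhs is value-preserving.
  static std::pair<Op, int64_t> canonicalizeRhs(Op O, int64_t Rhs) {
    if (Rhs >= 0)
      return {O, Rhs};                          // already canonical
    return {O == Op::Add ? Op::Sub : Op::Add, -Rhs};
  }
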
@@ -286,7 +298,6 @@ static NonLoc doRearrangeUnchecked(ProgramStateRef State,
else
llvm_unreachable("Operation not suitable for unchecked rearrangement!");
- // FIXME: Can we use assume() without getting into an infinite recursion?
if (LSym == RSym)
return SVB.evalBinOpNN(State, Op, nonloc::ConcreteInt(LInt),
nonloc::ConcreteInt(RInt), ResultTy)
@@ -499,7 +510,7 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
llvm::APSInt LHSValue = lhs.castAs<nonloc::ConcreteInt>().getValue();
// If we're dealing with two known constants, just perform the operation.
- if (const llvm::APSInt *KnownRHSValue = getKnownValue(state, rhs)) {
+ if (const llvm::APSInt *KnownRHSValue = getConstValue(state, rhs)) {
llvm::APSInt RHSValue = *KnownRHSValue;
if (BinaryOperator::isComparisonOp(op)) {
// We're looking for a type big enough to compare the two values.
@@ -619,7 +630,7 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
}
// For now, only handle expressions whose RHS is a constant.
- if (const llvm::APSInt *RHSValue = getKnownValue(state, rhs)) {
+ if (const llvm::APSInt *RHSValue = getConstValue(state, rhs)) {
// If both the LHS and the current expression are additive,
// fold their constants and try again.
if (BinaryOperator::isAdditiveOp(op)) {
@@ -636,16 +647,26 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
const llvm::APSInt &first = IntType.convert(symIntExpr->getRHS());
const llvm::APSInt &second = IntType.convert(*RHSValue);
+ // If op and lop agree, we just need to sum the constants. Otherwise, we
+ // switch the operation type if the subtraction would produce a negative
+ // value (and thus overflow for unsigned integers); as a consequence,
+ // x+1U-10 produces x-9U instead of the x+4294967287U that would result
+ // without this additional check.
const llvm::APSInt *newRHS;
- if (lop == op)
+ if (lop == op) {
newRHS = BasicVals.evalAPSInt(BO_Add, first, second);
- else
+ } else if (first >= second) {
newRHS = BasicVals.evalAPSInt(BO_Sub, first, second);
+ op = lop;
+ } else {
+ newRHS = BasicVals.evalAPSInt(BO_Sub, second, first);
+ }
assert(newRHS && "Invalid operation despite common type!");
rhs = nonloc::ConcreteInt(*newRHS);
lhs = nonloc::SymbolVal(symIntExpr->getLHS());
- op = lop;
continue;
}
}
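
The unsigned-safe fold above, restated as a standalone sketch (hypothetical
names; uint32_t standing in for APSInt):

  #include <cstdint>
  #include <utility>

  enum class Op { Add, Sub };

  // Fold "(x lop C1) op C2" into "x newOp newC", picking the subtraction
  // direction that cannot go negative so unsigned constants never wrap.
  static std::pair<Op, uint32_t> foldAdditive(Op Lop, uint32_t C1, Op O,
                                              uint32_t C2) {
    if (Lop == O)
      return {Lop, C1 + C2};   // same direction: sum the constants
    if (C1 >= C2)
      return {Lop, C1 - C2};   // e.g. (x+10)-1 -> x+9
    return {O, C2 - C1};       // e.g. (x+1)-10 -> x-9, not x+4294967287
  }
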
@@ -656,7 +677,7 @@ SVal SimpleSValBuilder::evalBinOpNN(ProgramStateRef state,
}
// Is the RHS a constant?
- if (const llvm::APSInt *RHSValue = getKnownValue(state, rhs))
+ if (const llvm::APSInt *RHSValue = getConstValue(state, rhs))
return MakeSymIntVal(Sym, op, *RHSValue, resultTy);
if (Optional<NonLoc> V = tryRearrange(state, op, lhs, rhs, resultTy))
@@ -715,11 +736,41 @@ static SVal evalBinOpFieldRegionFieldRegion(const FieldRegion *LeftFR,
llvm_unreachable("Fields not found in parent record's definition");
}
+// This is used in debug builds only for now because some downstream users
+// may hit this assert in their subsequent merges.
+// There are still places in the analyzer where Locs of unequal bitwidths
+// are compared; these need to be found and corrected. Recent fixes have
+// addressed the known problems by making NULLs with specific bitwidths
+// for Loc comparisons, along with deprecating APIs for the same purpose.
+//
+static void assertEqualBitWidths(ProgramStateRef State, Loc RhsLoc,
+ Loc LhsLoc) {
+ // Implements a "best effort" check for RhsLoc and LhsLoc bit widths
+ ASTContext &Ctx = State->getStateManager().getContext();
+ uint64_t RhsBitwidth =
+ RhsLoc.getType(Ctx).isNull() ? 0 : Ctx.getTypeSize(RhsLoc.getType(Ctx));
+ uint64_t LhsBitwidth =
+ LhsLoc.getType(Ctx).isNull() ? 0 : Ctx.getTypeSize(LhsLoc.getType(Ctx));
+ if (RhsBitwidth && LhsBitwidth &&
+ (LhsLoc.getSubKind() == RhsLoc.getSubKind())) {
+ assert(RhsBitwidth == LhsBitwidth &&
+ "RhsLoc and LhsLoc bitwidth must be same!");
+ }
+}
+
// FIXME: all this logic will change if/when we have MemRegion::getLocation().
SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
BinaryOperator::Opcode op,
Loc lhs, Loc rhs,
QualType resultTy) {
+
+ // Assert that the bitwidths of lhs and rhs are the same. A mismatch can
+ // occur when two different address spaces with different pointer widths
+ // are involved.
+ // See LIT case clang/test/Analysis/cstring-checker-addressspace.c
+ // FIXME: See comment above in the function assertEqualBitWidths
+ assertEqualBitWidths(state, rhs, lhs);
+
// Only comparisons and subtractions are valid operations on two pointers.
// See [C99 6.5.5 through 6.5.14] or [C++0x 5.6 through 5.15].
// However, if a pointer is casted to an integer, evalBinOpNN may end up
@@ -777,6 +828,8 @@ SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
return UnknownVal();
case loc::ConcreteIntKind: {
+ auto L = lhs.castAs<loc::ConcreteInt>();
+
// If one of the operands is a symbol and the other is a constant,
// build an expression for use by the constraint manager.
if (SymbolRef rSym = rhs.getAsLocSymbol()) {
@@ -785,19 +838,17 @@ SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
if (!BinaryOperator::isComparisonOp(op) || op == BO_Cmp)
return UnknownVal();
- const llvm::APSInt &lVal = lhs.castAs<loc::ConcreteInt>().getValue();
op = BinaryOperator::reverseComparisonOp(op);
- return makeNonLoc(rSym, op, lVal, resultTy);
+ return makeNonLoc(rSym, op, L.getValue(), resultTy);
}
// If both operands are constants, just perform the operation.
if (Optional<loc::ConcreteInt> rInt = rhs.getAs<loc::ConcreteInt>()) {
- SVal ResultVal =
- lhs.castAs<loc::ConcreteInt>().evalBinOp(BasicVals, op, *rInt);
- if (Optional<NonLoc> Result = ResultVal.getAs<NonLoc>())
- return evalCast(*Result, resultTy, QualType{});
+ assert(BinaryOperator::isComparisonOp(op) || op == BO_Sub);
- assert(!ResultVal.getAs<Loc>() && "Loc-Loc ops should not produce Locs");
+ if (const auto *ResultInt =
+ BasicVals.evalAPSInt(op, L.getValue(), rInt->getValue()))
+ return evalCast(nonloc::ConcreteInt(*ResultInt), resultTy, QualType{});
return UnknownVal();
}
@@ -805,7 +856,7 @@ SVal SimpleSValBuilder::evalBinOpLL(ProgramStateRef state,
// This must come after the test if the RHS is a symbol, which is used to
// build constraints. The address of any non-symbolic region is guaranteed
// to be non-NULL, as is any label.
- assert(rhs.getAs<loc::MemRegionVal>() || rhs.getAs<loc::GotoLabel>());
+ assert((isa<loc::MemRegionVal, loc::GotoLabel>(rhs)));
if (lhs.isZeroConstant()) {
switch (op) {
default:
@@ -1114,9 +1165,8 @@ SVal SimpleSValBuilder::evalBinOpLN(ProgramStateRef state,
return UnknownVal();
}
-const llvm::APSInt *SimpleSValBuilder::getKnownValue(ProgramStateRef state,
- SVal V) {
- V = simplifySVal(state, V);
+const llvm::APSInt *SimpleSValBuilder::getConstValue(ProgramStateRef state,
+ SVal V) {
if (V.isUnknownOrUndef())
return nullptr;
@@ -1132,6 +1182,11 @@ const llvm::APSInt *SimpleSValBuilder::getKnownValue(ProgramStateRef state,
return nullptr;
}
+const llvm::APSInt *SimpleSValBuilder::getKnownValue(ProgramStateRef state,
+ SVal V) {
+ return getConstValue(state, simplifySVal(state, V));
+}
+
SVal SimpleSValBuilder::simplifyUntilFixpoint(ProgramStateRef State, SVal Val) {
SVal SimplifiedVal = simplifySValOnce(State, Val);
while (SimplifiedVal != Val) {
@@ -1198,14 +1253,12 @@ SVal SimpleSValBuilder::simplifySValOnce(ProgramStateRef State, SVal V) {
SVal VisitSymbolData(const SymbolData *S) {
// No cache here.
if (const llvm::APSInt *I =
- SVB.getKnownValue(State, SVB.makeSymbolVal(S)))
+ State->getConstraintManager().getSymVal(State, S))
return Loc::isLocType(S->getType()) ? (SVal)SVB.makeIntLocVal(*I)
: (SVal)SVB.makeIntVal(*I);
return SVB.makeSymbolVal(S);
}
- // TODO: Support SymbolCast.
-
SVal VisitSymIntExpr(const SymIntExpr *S) {
auto I = Cached.find(S);
if (I != Cached.end())
@@ -1275,6 +1328,30 @@ SVal SimpleSValBuilder::simplifySValOnce(ProgramStateRef State, SVal V) {
S, SVB.evalBinOp(State, S->getOpcode(), LHS, RHS, S->getType()));
}
+ SVal VisitSymbolCast(const SymbolCast *S) {
+ auto I = Cached.find(S);
+ if (I != Cached.end())
+ return I->second;
+ const SymExpr *OpSym = S->getOperand();
+ SVal OpVal = getConstOrVisit(OpSym);
+ if (isUnchanged(OpSym, OpVal))
+ return skip(S);
+
+ return cache(S, SVB.evalCast(OpVal, S->getType(), OpSym->getType()));
+ }
+
+ SVal VisitUnarySymExpr(const UnarySymExpr *S) {
+ auto I = Cached.find(S);
+ if (I != Cached.end())
+ return I->second;
+ SVal Op = getConstOrVisit(S->getOperand());
+ if (isUnchanged(S->getOperand(), Op))
+ return skip(S);
+
+ return cache(
+ S, SVB.evalUnaryOp(State, S->getOpcode(), Op, S->getType()));
+ }
+
SVal VisitSymExpr(SymbolRef S) { return nonloc::SymbolVal(S); }
SVal VisitMemRegion(const MemRegion *R) { return loc::MemRegionVal(R); }
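
Both new visitors follow the same memoize-then-skip shape; a compact
standalone rendering of that pattern (hypothetical types, not analyzer code):

  #include <map>

  struct Expr { const Expr *Operand = nullptr; };

  // Reuse memoized results; skip rebuilding when the operand is unchanged.
  template <typename RebuildFn>
  const Expr *visitCached(const Expr *E,
                          std::map<const Expr *, const Expr *> &Cache,
                          RebuildFn Rebuild) {
    if (auto It = Cache.find(E); It != Cache.end())
      return It->second;                        // cache hit
    const Expr *Op =
        E->Operand ? visitCached(E->Operand, Cache, Rebuild) : nullptr;
    if (Op == E->Operand)
      return Cache[E] = E;                      // unchanged: the skip(S) path
    return Cache[E] = Rebuild(E, Op);           // rebuild with simplified child
  }
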
@@ -1288,14 +1365,6 @@ SVal SimpleSValBuilder::simplifySValOnce(ProgramStateRef State, SVal V) {
SVal VisitSVal(SVal V) { return V; }
};
- // A crude way of preventing this function from calling itself from evalBinOp.
- static bool isReentering = false;
- if (isReentering)
- return V;
-
- isReentering = true;
SVal SimplifiedV = Simplifier(State).Visit(V);
- isReentering = false;
-
return SimplifiedV;
}
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp
index 2bcdb0faf5da..96e8878da616 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/Store.cpp
@@ -459,10 +459,10 @@ SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
// FIXME: For absolute pointer addresses, we just return that value back as
// well, although in reality we should return the offset added to that
// value. See also the similar FIXME in getLValueFieldOrIvar().
- if (Base.isUnknownOrUndef() || Base.getAs<loc::ConcreteInt>())
+ if (Base.isUnknownOrUndef() || isa<loc::ConcreteInt>(Base))
return Base;
- if (Base.getAs<loc::GotoLabel>())
+ if (isa<loc::GotoLabel>(Base))
return UnknownVal();
const SubRegion *BaseRegion =
@@ -488,7 +488,7 @@ SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
SVal BaseIdx = ElemR->getIndex();
- if (!BaseIdx.getAs<nonloc::ConcreteInt>())
+ if (!isa<nonloc::ConcreteInt>(BaseIdx))
return UnknownVal();
const llvm::APSInt &BaseIdxI =
@@ -497,7 +497,7 @@ SVal StoreManager::getLValueElement(QualType elementType, NonLoc Offset,
// Only allow non-integer offsets if the base region has no offset itself.
// FIXME: This is a somewhat arbitrary restriction. We should be using
// SValBuilder here to add the two offsets without checking their types.
- if (!Offset.getAs<nonloc::ConcreteInt>()) {
+ if (!isa<nonloc::ConcreteInt>(Offset)) {
if (isa<ElementRegion>(BaseRegion->StripCasts()))
return UnknownVal();
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
index 1ae1f97efd2e..2227bd324adc 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/SymbolManager.cpp
@@ -65,14 +65,23 @@ void BinarySymExpr::dumpToStreamImpl(raw_ostream &OS,
}
void SymbolCast::dumpToStream(raw_ostream &os) const {
- os << '(' << ToTy.getAsString() << ") (";
+ os << '(' << ToTy << ") (";
Operand->dumpToStream(os);
os << ')';
}
+void UnarySymExpr::dumpToStream(raw_ostream &os) const {
+ os << UnaryOperator::getOpcodeStr(Op);
+ bool Binary = isa<BinarySymExpr>(Operand);
+ if (Binary)
+ os << '(';
+ Operand->dumpToStream(os);
+ if (Binary)
+ os << ')';
+}
+
void SymbolConjured::dumpToStream(raw_ostream &os) const {
- os << getKindStr() << getSymbolID() << '{' << T.getAsString() << ", LC"
- << LCtx->getID();
+ os << getKindStr() << getSymbolID() << '{' << T << ", LC" << LCtx->getID();
if (S)
os << ", S" << S->getID(LCtx->getDecl()->getASTContext());
else
@@ -90,15 +99,13 @@ void SymbolExtent::dumpToStream(raw_ostream &os) const {
}
void SymbolMetadata::dumpToStream(raw_ostream &os) const {
- os << getKindStr() << getSymbolID() << '{' << getRegion() << ','
- << T.getAsString() << '}';
+ os << getKindStr() << getSymbolID() << '{' << getRegion() << ',' << T << '}';
}
void SymbolData::anchor() {}
void SymbolRegionValue::dumpToStream(raw_ostream &os) const {
- os << getKindStr() << getSymbolID() << '<' << getType().getAsString() << ' '
- << R << '>';
+ os << getKindStr() << getSymbolID() << '<' << getType() << ' ' << R << '>';
}
bool SymExpr::symbol_iterator::operator==(const symbol_iterator &X) const {
@@ -137,6 +144,9 @@ void SymExpr::symbol_iterator::expand() {
case SymExpr::SymbolCastKind:
itr.push_back(cast<SymbolCast>(SE)->getOperand());
return;
+ case SymExpr::UnarySymExprKind:
+ itr.push_back(cast<UnarySymExpr>(SE)->getOperand());
+ return;
case SymExpr::SymIntExprKind:
itr.push_back(cast<SymIntExpr>(SE)->getLHS());
return;
@@ -309,6 +319,22 @@ const SymSymExpr *SymbolManager::getSymSymExpr(const SymExpr *lhs,
return cast<SymSymExpr>(data);
}
+const UnarySymExpr *SymbolManager::getUnarySymExpr(const SymExpr *Operand,
+ UnaryOperator::Opcode Opc,
+ QualType T) {
+ llvm::FoldingSetNodeID ID;
+ UnarySymExpr::Profile(ID, Operand, Opc, T);
+ void *InsertPos;
+ SymExpr *data = DataSet.FindNodeOrInsertPos(ID, InsertPos);
+ if (!data) {
+ data = (UnarySymExpr *)BPAlloc.Allocate<UnarySymExpr>();
+ new (data) UnarySymExpr(Operand, Opc, T);
+ DataSet.InsertNode(data, InsertPos);
+ }
+
+ return cast<UnarySymExpr>(data);
+}
+
QualType SymbolConjured::getType() const {
return T;
}
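
getUnarySymExpr follows SymbolManager's usual hash-consing discipline; the
Profile/FindNodeOrInsertPos/InsertNode sequence amounts to this standalone
pattern (std::map standing in for the FoldingSet):

  #include <map>
  #include <memory>
  #include <tuple>

  struct Node { int Opcode; const Node *Operand; };

  class Uniquer {
    std::map<std::tuple<int, const Node *>, std::unique_ptr<Node>> Nodes;

  public:
    // Structurally identical requests return one canonical pointer.
    const Node *get(int Opcode, const Node *Operand) {
      auto Key = std::make_tuple(Opcode, Operand); // role of Profile()
      if (auto It = Nodes.find(Key); It != Nodes.end())
        return It->second.get();                   // existing node: reuse
      auto N = std::make_unique<Node>(Node{Opcode, Operand});
      const Node *Raw = N.get();
      Nodes.emplace(Key, std::move(N));            // role of InsertNode()
      return Raw;
    }
  };
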
@@ -468,6 +494,9 @@ bool SymbolReaper::isLive(SymbolRef sym) {
case SymExpr::SymbolCastKind:
KnownLive = isLive(cast<SymbolCast>(sym)->getOperand());
break;
+ case SymExpr::UnarySymExprKind:
+ KnownLive = isLive(cast<UnarySymExpr>(sym)->getOperand());
+ break;
}
if (KnownLive)
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp
index 4f3be7cae331..48c82cfb82b2 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/TextDiagnostics.cpp
@@ -129,7 +129,7 @@ public:
Rewriter Rewrite(SM, LO);
if (!applyAllReplacements(Repls, Rewrite)) {
- llvm::errs() << "An error occured during applying fix-it.\n";
+ llvm::errs() << "An error occurred during applying fix-it.\n";
}
Rewrite.overwriteChangedFiles();
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/WorkList.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/WorkList.cpp
index 348552ba73a9..7042a9020837 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/WorkList.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Core/WorkList.cpp
@@ -205,12 +205,6 @@ class UnexploredFirstPriorityQueue : public WorkList {
using QueuePriority = std::pair<int, unsigned long>;
using QueueItem = std::pair<WorkListUnit, QueuePriority>;
- struct ExplorationComparator {
- bool operator() (const QueueItem &LHS, const QueueItem &RHS) {
- return LHS.second < RHS.second;
- }
- };
-
// Number of inserted nodes, used to emulate DFS ordering in the priority
// queue when insertions are equal.
unsigned long Counter = 0;
@@ -219,7 +213,7 @@ class UnexploredFirstPriorityQueue : public WorkList {
VisitedTimesMap NumReached;
// The top item is the largest one.
- llvm::PriorityQueue<QueueItem, std::vector<QueueItem>, ExplorationComparator>
+ llvm::PriorityQueue<QueueItem, std::vector<QueueItem>, llvm::less_second>
queue;
public:
@@ -267,12 +261,6 @@ class UnexploredFirstPriorityLocationQueue : public WorkList {
using QueuePriority = std::pair<int, unsigned long>;
using QueueItem = std::pair<WorkListUnit, QueuePriority>;
- struct ExplorationComparator {
- bool operator() (const QueueItem &LHS, const QueueItem &RHS) {
- return LHS.second < RHS.second;
- }
- };
-
// Number of inserted nodes, used to emulate DFS ordering in the priority
// queue when insertions are equal.
unsigned long Counter = 0;
@@ -281,7 +269,7 @@ class UnexploredFirstPriorityLocationQueue : public WorkList {
VisitedTimesMap NumReached;
// The top item is the largest one.
- llvm::PriorityQueue<QueueItem, std::vector<QueueItem>, ExplorationComparator>
+ llvm::PriorityQueue<QueueItem, std::vector<QueueItem>, llvm::less_second>
queue;
public:
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
index b152f9d80ef5..ca0a66f54ced 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalysisConsumer.cpp
@@ -171,13 +171,7 @@ public:
}
// Create the analyzer component creators.
- switch (Opts->AnalysisStoreOpt) {
- default:
- llvm_unreachable("Unknown store manager.");
-#define ANALYSIS_STORE(NAME, CMDFLAG, DESC, CREATEFN) \
- case NAME##Model: CreateStoreMgr = CREATEFN; break;
-#include "clang/StaticAnalyzer/Core/Analyses.def"
- }
+ CreateStoreMgr = &CreateRegionStoreManager;
switch (Opts->AnalysisConstraintsOpt) {
default:
@@ -289,7 +283,7 @@ public:
return true;
if (VD->hasExternalStorage() || VD->isStaticDataMember()) {
- if (!cross_tu::containsConst(VD, *Ctx))
+ if (!cross_tu::shouldImport(VD, *Ctx))
return true;
} else {
// Cannot be initialized in another TU.
@@ -358,7 +352,6 @@ public:
private:
void storeTopLevelDecls(DeclGroupRef DG);
- std::string getFunctionName(const Decl *D);
/// Check if we should skip (not analyze) the given function.
AnalysisMode getModeForDecl(Decl *D, AnalysisMode Mode);
@@ -383,14 +376,14 @@ void AnalysisConsumer::HandleTopLevelDeclInObjCContainer(DeclGroupRef DG) {
}
void AnalysisConsumer::storeTopLevelDecls(DeclGroupRef DG) {
- for (DeclGroupRef::iterator I = DG.begin(), E = DG.end(); I != E; ++I) {
+ for (auto &I : DG) {
// Skip ObjCMethodDecl, wait for the objc container to avoid
// analyzing twice.
- if (isa<ObjCMethodDecl>(*I))
+ if (isa<ObjCMethodDecl>(I))
continue;
- LocalTUDecls.push_back(*I);
+ LocalTUDecls.push_back(I);
}
}
@@ -462,11 +455,9 @@ void AnalysisConsumer::HandleDeclsCallGraph(const unsigned LocalTUDeclsSize) {
SetOfConstDecls Visited;
SetOfConstDecls VisitedAsTopLevel;
llvm::ReversePostOrderTraversal<clang::CallGraph*> RPOT(&CG);
- for (llvm::ReversePostOrderTraversal<clang::CallGraph*>::rpo_iterator
- I = RPOT.begin(), E = RPOT.end(); I != E; ++I) {
+ for (auto &N : RPOT) {
NumFunctionTopLevel++;
- CallGraphNode *N = *I;
Decl *D = N->getDecl();
// Skip the abstract root node.
@@ -478,6 +469,18 @@ void AnalysisConsumer::HandleDeclsCallGraph(const unsigned LocalTUDeclsSize) {
if (shouldSkipFunction(D, Visited, VisitedAsTopLevel))
continue;
+ // The CallGraph might have declarations as callees. However, during CTU
+ // analysis the declaration might form a declaration chain with the newly
+ // imported definition from another TU. In this case we don't want to
+ // analyze the function definition as top-level.
+ if (const auto *FD = dyn_cast<FunctionDecl>(D)) {
+ // Calling 'hasBody' replaces 'FD' in place with the FunctionDecl
+ // that has the body.
+ FD->hasBody(FD);
+ if (CTU.isImportedAsNew(FD))
+ continue;
+ }
+
// Analyze the function.
SetOfConstDecls VisitedCallees;
@@ -501,6 +504,28 @@ static bool fileContainsString(StringRef Substring, ASTContext &C) {
return Buffer.contains(Substring);
}
+static void reportAnalyzerFunctionMisuse(const AnalyzerOptions &Opts,
+ const ASTContext &Ctx) {
+ llvm::errs() << "Every top-level function was skipped.\n";
+
+ if (!Opts.AnalyzerDisplayProgress)
+ llvm::errs() << "Pass the -analyzer-display-progress for tracking which "
+ "functions are analyzed.\n";
+
+ bool HasBrackets =
+ Opts.AnalyzeSpecificFunction.find("(") != std::string::npos;
+
+ if (Ctx.getLangOpts().CPlusPlus && !HasBrackets) {
+ llvm::errs()
+ << "For analyzing C++ code you need to pass the function parameter "
+ "list: -analyze-function=\"foobar(int, _Bool)\"\n";
+ } else if (!Ctx.getLangOpts().CPlusPlus && HasBrackets) {
+ llvm::errs() << "For analyzing C code you shouldn't pass the function "
+ "parameter list, only the name of the function: "
+ "-analyze-function=foobar\n";
+ }
+}
+
void AnalysisConsumer::runAnalysisOnTranslationUnit(ASTContext &C) {
BugReporter BR(*Mgr);
TranslationUnitDecl *TU = C.getTranslationUnitDecl();
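
The two invocation shapes the hint above distinguishes look roughly like this
(file names hypothetical; -Xclang forwarding assumed, since -analyze-function
is a frontend option):

  clang --analyze -Xclang -analyze-function="foobar(int, _Bool)" file.cpp
  clang --analyze -Xclang -analyze-function=foobar file.c
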
@@ -537,6 +562,14 @@ void AnalysisConsumer::runAnalysisOnTranslationUnit(ASTContext &C) {
BR.FlushReports();
RecVisitorBR = nullptr;
+
+ // If the user wanted to analyze a specific function and the number of basic
+ // blocks analyzed is zero, then the user might not have specified the
+ // function name correctly.
+ // FIXME: The user might have analyzed the requested function in Syntax mode,
+ // but we are unaware of that.
+ if (!Opts->AnalyzeSpecificFunction.empty() && NumFunctionsAnalyzed == 0)
+ reportAnalyzerFunctionMisuse(*Opts, *Ctx);
}
void AnalysisConsumer::reportAnalyzerProgress(StringRef S) {
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalyzerHelpFlags.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalyzerHelpFlags.cpp
index eb6014a0629d..7cd15f0f6595 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalyzerHelpFlags.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/AnalyzerHelpFlags.cpp
@@ -101,10 +101,7 @@ OPTIONS:
#undef ANALYZER_OPTION_DEPENDS_ON_USER_MODE
};
- llvm::sort(PrintableOptions, [](const OptionAndDescriptionTy &LHS,
- const OptionAndDescriptionTy &RHS) {
- return LHS.first < RHS.first;
- });
+ llvm::sort(PrintableOptions, llvm::less_first());
for (const auto &Pair : PrintableOptions) {
AnalyzerOptions::printFormattedEntry(out, Pair, /*InitialPad*/ 2,
diff --git a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp
index 528284ca8985..9ee6ef4f9519 100644
--- a/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp
+++ b/contrib/llvm-project/clang/lib/StaticAnalyzer/Frontend/CheckerRegistry.cpp
@@ -480,9 +480,7 @@ static void isOptionContainedIn(const CmdLineOptionList &OptionList,
return Opt.OptionName == SuppliedOption;
};
- const auto *OptionIt = llvm::find_if(OptionList, SameOptName);
-
- if (OptionIt == OptionList.end()) {
+ if (llvm::none_of(OptionList, SameOptName)) {
Diags.Report(diag::err_analyzer_checker_option_unknown)
<< SuppliedChecker << SuppliedOption;
return;
diff --git a/contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp
new file mode 100644
index 000000000000..d4d3f22c9327
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -0,0 +1,955 @@
+//===- RISCVVIntrinsicUtils.cpp - RISC-V Vector Intrinsic Utils -*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Support/RISCVVIntrinsicUtils.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/StringExtras.h"
+#include "llvm/ADT/StringMap.h"
+#include "llvm/ADT/StringSet.h"
+#include "llvm/ADT/Twine.h"
+#include "llvm/Support/raw_ostream.h"
+#include <numeric>
+#include <set>
+#include <unordered_map>
+
+using namespace llvm;
+
+namespace clang {
+namespace RISCV {
+
+const PrototypeDescriptor PrototypeDescriptor::Mask = PrototypeDescriptor(
+ BaseTypeModifier::Vector, VectorTypeModifier::MaskVector);
+const PrototypeDescriptor PrototypeDescriptor::VL =
+ PrototypeDescriptor(BaseTypeModifier::SizeT);
+const PrototypeDescriptor PrototypeDescriptor::Vector =
+ PrototypeDescriptor(BaseTypeModifier::Vector);
+
+//===----------------------------------------------------------------------===//
+// Type implementation
+//===----------------------------------------------------------------------===//
+
+LMULType::LMULType(int NewLog2LMUL) {
+ // Check Log2LMUL is -3, -2, -1, 0, 1, 2, 3
+ assert(NewLog2LMUL <= 3 && NewLog2LMUL >= -3 && "Bad LMUL number!");
+ Log2LMUL = NewLog2LMUL;
+}
+
+std::string LMULType::str() const {
+ if (Log2LMUL < 0)
+ return "mf" + utostr(1ULL << (-Log2LMUL));
+ return "m" + utostr(1ULL << Log2LMUL);
+}
+
+VScaleVal LMULType::getScale(unsigned ElementBitwidth) const {
+ int Log2ScaleResult = 0;
+ switch (ElementBitwidth) {
+ default:
+ break;
+ case 8:
+ Log2ScaleResult = Log2LMUL + 3;
+ break;
+ case 16:
+ Log2ScaleResult = Log2LMUL + 2;
+ break;
+ case 32:
+ Log2ScaleResult = Log2LMUL + 1;
+ break;
+ case 64:
+ Log2ScaleResult = Log2LMUL;
+ break;
+ }
+ // Illegal vscale result would be less than 1
+ if (Log2ScaleResult < 0)
+ return llvm::None;
+ return 1 << Log2ScaleResult;
+}
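
The computation is scale = LMUL * 64 / SEW, carried out in log2 so fractional
LMULs stay exact; a standalone re-derivation with self-checks against the
type table below:

  #include <cassert>
  #include <optional>

  static std::optional<unsigned> scaleOf(int Log2LMUL, unsigned Log2SEW) {
    int Log2Scale = Log2LMUL + 6 - static_cast<int>(Log2SEW); // 6 == log2(64)
    if (Log2Scale < 0)
      return std::nullopt;          // a vscale below 1 is illegal
    return 1u << Log2Scale;
  }

  int main() {
    assert(scaleOf(/*LMUL=2*/ 1, /*log2(SEW=32)*/ 5) == 4u);  // nxv4i32
    assert(!scaleOf(/*LMUL=1/2*/ -1, /*log2(SEW=64)*/ 6));    // N/A entry
    return 0;
  }
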
+
+void LMULType::MulLog2LMUL(int log2LMUL) { Log2LMUL += log2LMUL; }
+
+RVVType::RVVType(BasicType BT, int Log2LMUL,
+ const PrototypeDescriptor &prototype)
+ : BT(BT), LMUL(LMULType(Log2LMUL)) {
+ applyBasicType();
+ applyModifier(prototype);
+ Valid = verifyType();
+ if (Valid) {
+ initBuiltinStr();
+ initTypeStr();
+ if (isVector()) {
+ initClangBuiltinStr();
+ }
+ }
+}
+
+// clang-format off
+// Boolean types are encoded by the ratio n = SEW/LMUL
+// SEW/LMUL | 1 | 2 | 4 | 8 | 16 | 32 | 64
+// c type | vbool64_t | vbool32_t | vbool16_t | vbool8_t | vbool4_t | vbool2_t | vbool1_t
+// IR type | nxv1i1 | nxv2i1 | nxv4i1 | nxv8i1 | nxv16i1 | nxv32i1 | nxv64i1
+
+// type\lmul | 1/8 | 1/4 | 1/2 | 1 | 2 | 4 | 8
+// -------- |------ | -------- | ------- | ------- | -------- | -------- | --------
+// i64 | N/A | N/A | N/A | nxv1i64 | nxv2i64 | nxv4i64 | nxv8i64
+// i32 | N/A | N/A | nxv1i32 | nxv2i32 | nxv4i32 | nxv8i32 | nxv16i32
+// i16 | N/A | nxv1i16 | nxv2i16 | nxv4i16 | nxv8i16 | nxv16i16 | nxv32i16
+// i8 | nxv1i8 | nxv2i8 | nxv4i8 | nxv8i8 | nxv16i8 | nxv32i8 | nxv64i8
+// double | N/A | N/A | N/A | nxv1f64 | nxv2f64 | nxv4f64 | nxv8f64
+// float | N/A | N/A | nxv1f32 | nxv2f32 | nxv4f32 | nxv8f32 | nxv16f32
+// half | N/A | nxv1f16 | nxv2f16 | nxv4f16 | nxv8f16 | nxv16f16 | nxv32f16
+// clang-format on
+
+bool RVVType::verifyType() const {
+ if (ScalarType == Invalid)
+ return false;
+ if (isScalar())
+ return true;
+ if (!Scale)
+ return false;
+ if (isFloat() && ElementBitwidth == 8)
+ return false;
+ unsigned V = Scale.getValue();
+ switch (ElementBitwidth) {
+ case 1:
+ case 8:
+ // Check Scale is 1,2,4,8,16,32,64
+ return (V <= 64 && isPowerOf2_32(V));
+ case 16:
+ // Check Scale is 1,2,4,8,16,32
+ return (V <= 32 && isPowerOf2_32(V));
+ case 32:
+ // Check Scale is 1,2,4,8,16
+ return (V <= 16 && isPowerOf2_32(V));
+ case 64:
+ // Check Scale is 1,2,4,8
+ return (V <= 8 && isPowerOf2_32(V));
+ }
+ return false;
+}
+
+void RVVType::initBuiltinStr() {
+ assert(isValid() && "RVVType is invalid");
+ switch (ScalarType) {
+ case ScalarTypeKind::Void:
+ BuiltinStr = "v";
+ return;
+ case ScalarTypeKind::Size_t:
+ BuiltinStr = "z";
+ if (IsImmediate)
+ BuiltinStr = "I" + BuiltinStr;
+ if (IsPointer)
+ BuiltinStr += "*";
+ return;
+ case ScalarTypeKind::Ptrdiff_t:
+ BuiltinStr = "Y";
+ return;
+ case ScalarTypeKind::UnsignedLong:
+ BuiltinStr = "ULi";
+ return;
+ case ScalarTypeKind::SignedLong:
+ BuiltinStr = "Li";
+ return;
+ case ScalarTypeKind::Boolean:
+ assert(ElementBitwidth == 1);
+ BuiltinStr += "b";
+ break;
+ case ScalarTypeKind::SignedInteger:
+ case ScalarTypeKind::UnsignedInteger:
+ switch (ElementBitwidth) {
+ case 8:
+ BuiltinStr += "c";
+ break;
+ case 16:
+ BuiltinStr += "s";
+ break;
+ case 32:
+ BuiltinStr += "i";
+ break;
+ case 64:
+ BuiltinStr += "Wi";
+ break;
+ default:
+ llvm_unreachable("Unhandled ElementBitwidth!");
+ }
+ if (isSignedInteger())
+ BuiltinStr = "S" + BuiltinStr;
+ else
+ BuiltinStr = "U" + BuiltinStr;
+ break;
+ case ScalarTypeKind::Float:
+ switch (ElementBitwidth) {
+ case 16:
+ BuiltinStr += "x";
+ break;
+ case 32:
+ BuiltinStr += "f";
+ break;
+ case 64:
+ BuiltinStr += "d";
+ break;
+ default:
+ llvm_unreachable("Unhandled ElementBitwidth!");
+ }
+ break;
+ default:
+ llvm_unreachable("ScalarType is invalid!");
+ }
+ if (IsImmediate)
+ BuiltinStr = "I" + BuiltinStr;
+ if (isScalar()) {
+ if (IsConstant)
+ BuiltinStr += "C";
+ if (IsPointer)
+ BuiltinStr += "*";
+ return;
+ }
+ BuiltinStr = "q" + utostr(*Scale) + BuiltinStr;
+ // Pointer to vector types, defined for segment load intrinsics: segment
+ // load intrinsics take pointer-type arguments through which the loaded
+ // vector values are stored.
+ if (IsPointer)
+ BuiltinStr += "*";
+}
+
+void RVVType::initClangBuiltinStr() {
+ assert(isValid() && "RVVType is invalid");
+ assert(isVector() && "Handle Vector type only");
+
+ ClangBuiltinStr = "__rvv_";
+ switch (ScalarType) {
+ case ScalarTypeKind::Boolean:
+ ClangBuiltinStr += "bool" + utostr(64 / *Scale) + "_t";
+ return;
+ case ScalarTypeKind::Float:
+ ClangBuiltinStr += "float";
+ break;
+ case ScalarTypeKind::SignedInteger:
+ ClangBuiltinStr += "int";
+ break;
+ case ScalarTypeKind::UnsignedInteger:
+ ClangBuiltinStr += "uint";
+ break;
+ default:
+ llvm_unreachable("ScalarTypeKind is invalid");
+ }
+ ClangBuiltinStr += utostr(ElementBitwidth) + LMUL.str() + "_t";
+}
+
+void RVVType::initTypeStr() {
+ assert(isValid() && "RVVType is invalid");
+
+ if (IsConstant)
+ Str += "const ";
+
+ auto getTypeString = [&](StringRef TypeStr) {
+ if (isScalar())
+ return Twine(TypeStr + Twine(ElementBitwidth) + "_t").str();
+ return Twine("v" + TypeStr + Twine(ElementBitwidth) + LMUL.str() + "_t")
+ .str();
+ };
+
+ switch (ScalarType) {
+ case ScalarTypeKind::Void:
+ Str = "void";
+ return;
+ case ScalarTypeKind::Size_t:
+ Str = "size_t";
+ if (IsPointer)
+ Str += " *";
+ return;
+ case ScalarTypeKind::Ptrdiff_t:
+ Str = "ptrdiff_t";
+ return;
+ case ScalarTypeKind::UnsignedLong:
+ Str = "unsigned long";
+ return;
+ case ScalarTypeKind::SignedLong:
+ Str = "long";
+ return;
+ case ScalarTypeKind::Boolean:
+ if (isScalar())
+ Str += "bool";
+ else
+ // Vector bool is a special case; the formula is
+ // `vbool<N>_t = MVT::nxv<64/N>i1`, e.g. vbool16_t = MVT::nxv4i1
+ Str += "vbool" + utostr(64 / *Scale) + "_t";
+ break;
+ case ScalarTypeKind::Float:
+ if (isScalar()) {
+ if (ElementBitwidth == 64)
+ Str += "double";
+ else if (ElementBitwidth == 32)
+ Str += "float";
+ else if (ElementBitwidth == 16)
+ Str += "_Float16";
+ else
+ llvm_unreachable("Unhandled floating type.");
+ } else
+ Str += getTypeString("float");
+ break;
+ case ScalarTypeKind::SignedInteger:
+ Str += getTypeString("int");
+ break;
+ case ScalarTypeKind::UnsignedInteger:
+ Str += getTypeString("uint");
+ break;
+ default:
+ llvm_unreachable("ScalarType is invalid!");
+ }
+ if (IsPointer)
+ Str += " *";
+}
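+// Illustrative outputs of the rules above: a scalar 32-bit float prints
+// "float"; a signed 8-bit vector with LMUL.str() == "mf8" prints
+// "vint8mf8_t"; with IsConstant and IsPointer set it becomes
+// "const vint8mf8_t *".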
+
+void RVVType::initShortStr() {
+ switch (ScalarType) {
+ case ScalarTypeKind::Boolean:
+ assert(isVector());
+ ShortStr = "b" + utostr(64 / *Scale);
+ return;
+ case ScalarTypeKind::Float:
+ ShortStr = "f" + utostr(ElementBitwidth);
+ break;
+ case ScalarTypeKind::SignedInteger:
+ ShortStr = "i" + utostr(ElementBitwidth);
+ break;
+ case ScalarTypeKind::UnsignedInteger:
+ ShortStr = "u" + utostr(ElementBitwidth);
+ break;
+ default:
+ llvm_unreachable("Unhandled case!");
+ }
+ if (isVector())
+ ShortStr += LMUL.str();
+}
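+// The short string is used as the type suffix in intrinsic names, e.g.
+// "i32m2" for a signed 32-bit vector at LMUL m2, or "b4" for a mask type
+// with Scale = 16 (64 / 16 == 4).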
+
+void RVVType::applyBasicType() {
+ switch (BT) {
+ case BasicType::Int8:
+ ElementBitwidth = 8;
+ ScalarType = ScalarTypeKind::SignedInteger;
+ break;
+ case BasicType::Int16:
+ ElementBitwidth = 16;
+ ScalarType = ScalarTypeKind::SignedInteger;
+ break;
+ case BasicType::Int32:
+ ElementBitwidth = 32;
+ ScalarType = ScalarTypeKind::SignedInteger;
+ break;
+ case BasicType::Int64:
+ ElementBitwidth = 64;
+ ScalarType = ScalarTypeKind::SignedInteger;
+ break;
+ case BasicType::Float16:
+ ElementBitwidth = 16;
+ ScalarType = ScalarTypeKind::Float;
+ break;
+ case BasicType::Float32:
+ ElementBitwidth = 32;
+ ScalarType = ScalarTypeKind::Float;
+ break;
+ case BasicType::Float64:
+ ElementBitwidth = 64;
+ ScalarType = ScalarTypeKind::Float;
+ break;
+ default:
+ llvm_unreachable("Unhandled type code!");
+ }
+ assert(ElementBitwidth != 0 && "Bad element bitwidth!");
+}
+
+Optional<PrototypeDescriptor> PrototypeDescriptor::parsePrototypeDescriptor(
+ llvm::StringRef PrototypeDescriptorStr) {
+ PrototypeDescriptor PD;
+ BaseTypeModifier PT = BaseTypeModifier::Invalid;
+ VectorTypeModifier VTM = VectorTypeModifier::NoModifier;
+
+ if (PrototypeDescriptorStr.empty())
+ return PD;
+
+ // Handle base type modifier
+ auto PType = PrototypeDescriptorStr.back();
+ switch (PType) {
+ case 'e':
+ PT = BaseTypeModifier::Scalar;
+ break;
+ case 'v':
+ PT = BaseTypeModifier::Vector;
+ break;
+ case 'w':
+ PT = BaseTypeModifier::Vector;
+ VTM = VectorTypeModifier::Widening2XVector;
+ break;
+ case 'q':
+ PT = BaseTypeModifier::Vector;
+ VTM = VectorTypeModifier::Widening4XVector;
+ break;
+ case 'o':
+ PT = BaseTypeModifier::Vector;
+ VTM = VectorTypeModifier::Widening8XVector;
+ break;
+ case 'm':
+ PT = BaseTypeModifier::Vector;
+ VTM = VectorTypeModifier::MaskVector;
+ break;
+ case '0':
+ PT = BaseTypeModifier::Void;
+ break;
+ case 'z':
+ PT = BaseTypeModifier::SizeT;
+ break;
+ case 't':
+ PT = BaseTypeModifier::Ptrdiff;
+ break;
+ case 'u':
+ PT = BaseTypeModifier::UnsignedLong;
+ break;
+ case 'l':
+ PT = BaseTypeModifier::SignedLong;
+ break;
+ default:
+ llvm_unreachable("Illegal primitive type transformers!");
+ }
+ PD.PT = static_cast<uint8_t>(PT);
+ PrototypeDescriptorStr = PrototypeDescriptorStr.drop_back();
+
+ // Parse the vector type modifier; it may appear at most once.
+ if (PrototypeDescriptorStr.startswith("(")) {
+ assert(VTM == VectorTypeModifier::NoModifier &&
+ "VectorTypeModifier should only have one modifier");
+ size_t Idx = PrototypeDescriptorStr.find(')');
+ assert(Idx != StringRef::npos);
+ StringRef ComplexType = PrototypeDescriptorStr.slice(1, Idx);
+ PrototypeDescriptorStr = PrototypeDescriptorStr.drop_front(Idx + 1);
+ assert(!PrototypeDescriptorStr.contains('(') &&
+ "Only allow one vector type modifier");
+
+ auto ComplexTT = ComplexType.split(":");
+ if (ComplexTT.first == "Log2EEW") {
+ uint32_t Log2EEW;
+ if (ComplexTT.second.getAsInteger(10, Log2EEW)) {
+ llvm_unreachable("Invalid Log2EEW value!");
+ return None;
+ }
+ switch (Log2EEW) {
+ case 3:
+ VTM = VectorTypeModifier::Log2EEW3;
+ break;
+ case 4:
+ VTM = VectorTypeModifier::Log2EEW4;
+ break;
+ case 5:
+ VTM = VectorTypeModifier::Log2EEW5;
+ break;
+ case 6:
+ VTM = VectorTypeModifier::Log2EEW6;
+ break;
+ default:
+ llvm_unreachable("Invalid Log2EEW value, should be [3-6]");
+ return None;
+ }
+ } else if (ComplexTT.first == "FixedSEW") {
+ uint32_t NewSEW;
+ if (ComplexTT.second.getAsInteger(10, NewSEW)) {
+ llvm_unreachable("Invalid FixedSEW value!");
+ return None;
+ }
+ switch (NewSEW) {
+ case 8:
+ VTM = VectorTypeModifier::FixedSEW8;
+ break;
+ case 16:
+ VTM = VectorTypeModifier::FixedSEW16;
+ break;
+ case 32:
+ VTM = VectorTypeModifier::FixedSEW32;
+ break;
+ case 64:
+ VTM = VectorTypeModifier::FixedSEW64;
+ break;
+ default:
+ llvm_unreachable("Invalid FixedSEW value, should be 8, 16, 32 or 64");
+ return None;
+ }
+ } else if (ComplexTT.first == "LFixedLog2LMUL") {
+ int32_t Log2LMUL;
+ if (ComplexTT.second.getAsInteger(10, Log2LMUL)) {
+ llvm_unreachable("Invalid LFixedLog2LMUL value!");
+ return None;
+ }
+ switch (Log2LMUL) {
+ case -3:
+ VTM = VectorTypeModifier::LFixedLog2LMULN3;
+ break;
+ case -2:
+ VTM = VectorTypeModifier::LFixedLog2LMULN2;
+ break;
+ case -1:
+ VTM = VectorTypeModifier::LFixedLog2LMULN1;
+ break;
+ case 0:
+ VTM = VectorTypeModifier::LFixedLog2LMUL0;
+ break;
+ case 1:
+ VTM = VectorTypeModifier::LFixedLog2LMUL1;
+ break;
+ case 2:
+ VTM = VectorTypeModifier::LFixedLog2LMUL2;
+ break;
+ case 3:
+ VTM = VectorTypeModifier::LFixedLog2LMUL3;
+ break;
+ default:
+ llvm_unreachable("Invalid LFixedLog2LMUL value, should be [-3, 3]");
+ return None;
+ }
+ } else if (ComplexTT.first == "SFixedLog2LMUL") {
+ int32_t Log2LMUL;
+ if (ComplexTT.second.getAsInteger(10, Log2LMUL)) {
+ llvm_unreachable("Invalid SFixedLog2LMUL value!");
+ return None;
+ }
+ switch (Log2LMUL) {
+ case -3:
+ VTM = VectorTypeModifier::SFixedLog2LMULN3;
+ break;
+ case -2:
+ VTM = VectorTypeModifier::SFixedLog2LMULN2;
+ break;
+ case -1:
+ VTM = VectorTypeModifier::SFixedLog2LMULN1;
+ break;
+ case 0:
+ VTM = VectorTypeModifier::SFixedLog2LMUL0;
+ break;
+ case 1:
+ VTM = VectorTypeModifier::SFixedLog2LMUL1;
+ break;
+ case 2:
+ VTM = VectorTypeModifier::SFixedLog2LMUL2;
+ break;
+ case 3:
+ VTM = VectorTypeModifier::SFixedLog2LMUL3;
+ break;
+ default:
+ llvm_unreachable("Invalid LFixedLog2LMUL value, should be [-3, 3]");
+ return None;
+ }
+ } else {
+ llvm_unreachable("Illegal complex type transformers!");
+ }
+ }
+ PD.VTM = static_cast<uint8_t>(VTM);
+
+ // Parse the remaining type modifiers.
+ TypeModifier TM = TypeModifier::NoModifier;
+ for (char I : PrototypeDescriptorStr) {
+ switch (I) {
+ case 'P':
+ if ((TM & TypeModifier::Const) == TypeModifier::Const)
+ llvm_unreachable("'P' transformer cannot be used after 'C'");
+ if ((TM & TypeModifier::Pointer) == TypeModifier::Pointer)
+ llvm_unreachable("'P' transformer cannot be used twice");
+ TM |= TypeModifier::Pointer;
+ break;
+ case 'C':
+ TM |= TypeModifier::Const;
+ break;
+ case 'K':
+ TM |= TypeModifier::Immediate;
+ break;
+ case 'U':
+ TM |= TypeModifier::UnsignedInteger;
+ break;
+ case 'I':
+ TM |= TypeModifier::SignedInteger;
+ break;
+ case 'F':
+ TM |= TypeModifier::Float;
+ break;
+ case 'S':
+ TM |= TypeModifier::LMUL1;
+ break;
+ default:
+ llvm_unreachable("Illegal non-primitive type transformer!");
+ }
+ }
+ PD.TM = static_cast<uint8_t>(TM);
+
+ return PD;
+}
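+// A walk-through of the parser above: "(Log2EEW:4)Uv" is consumed from the
+// back -- 'v' selects BaseTypeModifier::Vector, the "(Log2EEW:4)" group
+// selects VectorTypeModifier::Log2EEW4, and the remaining 'U' sets
+// TypeModifier::UnsignedInteger.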
+
+void RVVType::applyModifier(const PrototypeDescriptor &Transformer) {
+ // Handle primitive type transformer
+ switch (static_cast<BaseTypeModifier>(Transformer.PT)) {
+ case BaseTypeModifier::Scalar:
+ Scale = 0;
+ break;
+ case BaseTypeModifier::Vector:
+ Scale = LMUL.getScale(ElementBitwidth);
+ break;
+ case BaseTypeModifier::Void:
+ ScalarType = ScalarTypeKind::Void;
+ break;
+ case BaseTypeModifier::SizeT:
+ ScalarType = ScalarTypeKind::Size_t;
+ break;
+ case BaseTypeModifier::Ptrdiff:
+ ScalarType = ScalarTypeKind::Ptrdiff_t;
+ break;
+ case BaseTypeModifier::UnsignedLong:
+ ScalarType = ScalarTypeKind::UnsignedLong;
+ break;
+ case BaseTypeModifier::SignedLong:
+ ScalarType = ScalarTypeKind::SignedLong;
+ break;
+ case BaseTypeModifier::Invalid:
+ ScalarType = ScalarTypeKind::Invalid;
+ return;
+ }
+
+ switch (static_cast<VectorTypeModifier>(Transformer.VTM)) {
+ case VectorTypeModifier::Widening2XVector:
+ ElementBitwidth *= 2;
+ LMUL.MulLog2LMUL(1);
+ Scale = LMUL.getScale(ElementBitwidth);
+ break;
+ case VectorTypeModifier::Widening4XVector:
+ ElementBitwidth *= 4;
+ LMUL.MulLog2LMUL(2);
+ Scale = LMUL.getScale(ElementBitwidth);
+ break;
+ case VectorTypeModifier::Widening8XVector:
+ ElementBitwidth *= 8;
+ LMUL.MulLog2LMUL(3);
+ Scale = LMUL.getScale(ElementBitwidth);
+ break;
+ case VectorTypeModifier::MaskVector:
+ ScalarType = ScalarTypeKind::Boolean;
+ Scale = LMUL.getScale(ElementBitwidth);
+ ElementBitwidth = 1;
+ break;
+ case VectorTypeModifier::Log2EEW3:
+ applyLog2EEW(3);
+ break;
+ case VectorTypeModifier::Log2EEW4:
+ applyLog2EEW(4);
+ break;
+ case VectorTypeModifier::Log2EEW5:
+ applyLog2EEW(5);
+ break;
+ case VectorTypeModifier::Log2EEW6:
+ applyLog2EEW(6);
+ break;
+ case VectorTypeModifier::FixedSEW8:
+ applyFixedSEW(8);
+ break;
+ case VectorTypeModifier::FixedSEW16:
+ applyFixedSEW(16);
+ break;
+ case VectorTypeModifier::FixedSEW32:
+ applyFixedSEW(32);
+ break;
+ case VectorTypeModifier::FixedSEW64:
+ applyFixedSEW(64);
+ break;
+ case VectorTypeModifier::LFixedLog2LMULN3:
+ applyFixedLog2LMUL(-3, FixedLMULType::LargerThan);
+ break;
+ case VectorTypeModifier::LFixedLog2LMULN2:
+ applyFixedLog2LMUL(-2, FixedLMULType::LargerThan);
+ break;
+ case VectorTypeModifier::LFixedLog2LMULN1:
+ applyFixedLog2LMUL(-1, FixedLMULType::LargerThan);
+ break;
+ case VectorTypeModifier::LFixedLog2LMUL0:
+ applyFixedLog2LMUL(0, FixedLMULType::LargerThan);
+ break;
+ case VectorTypeModifier::LFixedLog2LMUL1:
+ applyFixedLog2LMUL(1, FixedLMULType::LargerThan);
+ break;
+ case VectorTypeModifier::LFixedLog2LMUL2:
+ applyFixedLog2LMUL(2, FixedLMULType::LargerThan);
+ break;
+ case VectorTypeModifier::LFixedLog2LMUL3:
+ applyFixedLog2LMUL(3, FixedLMULType::LargerThan);
+ break;
+ case VectorTypeModifier::SFixedLog2LMULN3:
+ applyFixedLog2LMUL(-3, FixedLMULType::SmallerThan);
+ break;
+ case VectorTypeModifier::SFixedLog2LMULN2:
+ applyFixedLog2LMUL(-2, FixedLMULType::SmallerThan);
+ break;
+ case VectorTypeModifier::SFixedLog2LMULN1:
+ applyFixedLog2LMUL(-1, FixedLMULType::SmallerThan);
+ break;
+ case VectorTypeModifier::SFixedLog2LMUL0:
+ applyFixedLog2LMUL(0, FixedLMULType::SmallerThan);
+ break;
+ case VectorTypeModifier::SFixedLog2LMUL1:
+ applyFixedLog2LMUL(1, FixedLMULType::SmallerThan);
+ break;
+ case VectorTypeModifier::SFixedLog2LMUL2:
+ applyFixedLog2LMUL(2, FixedLMULType::SmallerThan);
+ break;
+ case VectorTypeModifier::SFixedLog2LMUL3:
+ applyFixedLog2LMUL(3, FixedLMULType::SmallerThan);
+ break;
+ case VectorTypeModifier::NoModifier:
+ break;
+ }
+
+ for (unsigned TypeModifierMaskShift = 0;
+ TypeModifierMaskShift <= static_cast<unsigned>(TypeModifier::MaxOffset);
+ ++TypeModifierMaskShift) {
+ unsigned TypeModifierMask = 1 << TypeModifierMaskShift;
+ if ((static_cast<unsigned>(Transformer.TM) & TypeModifierMask) !=
+ TypeModifierMask)
+ continue;
+ switch (static_cast<TypeModifier>(TypeModifierMask)) {
+ case TypeModifier::Pointer:
+ IsPointer = true;
+ break;
+ case TypeModifier::Const:
+ IsConstant = true;
+ break;
+ case TypeModifier::Immediate:
+ IsImmediate = true;
+ IsConstant = true;
+ break;
+ case TypeModifier::UnsignedInteger:
+ ScalarType = ScalarTypeKind::UnsignedInteger;
+ break;
+ case TypeModifier::SignedInteger:
+ ScalarType = ScalarTypeKind::SignedInteger;
+ break;
+ case TypeModifier::Float:
+ ScalarType = ScalarTypeKind::Float;
+ break;
+ case TypeModifier::LMUL1:
+ LMUL = LMULType(0);
+ // Updating LMUL requires updating Scale too.
+ Scale = LMUL.getScale(ElementBitwidth);
+ break;
+ default:
+ llvm_unreachable("Unknown type modifier mask!");
+ }
+ }
+}
+
+void RVVType::applyLog2EEW(unsigned Log2EEW) {
+ // Update EMUL: EMUL = (EEW / SEW) * LMUL.
+ LMUL.MulLog2LMUL(Log2EEW - Log2_32(ElementBitwidth));
+ // Update EEW.
+ ElementBitwidth = 1 << Log2EEW;
+ ScalarType = ScalarTypeKind::SignedInteger;
+ Scale = LMUL.getScale(ElementBitwidth);
+}
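+// Worked example: starting from SEW = 32 at LMUL m2 (Log2LMUL = 1),
+// Log2EEW = 3 gives EEW = 8 and Log2LMUL = 1 + (3 - 5) = -1, i.e. EMUL = mf2,
+// matching EMUL = (EEW / SEW) * LMUL = (8 / 32) * 2 = 1/2.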
+
+void RVVType::applyFixedSEW(unsigned NewSEW) {
+ // Mark the type invalid if the source and destination SEW are the same.
+ if (ElementBitwidth == NewSEW) {
+ ScalarType = ScalarTypeKind::Invalid;
+ return;
+ }
+ // Update new SEW
+ ElementBitwidth = NewSEW;
+ Scale = LMUL.getScale(ElementBitwidth);
+}
+
+void RVVType::applyFixedLog2LMUL(int Log2LMUL, enum FixedLMULType Type) {
+ switch (Type) {
+ case FixedLMULType::LargerThan:
+ if (Log2LMUL < LMUL.Log2LMUL) {
+ ScalarType = ScalarTypeKind::Invalid;
+ return;
+ }
+ break;
+ case FixedLMULType::SmallerThan:
+ if (Log2LMUL > LMUL.Log2LMUL) {
+ ScalarType = ScalarTypeKind::Invalid;
+ return;
+ }
+ break;
+ }
+
+ // Update new LMUL
+ LMUL = LMULType(Log2LMUL);
+ Scale = LMUL.getScale(ElementBitwidth);
+}
+
+Optional<RVVTypes>
+RVVType::computeTypes(BasicType BT, int Log2LMUL, unsigned NF,
+ ArrayRef<PrototypeDescriptor> Prototype) {
+ // LMUL x NF must be less than or equal to 8.
+ if ((Log2LMUL >= 1) && (1 << Log2LMUL) * NF > 8)
+ return llvm::None;
+
+ RVVTypes Types;
+ for (const PrototypeDescriptor &Proto : Prototype) {
+ auto T = computeType(BT, Log2LMUL, Proto);
+ if (!T)
+ return llvm::None;
+ // Record the legal type.
+ Types.push_back(T.getValue());
+ }
+ return Types;
+}
+
+// Compute the hash value of an RVVType, used to cache the results of
+// computeType.
+static uint64_t computeRVVTypeHashValue(BasicType BT, int Log2LMUL,
+ PrototypeDescriptor Proto) {
+ // Layout of hash value:
+ // 0 8 16 24 32 40
+ // | Log2LMUL + 3 | BT | Proto.PT | Proto.TM | Proto.VTM |
+ assert(Log2LMUL >= -3 && Log2LMUL <= 3);
+ return (Log2LMUL + 3) | (static_cast<uint64_t>(BT) & 0xff) << 8 |
+ ((uint64_t)(Proto.PT & 0xff) << 16) |
+ ((uint64_t)(Proto.TM & 0xff) << 24) |
+ ((uint64_t)(Proto.VTM & 0xff) << 32);
+}
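+// Packing example: Log2LMUL = -2 stores 1 in the low byte, and BT, PT, TM
+// and VTM land in their own bytes at bits 8, 16, 24 and 32 respectively, so
+// distinct inputs cannot collide as long as each value fits in 8 bits.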
+
+Optional<RVVTypePtr> RVVType::computeType(BasicType BT, int Log2LMUL,
+ PrototypeDescriptor Proto) {
+ // Concatenate BasicType, LMUL and Proto into the cache key.
+ static std::unordered_map<uint64_t, RVVType> LegalTypes;
+ static std::set<uint64_t> IllegalTypes;
+ uint64_t Idx = computeRVVTypeHashValue(BT, Log2LMUL, Proto);
+ // Search first
+ auto It = LegalTypes.find(Idx);
+ if (It != LegalTypes.end())
+ return &(It->second);
+
+ if (IllegalTypes.count(Idx))
+ return llvm::None;
+
+ // Compute type and record the result.
+ RVVType T(BT, Log2LMUL, Proto);
+ if (T.isValid()) {
+ // Record legal type index and value.
+ LegalTypes.insert({Idx, T});
+ return &(LegalTypes[Idx]);
+ }
+ // Record illegal type index.
+ IllegalTypes.insert(Idx);
+ return llvm::None;
+}
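+// Usage sketch of the memoization above: two calls with the same key return
+// a pointer to the same cached entry.
+//   auto A = RVVType::computeType(BT, 0, Proto);
+//   auto B = RVVType::computeType(BT, 0, Proto);
+//   assert(A && B && *A == *B);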
+
+//===----------------------------------------------------------------------===//
+// RVVIntrinsic implementation
+//===----------------------------------------------------------------------===//
+RVVIntrinsic::RVVIntrinsic(
+ StringRef NewName, StringRef Suffix, StringRef NewOverloadedName,
+ StringRef OverloadedSuffix, StringRef IRName, bool IsMasked,
+ bool HasMaskedOffOperand, bool HasVL, PolicyScheme Scheme,
+ bool HasUnMaskedOverloaded, bool HasBuiltinAlias, StringRef ManualCodegen,
+ const RVVTypes &OutInTypes, const std::vector<int64_t> &NewIntrinsicTypes,
+ const std::vector<StringRef> &RequiredFeatures, unsigned NF)
+ : IRName(IRName), IsMasked(IsMasked), HasVL(HasVL), Scheme(Scheme),
+ HasUnMaskedOverloaded(HasUnMaskedOverloaded),
+ HasBuiltinAlias(HasBuiltinAlias), ManualCodegen(ManualCodegen.str()),
+ NF(NF) {
+
+ // Init BuiltinName, Name and OverloadedName
+ BuiltinName = NewName.str();
+ Name = BuiltinName;
+ if (NewOverloadedName.empty())
+ OverloadedName = NewName.split("_").first.str();
+ else
+ OverloadedName = NewOverloadedName.str();
+ if (!Suffix.empty())
+ Name += "_" + Suffix.str();
+ if (!OverloadedSuffix.empty())
+ OverloadedName += "_" + OverloadedSuffix.str();
+ if (IsMasked) {
+ BuiltinName += "_m";
+ Name += "_m";
+ }
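+ // E.g. NewName = "vadd" with Suffix = "i32m1" and IsMasked = true gives
+ // Name = "vadd_i32m1_m" and BuiltinName = "vadd_m", while OverloadedName
+ // falls back to the part before the first '_' when not provided.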
+
+ // Init the required RISC-V predefined macros.
+ for (const auto &T : OutInTypes) {
+ if (T->isFloatVector(16) || T->isFloat(16))
+ RISCVPredefinedMacros |= RISCVPredefinedMacro::Zvfh;
+ if (T->isFloatVector(32))
+ RISCVPredefinedMacros |= RISCVPredefinedMacro::VectorMaxELenFp32;
+ if (T->isFloatVector(64))
+ RISCVPredefinedMacros |= RISCVPredefinedMacro::VectorMaxELenFp64;
+ if (T->isVector(64))
+ RISCVPredefinedMacros |= RISCVPredefinedMacro::VectorMaxELen64;
+ }
+ for (auto Feature : RequiredFeatures) {
+ if (Feature == "RV64")
+ RISCVPredefinedMacros |= RISCVPredefinedMacro::RV64;
+ // Note: the full-multiply instructions (mulh, mulhu, mulhsu, smul) for
+ // EEW=64 require the V extension.
+ if (Feature == "FullMultiply" &&
+ (RISCVPredefinedMacros & RISCVPredefinedMacro::VectorMaxELen64))
+ RISCVPredefinedMacros |= RISCVPredefinedMacro::V;
+ }
+
+ // Init OutputType and InputTypes
+ OutputType = OutInTypes[0];
+ InputTypes.assign(OutInTypes.begin() + 1, OutInTypes.end());
+
+ // IntrinsicTypes holds the type indices of the unmasked TA version. They
+ // need to be shifted if there is a merge operand (always the first operand).
+ IntrinsicTypes = NewIntrinsicTypes;
+ if ((IsMasked && HasMaskedOffOperand) ||
+ (!IsMasked && hasPassthruOperand())) {
+ for (auto &I : IntrinsicTypes) {
+ if (I >= 0)
+ I += NF;
+ }
+ }
+}
+
+std::string RVVIntrinsic::getBuiltinTypeStr() const {
+ std::string S;
+ S += OutputType->getBuiltinStr();
+ for (const auto &T : InputTypes) {
+ S += T->getBuiltinStr();
+ }
+ return S;
+}
+
+std::string RVVIntrinsic::getSuffixStr(
+ BasicType Type, int Log2LMUL,
+ llvm::ArrayRef<PrototypeDescriptor> PrototypeDescriptors) {
+ SmallVector<std::string> SuffixStrs;
+ for (auto PD : PrototypeDescriptors) {
+ auto T = RVVType::computeType(Type, Log2LMUL, PD);
+ SuffixStrs.push_back((*T)->getShortStr());
+ }
+ return join(SuffixStrs, "_");
+}
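+// E.g. BasicType::Int32 at Log2LMUL = 0 with a single vector descriptor
+// yields the suffix "i32m1"; multiple descriptors are joined with '_'.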
+
+SmallVector<PrototypeDescriptor> parsePrototypes(StringRef Prototypes) {
+ SmallVector<PrototypeDescriptor> PrototypeDescriptors;
+ const StringRef Primaries("evwqom0ztul");
+ while (!Prototypes.empty()) {
+ size_t Idx = 0;
+ // Skip over a complex prototype, because it could contain primitive type
+ // characters.
+ if (Prototypes[0] == '(')
+ Idx = Prototypes.find_first_of(')');
+ Idx = Prototypes.find_first_of(Primaries, Idx);
+ assert(Idx != StringRef::npos);
+ auto PD = PrototypeDescriptor::parsePrototypeDescriptor(
+ Prototypes.slice(0, Idx + 1));
+ if (!PD)
+ llvm_unreachable("Error during parsing prototype.");
+ PrototypeDescriptors.push_back(*PD);
+ Prototypes = Prototypes.drop_front(Idx + 1);
+ }
+ return PrototypeDescriptors;
+}
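+// Sketch of the loop above: "(Log2EEW:5)vPCe" splits into two descriptors,
+// "(Log2EEW:5)v" (the ')' is skipped before searching for a primary
+// character) and "PCe", because 'v' and 'e' both appear in Primaries.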
+
+} // end namespace RISCV
+} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/Testing/CommandLineArgs.cpp b/contrib/llvm-project/clang/lib/Testing/CommandLineArgs.cpp
index cd4d8c188da9..bf517e2dd312 100644
--- a/contrib/llvm-project/clang/lib/Testing/CommandLineArgs.cpp
+++ b/contrib/llvm-project/clang/lib/Testing/CommandLineArgs.cpp
@@ -36,6 +36,9 @@ std::vector<std::string> getCommandLineArgsForTesting(TestLanguage Lang) {
case Lang_CXX20:
Args = {"-std=c++20", "-frtti"};
break;
+ case Lang_OBJC:
+ Args = {"-x", "objective-c", "-frtti", "-fobjc-nonfragile-abi"};
+ break;
case Lang_OBJCXX:
Args = {"-x", "objective-c++", "-frtti"};
break;
@@ -45,6 +48,42 @@ std::vector<std::string> getCommandLineArgsForTesting(TestLanguage Lang) {
return Args;
}
+std::vector<std::string> getCC1ArgsForTesting(TestLanguage Lang) {
+ std::vector<std::string> Args;
+ switch (Lang) {
+ case Lang_C89:
+ Args = {"-xc", "-std=c89"};
+ break;
+ case Lang_C99:
+ Args = {"-xc", "-std=c99"};
+ break;
+ case Lang_CXX03:
+ Args = {"-std=c++03"};
+ break;
+ case Lang_CXX11:
+ Args = {"-std=c++11"};
+ break;
+ case Lang_CXX14:
+ Args = {"-std=c++14"};
+ break;
+ case Lang_CXX17:
+ Args = {"-std=c++17"};
+ break;
+ case Lang_CXX20:
+ Args = {"-std=c++20"};
+ break;
+ case Lang_OBJC:
+ Args = {"-xobjective-c"};
+ break;
+ case Lang_OBJCXX:
+ Args = {"-xobjective-c++"};
+ break;
+ case Lang_OpenCL:
+ llvm_unreachable("Not implemented yet!");
+ }
+ return Args;
+}
+
StringRef getFilenameForTesting(TestLanguage Lang) {
switch (Lang) {
case Lang_C89:
@@ -61,6 +100,9 @@ StringRef getFilenameForTesting(TestLanguage Lang) {
case Lang_OpenCL:
return "input.cl";
+ case Lang_OBJC:
+ return "input.m";
+
case Lang_OBJCXX:
return "input.mm";
}
diff --git a/contrib/llvm-project/clang/lib/Testing/TestAST.cpp b/contrib/llvm-project/clang/lib/Testing/TestAST.cpp
new file mode 100644
index 000000000000..5817230f4b09
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Testing/TestAST.cpp
@@ -0,0 +1,162 @@
+//===--- TestAST.cpp ------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Testing/TestAST.h"
+#include "clang/Basic/Diagnostic.h"
+#include "clang/Basic/LangOptions.h"
+#include "clang/Frontend/FrontendActions.h"
+#include "clang/Frontend/TextDiagnostic.h"
+#include "clang/Testing/CommandLineArgs.h"
+#include "llvm/ADT/ScopeExit.h"
+#include "llvm/Support/VirtualFileSystem.h"
+
+#include "gtest/gtest.h"
+
+namespace clang {
+namespace {
+
+// Captures diagnostics into a vector, optionally reporting errors to gtest.
+class StoreDiagnostics : public DiagnosticConsumer {
+ std::vector<StoredDiagnostic> &Out;
+ bool ReportErrors;
+ LangOptions LangOpts;
+
+public:
+ StoreDiagnostics(std::vector<StoredDiagnostic> &Out, bool ReportErrors)
+ : Out(Out), ReportErrors(ReportErrors) {}
+
+ void BeginSourceFile(const LangOptions &LangOpts,
+ const Preprocessor *) override {
+ this->LangOpts = LangOpts;
+ }
+
+ void HandleDiagnostic(DiagnosticsEngine::Level DiagLevel,
+ const Diagnostic &Info) override {
+ Out.emplace_back(DiagLevel, Info);
+ if (ReportErrors && DiagLevel >= DiagnosticsEngine::Error) {
+ std::string Text;
+ llvm::raw_string_ostream OS(Text);
+ TextDiagnostic Renderer(OS, LangOpts,
+ &Info.getDiags()->getDiagnosticOptions());
+ Renderer.emitStoredDiagnostic(Out.back());
+ ADD_FAILURE() << Text;
+ }
+ }
+};
+
+// Fills in the bits of a CompilerInstance that weren't initialized yet.
+// Provides "empty" ASTContext etc if we fail before parsing gets started.
+void createMissingComponents(CompilerInstance &Clang) {
+ if (!Clang.hasDiagnostics())
+ Clang.createDiagnostics();
+ if (!Clang.hasFileManager())
+ Clang.createFileManager();
+ if (!Clang.hasSourceManager())
+ Clang.createSourceManager(Clang.getFileManager());
+ if (!Clang.hasTarget())
+ Clang.createTarget();
+ if (!Clang.hasPreprocessor())
+ Clang.createPreprocessor(TU_Complete);
+ if (!Clang.hasASTConsumer())
+ Clang.setASTConsumer(std::make_unique<ASTConsumer>());
+ if (!Clang.hasASTContext())
+ Clang.createASTContext();
+ if (!Clang.hasSema())
+ Clang.createSema(TU_Complete, /*CodeCompleteConsumer=*/nullptr);
+}
+
+} // namespace
+
+TestAST::TestAST(const TestInputs &In) {
+ Clang = std::make_unique<CompilerInstance>(
+ std::make_shared<PCHContainerOperations>());
+ // If we don't manage to finish parsing, create CompilerInstance components
+ // anyway so that the test will see an empty AST instead of crashing.
+ auto RecoverFromEarlyExit =
+ llvm::make_scope_exit([&] { createMissingComponents(*Clang); });
+
+ // Extra error conditions are reported through diagnostics; set that up first.
+ bool ErrorOK = In.ErrorOK || llvm::StringRef(In.Code).contains("error-ok");
+ Clang->createDiagnostics(new StoreDiagnostics(Diagnostics, !ErrorOK));
+
+ // Parse cc1 argv (typically [-std=c++20 input.cc]) into CompilerInvocation.
+ std::vector<const char *> Argv;
+ std::vector<std::string> LangArgs = getCC1ArgsForTesting(In.Language);
+ for (const auto &S : LangArgs)
+ Argv.push_back(S.c_str());
+ for (const auto &S : In.ExtraArgs)
+ Argv.push_back(S.c_str());
+ std::string Filename = getFilenameForTesting(In.Language).str();
+ Argv.push_back(Filename.c_str());
+ Clang->setInvocation(std::make_unique<CompilerInvocation>());
+ if (!CompilerInvocation::CreateFromArgs(Clang->getInvocation(), Argv,
+ Clang->getDiagnostics(), "clang")) {
+ ADD_FAILURE() << "Failed to create invocation";
+ return;
+ }
+ assert(!Clang->getInvocation().getFrontendOpts().DisableFree);
+
+ // Set up a VFS with only the virtual file visible.
+ auto VFS = llvm::makeIntrusiveRefCnt<llvm::vfs::InMemoryFileSystem>();
+ VFS->addFile(Filename, /*ModificationTime=*/0,
+ llvm::MemoryBuffer::getMemBufferCopy(In.Code, Filename));
+ for (const auto &Extra : In.ExtraFiles)
+ VFS->addFile(
+ Extra.getKey(), /*ModificationTime=*/0,
+ llvm::MemoryBuffer::getMemBufferCopy(Extra.getValue(), Extra.getKey()));
+ Clang->createFileManager(VFS);
+
+ // Running the FrontendAction creates the other components: SourceManager,
+ // Preprocessor, ASTContext, Sema. Preprocessor needs TargetInfo to be set.
+ EXPECT_TRUE(Clang->createTarget());
+ Action = std::make_unique<SyntaxOnlyAction>();
+ const FrontendInputFile &Main = Clang->getFrontendOpts().Inputs.front();
+ if (!Action->BeginSourceFile(*Clang, Main)) {
+ ADD_FAILURE() << "Failed to BeginSourceFile()";
+ Action.reset(); // Don't call EndSourceFile if BeginSourceFile failed.
+ return;
+ }
+ if (auto Err = Action->Execute())
+ ADD_FAILURE() << "Failed to Execute(): " << llvm::toString(std::move(Err));
+
+ // Action->EndSourceFile() would destroy the ASTContext, we want to keep it.
+ // But notify the preprocessor we're done now.
+ Clang->getPreprocessor().EndSourceFile();
+ // We're done gathering diagnostics; detach the consumer so we can destroy it.
+ Clang->getDiagnosticClient().EndSourceFile();
+ Clang->getDiagnostics().setClient(new DiagnosticConsumer(),
+ /*ShouldOwnClient=*/true);
+}
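+// A minimal usage sketch (assuming the TestInputs/TestAST interface declared
+// in clang/Testing/TestAST.h):
+//   TestInputs Inputs("int foo();");
+//   Inputs.Language = Lang_CXX17;
+//   TestAST AST(Inputs);
+//   ASTContext &Ctx = AST.context(); // parsed AST, kept alive after parsing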
+
+void TestAST::clear() {
+ if (Action) {
+ // We notified the preprocessor of EOF already, so detach it first.
+ // Sema needs the PP alive until after EndSourceFile() though.
+ auto PP = Clang->getPreprocessorPtr(); // Keep PP alive for now.
+ Clang->setPreprocessor(nullptr); // Detach so we don't send EOF twice.
+ Action->EndSourceFile(); // Destroy ASTContext and Sema.
+ // Now Sema is gone, PP can safely be destroyed.
+ }
+ Action.reset();
+ Clang.reset();
+ Diagnostics.clear();
+}
+
+TestAST &TestAST::operator=(TestAST &&M) {
+ clear();
+ Action = std::move(M.Action);
+ Clang = std::move(M.Clang);
+ Diagnostics = std::move(M.Diagnostics);
+ return *this;
+}
+
+TestAST::TestAST(TestAST &&M) { *this = std::move(M); }
+
+TestAST::~TestAST() { clear(); }
+
+} // end namespace clang
diff --git a/contrib/llvm-project/clang/lib/Tooling/Core/Replacement.cpp b/contrib/llvm-project/clang/lib/Tooling/Core/Replacement.cpp
index 30e1923bf1cb..aca2afceea44 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Core/Replacement.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Core/Replacement.cpp
@@ -179,9 +179,9 @@ static std::string getReplacementErrString(replacement_error Err) {
std::string ReplacementError::message() const {
std::string Message = getReplacementErrString(Err);
- if (NewReplacement.hasValue())
+ if (NewReplacement)
Message += "\nNew replacement: " + NewReplacement->toString();
- if (ExistingReplacement.hasValue())
+ if (ExistingReplacement)
Message += "\nExisting replacement: " + ExistingReplacement->toString();
return Message;
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
index 80a70252721d..026bdfe03f28 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningFilesystem.cpp
@@ -7,7 +7,6 @@
//===----------------------------------------------------------------------===//
#include "clang/Tooling/DependencyScanning/DependencyScanningFilesystem.h"
-#include "clang/Lex/DependencyDirectivesSourceMinimizer.h"
#include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/SmallVectorMemoryBuffer.h"
#include "llvm/Support/Threading.h"
@@ -41,69 +40,44 @@ DependencyScanningWorkerFilesystem::readFile(StringRef Filename) {
return TentativeEntry(Stat, std::move(Buffer));
}
-EntryRef DependencyScanningWorkerFilesystem::minimizeIfNecessary(
+EntryRef DependencyScanningWorkerFilesystem::scanForDirectivesIfNecessary(
const CachedFileSystemEntry &Entry, StringRef Filename, bool Disable) {
if (Entry.isError() || Entry.isDirectory() || Disable ||
- !shouldMinimize(Filename, Entry.getUniqueID()))
- return EntryRef(/*Minimized=*/false, Filename, Entry);
+ !shouldScanForDirectives(Filename))
+ return EntryRef(Filename, Entry);
- CachedFileContents *Contents = Entry.getContents();
+ CachedFileContents *Contents = Entry.getCachedContents();
assert(Contents && "contents not initialized");
// Double-checked locking.
- if (Contents->MinimizedAccess.load())
- return EntryRef(/*Minimized=*/true, Filename, Entry);
+ if (Contents->DepDirectives.load())
+ return EntryRef(Filename, Entry);
std::lock_guard<std::mutex> GuardLock(Contents->ValueLock);
// Double-checked locking.
- if (Contents->MinimizedAccess.load())
- return EntryRef(/*Minimized=*/true, Filename, Entry);
-
- llvm::SmallString<1024> MinimizedFileContents;
- // Minimize the file down to directives that might affect the dependencies.
- SmallVector<minimize_source_to_dependency_directives::Token, 64> Tokens;
- if (minimizeSourceToDependencyDirectives(Contents->Original->getBuffer(),
- MinimizedFileContents, Tokens)) {
+ if (Contents->DepDirectives.load())
+ return EntryRef(Filename, Entry);
+
+ SmallVector<dependency_directives_scan::Directive, 64> Directives;
+ // Scan the file for preprocessor directives that might affect the
+ // dependencies.
+ if (scanSourceForDependencyDirectives(Contents->Original->getBuffer(),
+ Contents->DepDirectiveTokens,
+ Directives)) {
+ Contents->DepDirectiveTokens.clear();
// FIXME: Propagate the diagnostic if desired by the client.
- // Use the original file if the minimization failed.
- Contents->MinimizedStorage =
- llvm::MemoryBuffer::getMemBuffer(*Contents->Original);
- Contents->MinimizedAccess.store(Contents->MinimizedStorage.get());
- return EntryRef(/*Minimized=*/true, Filename, Entry);
+ Contents->DepDirectives.store(new Optional<DependencyDirectivesTy>());
+ return EntryRef(Filename, Entry);
}
- // The contents produced by the minimizer must be null terminated.
- assert(MinimizedFileContents.data()[MinimizedFileContents.size()] == '\0' &&
- "not null terminated contents");
-
- // Compute the skipped PP ranges that speedup skipping over inactive
- // preprocessor blocks.
- llvm::SmallVector<minimize_source_to_dependency_directives::SkippedRange, 32>
- SkippedRanges;
- minimize_source_to_dependency_directives::computeSkippedRanges(Tokens,
- SkippedRanges);
- PreprocessorSkippedRangeMapping Mapping;
- for (const auto &Range : SkippedRanges) {
- if (Range.Length < 16) {
- // Ignore small ranges as non-profitable.
- // FIXME: This is a heuristic, its worth investigating the tradeoffs
- // when it should be applied.
- continue;
- }
- Mapping[Range.Offset] = Range.Length;
- }
- Contents->PPSkippedRangeMapping = std::move(Mapping);
-
- Contents->MinimizedStorage = std::make_unique<llvm::SmallVectorMemoryBuffer>(
- std::move(MinimizedFileContents));
- // This function performed double-checked locking using `MinimizedAccess`.
- // Assigning it must be the last thing this function does. If we were to
- // assign it before `PPSkippedRangeMapping`, other threads may skip the
- // critical section (`MinimizedAccess != nullptr`) and access the mappings
- // that are about to be initialized, leading to a data race.
- Contents->MinimizedAccess.store(Contents->MinimizedStorage.get());
- return EntryRef(/*Minimized=*/true, Filename, Entry);
+ // This function performed double-checked locking using `DepDirectives`.
+ // Assigning it must be the last thing this function does; otherwise other
+ // threads may skip the critical section (`DepDirectives != nullptr`),
+ // leading to a data race.
+ Contents->DepDirectives.store(
+ new Optional<DependencyDirectivesTy>(std::move(Directives)));
+ return EntryRef(Filename, Entry);
}
DependencyScanningFilesystemSharedCache::
@@ -189,7 +163,7 @@ DependencyScanningFilesystemSharedCache::CacheShard::
///
/// This is kinda hacky, it would be better if we knew what kind of file Clang
/// was expecting instead.
-static bool shouldMinimizeBasedOnExtension(StringRef Filename) {
+static bool shouldScanForDirectivesBasedOnExtension(StringRef Filename) {
StringRef Ext = llvm::sys::path::extension(Filename);
if (Ext.empty())
return true; // C++ standard library
@@ -207,22 +181,12 @@ static bool shouldCacheStatFailures(StringRef Filename) {
if (Ext.empty())
return false; // This may be the module cache directory.
// Only cache stat failures on source files.
- return shouldMinimizeBasedOnExtension(Filename);
+ return shouldScanForDirectivesBasedOnExtension(Filename);
}
-void DependencyScanningWorkerFilesystem::disableMinimization(
+bool DependencyScanningWorkerFilesystem::shouldScanForDirectives(
StringRef Filename) {
- // Since we're not done setting up `NotToBeMinimized` yet, we need to disable
- // minimization explicitly.
- if (llvm::ErrorOr<EntryRef> Result =
- getOrCreateFileSystemEntry(Filename, /*DisableMinimization=*/true))
- NotToBeMinimized.insert(Result->getStatus().getUniqueID());
-}
-
-bool DependencyScanningWorkerFilesystem::shouldMinimize(
- StringRef Filename, llvm::sys::fs::UniqueID UID) {
- return shouldMinimizeBasedOnExtension(Filename) &&
- !NotToBeMinimized.contains(UID);
+ return shouldScanForDirectivesBasedOnExtension(Filename);
}
const CachedFileSystemEntry &
@@ -275,14 +239,16 @@ DependencyScanningWorkerFilesystem::computeAndStoreResult(StringRef Filename) {
llvm::ErrorOr<EntryRef>
DependencyScanningWorkerFilesystem::getOrCreateFileSystemEntry(
- StringRef Filename, bool DisableMinimization) {
+ StringRef Filename, bool DisableDirectivesScanning) {
if (const auto *Entry = findEntryByFilenameWithWriteThrough(Filename))
- return minimizeIfNecessary(*Entry, Filename, DisableMinimization)
+ return scanForDirectivesIfNecessary(*Entry, Filename,
+ DisableDirectivesScanning)
.unwrapError();
auto MaybeEntry = computeAndStoreResult(Filename);
if (!MaybeEntry)
return MaybeEntry.getError();
- return minimizeIfNecessary(*MaybeEntry, Filename, DisableMinimization)
+ return scanForDirectivesIfNecessary(*MaybeEntry, Filename,
+ DisableDirectivesScanning)
.unwrapError();
}
@@ -301,15 +267,13 @@ namespace {
/// The VFS that is used by clang consumes the \c CachedFileSystemEntry using
/// this subclass.
-class MinimizedVFSFile final : public llvm::vfs::File {
+class DepScanFile final : public llvm::vfs::File {
public:
- MinimizedVFSFile(std::unique_ptr<llvm::MemoryBuffer> Buffer,
- llvm::vfs::Status Stat)
+ DepScanFile(std::unique_ptr<llvm::MemoryBuffer> Buffer,
+ llvm::vfs::Status Stat)
: Buffer(std::move(Buffer)), Stat(std::move(Stat)) {}
- static llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>>
- create(EntryRef Entry,
- ExcludedPreprocessorDirectiveSkipMapping *PPSkipMappings);
+ static llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>> create(EntryRef Entry);
llvm::ErrorOr<llvm::vfs::Status> status() override { return Stat; }
@@ -328,23 +292,19 @@ private:
} // end anonymous namespace
-llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>> MinimizedVFSFile::create(
- EntryRef Entry, ExcludedPreprocessorDirectiveSkipMapping *PPSkipMappings) {
+llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>>
+DepScanFile::create(EntryRef Entry) {
assert(!Entry.isError() && "error");
if (Entry.isDirectory())
return std::make_error_code(std::errc::is_a_directory);
- auto Result = std::make_unique<MinimizedVFSFile>(
+ auto Result = std::make_unique<DepScanFile>(
llvm::MemoryBuffer::getMemBuffer(Entry.getContents(),
Entry.getStatus().getName(),
/*RequiresNullTerminator=*/false),
Entry.getStatus());
- const auto *EntrySkipMappings = Entry.getPPSkippedRangeMapping();
- if (EntrySkipMappings && !EntrySkipMappings->empty() && PPSkipMappings)
- (*PPSkipMappings)[Result->Buffer->getBufferStart()] = EntrySkipMappings;
-
return llvm::ErrorOr<std::unique_ptr<llvm::vfs::File>>(
std::unique_ptr<llvm::vfs::File>(std::move(Result)));
}
@@ -357,5 +317,5 @@ DependencyScanningWorkerFilesystem::openFileForRead(const Twine &Path) {
llvm::ErrorOr<EntryRef> Result = getOrCreateFileSystemEntry(Filename);
if (!Result)
return Result.getError();
- return MinimizedVFSFile::create(Result.get(), PPSkipMappings);
+ return DepScanFile::create(Result.get());
}
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningService.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningService.cpp
index 4b6c87aba62f..cda3e2dd8550 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningService.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningService.cpp
@@ -15,9 +15,9 @@ using namespace dependencies;
DependencyScanningService::DependencyScanningService(
ScanningMode Mode, ScanningOutputFormat Format, bool ReuseFileManager,
- bool SkipExcludedPPRanges, bool OptimizeArgs)
+ bool OptimizeArgs)
: Mode(Mode), Format(Format), ReuseFileManager(ReuseFileManager),
- SkipExcludedPPRanges(SkipExcludedPPRanges), OptimizeArgs(OptimizeArgs) {
+ OptimizeArgs(OptimizeArgs) {
// Initialize targets for object file support.
llvm::InitializeAllTargets();
llvm::InitializeAllTargetMCs();
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp
index 739712baadd0..26c2b2b2f394 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningTool.cpp
@@ -9,35 +9,40 @@
#include "clang/Tooling/DependencyScanning/DependencyScanningTool.h"
#include "clang/Frontend/Utils.h"
-namespace clang{
-namespace tooling{
-namespace dependencies{
-
-std::vector<std::string> FullDependencies::getAdditionalArgs(
- std::function<StringRef(ModuleID)> LookupPCMPath,
- std::function<const ModuleDeps &(ModuleID)> LookupModuleDeps) const {
- std::vector<std::string> Ret = getAdditionalArgsWithoutModulePaths();
-
- std::vector<std::string> PCMPaths;
- std::vector<std::string> ModMapPaths;
- dependencies::detail::collectPCMAndModuleMapPaths(
- ClangModuleDeps, LookupPCMPath, LookupModuleDeps, PCMPaths, ModMapPaths);
- for (const std::string &PCMPath : PCMPaths)
- Ret.push_back("-fmodule-file=" + PCMPath);
+namespace clang {
+namespace tooling {
+namespace dependencies {
+
+std::vector<std::string> FullDependencies::getCommandLine(
+ std::function<StringRef(ModuleID)> LookupPCMPath) const {
+ std::vector<std::string> Ret = getCommandLineWithoutModulePaths();
+
+ for (ModuleID MID : ClangModuleDeps)
+ Ret.push_back(("-fmodule-file=" + LookupPCMPath(MID)).str());
return Ret;
}
std::vector<std::string>
-FullDependencies::getAdditionalArgsWithoutModulePaths() const {
- std::vector<std::string> Args{
- "-fno-implicit-modules",
- "-fno-implicit-module-maps",
- };
+FullDependencies::getCommandLineWithoutModulePaths() const {
+ std::vector<std::string> Args = OriginalCommandLine;
+ Args.push_back("-fno-implicit-modules");
+ Args.push_back("-fno-implicit-module-maps");
for (const PrebuiltModuleDep &PMD : PrebuiltModuleDeps)
Args.push_back("-fmodule-file=" + PMD.PCMFile);
+ // These arguments are unused in explicit compiles.
+ llvm::erase_if(Args, [](StringRef Arg) {
+ if (Arg.consume_front("-fmodules-")) {
+ return Arg.startswith("cache-path=") ||
+ Arg.startswith("prune-interval=") ||
+ Arg.startswith("prune-after=") ||
+ Arg == "validate-once-per-build-session";
+ }
+ return Arg.startswith("-fbuild-session-file=");
+ });
+
return Args;
}
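+// Example of the pruning above: "-fmodules-cache-path=/tmp/cache" and
+// "-fbuild-session-file=/tmp/session" are both erased, while an option such
+// as "-fmodules-strict-context-hash" survives because it matches none of the
+// listed prefixes.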
@@ -138,9 +143,13 @@ DependencyScanningTool::getFullDependencies(
ContextHash = std::move(Hash);
}
- FullDependenciesResult getFullDependencies() const {
+ FullDependenciesResult getFullDependencies(
+ const std::vector<std::string> &OriginalCommandLine) const {
FullDependencies FD;
+ FD.OriginalCommandLine =
+ ArrayRef<std::string>(OriginalCommandLine).slice(1);
+
FD.ID.ContextHash = std::move(ContextHash);
FD.FileDeps.assign(Dependencies.begin(), Dependencies.end());
@@ -170,7 +179,7 @@ DependencyScanningTool::getFullDependencies(
private:
std::vector<std::string> Dependencies;
std::vector<PrebuiltModuleDep> PrebuiltModuleDeps;
- std::map<std::string, ModuleDeps> ClangModuleDeps;
+ llvm::MapVector<std::string, ModuleDeps, llvm::StringMap<unsigned>> ClangModuleDeps;
std::string ContextHash;
std::vector<std::string> OutputPaths;
const llvm::StringSet<> &AlreadySeen;
@@ -181,7 +190,7 @@ DependencyScanningTool::getFullDependencies(
Worker.computeDependencies(CWD, CommandLine, Consumer, ModuleName);
if (Result)
return std::move(Result);
- return Consumer.getFullDependencies();
+ return Consumer.getFullDependencies(CommandLine);
}
} // end namespace dependencies
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
index 70bb6c5caf87..e7d1375c83f0 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/DependencyScanningWorker.cpp
@@ -46,21 +46,25 @@ private:
DependencyConsumer &C;
};
+using PrebuiltModuleFilesT = decltype(HeaderSearchOptions::PrebuiltModuleFiles);
+
/// A listener that collects the imported modules and optionally the input
/// files.
class PrebuiltModuleListener : public ASTReaderListener {
public:
- PrebuiltModuleListener(llvm::StringMap<std::string> &PrebuiltModuleFiles,
- llvm::StringSet<> &InputFiles, bool VisitInputFiles)
+ PrebuiltModuleListener(PrebuiltModuleFilesT &PrebuiltModuleFiles,
+ llvm::StringSet<> &InputFiles, bool VisitInputFiles,
+ llvm::SmallVector<std::string> &NewModuleFiles)
: PrebuiltModuleFiles(PrebuiltModuleFiles), InputFiles(InputFiles),
- VisitInputFiles(VisitInputFiles) {}
+ VisitInputFiles(VisitInputFiles), NewModuleFiles(NewModuleFiles) {}
bool needsImportVisitation() const override { return true; }
bool needsInputFileVisitation() override { return VisitInputFiles; }
bool needsSystemInputFileVisitation() override { return VisitInputFiles; }
void visitImport(StringRef ModuleName, StringRef Filename) override {
- PrebuiltModuleFiles.insert({ModuleName, Filename.str()});
+ if (PrebuiltModuleFiles.insert({ModuleName.str(), Filename.str()}).second)
+ NewModuleFiles.push_back(Filename.str());
}
bool visitInputFile(StringRef Filename, bool isSystem, bool isOverridden,
@@ -70,13 +74,12 @@ public:
}
private:
- llvm::StringMap<std::string> &PrebuiltModuleFiles;
+ PrebuiltModuleFilesT &PrebuiltModuleFiles;
llvm::StringSet<> &InputFiles;
bool VisitInputFiles;
+ llvm::SmallVector<std::string> &NewModuleFiles;
};
-using PrebuiltModuleFilesT = decltype(HeaderSearchOptions::PrebuiltModuleFiles);
-
/// Visit the given prebuilt module and collect all of the modules it
/// transitively imports and contributing input files.
static void visitPrebuiltModule(StringRef PrebuiltModuleFilename,
@@ -84,33 +87,17 @@ static void visitPrebuiltModule(StringRef PrebuiltModuleFilename,
PrebuiltModuleFilesT &ModuleFiles,
llvm::StringSet<> &InputFiles,
bool VisitInputFiles) {
- // Maps the names of modules that weren't yet visited to their PCM path.
- llvm::StringMap<std::string> ModuleFilesWorklist;
- // Contains PCM paths of all visited modules.
- llvm::StringSet<> VisitedModuleFiles;
-
- PrebuiltModuleListener Listener(ModuleFilesWorklist, InputFiles,
- VisitInputFiles);
+ // List of module files to be processed.
+ llvm::SmallVector<std::string> Worklist{PrebuiltModuleFilename.str()};
+ PrebuiltModuleListener Listener(ModuleFiles, InputFiles, VisitInputFiles,
+ Worklist);
- auto GatherModuleFileInfo = [&](StringRef ASTFile) {
+ while (!Worklist.empty())
ASTReader::readASTFileControlBlock(
- ASTFile, CI.getFileManager(), CI.getPCHContainerReader(),
+ Worklist.pop_back_val(), CI.getFileManager(),
+ CI.getPCHContainerReader(),
/*FindModuleFileExtensions=*/false, Listener,
/*ValidateDiagnosticOptions=*/false);
- };
-
- GatherModuleFileInfo(PrebuiltModuleFilename);
- while (!ModuleFilesWorklist.empty()) {
- auto WorklistItemIt = ModuleFilesWorklist.begin();
-
- if (!VisitedModuleFiles.contains(WorklistItemIt->getValue())) {
- VisitedModuleFiles.insert(WorklistItemIt->getValue());
- GatherModuleFileInfo(WorklistItemIt->getValue());
- ModuleFiles[WorklistItemIt->getKey().str()] = WorklistItemIt->getValue();
- }
-
- ModuleFilesWorklist.erase(WorklistItemIt);
- }
}
/// Transform arbitrary file name into an object-like file name.
@@ -150,12 +137,11 @@ public:
DependencyScanningAction(
StringRef WorkingDirectory, DependencyConsumer &Consumer,
llvm::IntrusiveRefCntPtr<DependencyScanningWorkerFilesystem> DepFS,
- ExcludedPreprocessorDirectiveSkipMapping *PPSkipMappings,
- ScanningOutputFormat Format, bool OptimizeArgs,
+ ScanningOutputFormat Format, bool OptimizeArgs, bool DisableFree,
llvm::Optional<StringRef> ModuleName = None)
: WorkingDirectory(WorkingDirectory), Consumer(Consumer),
- DepFS(std::move(DepFS)), PPSkipMappings(PPSkipMappings), Format(Format),
- OptimizeArgs(OptimizeArgs), ModuleName(ModuleName) {}
+ DepFS(std::move(DepFS)), Format(Format), OptimizeArgs(OptimizeArgs),
+ DisableFree(DisableFree), ModuleName(ModuleName) {}
bool runInvocation(std::shared_ptr<CompilerInvocation> Invocation,
FileManager *FileMgr,
@@ -163,6 +149,8 @@ public:
DiagnosticConsumer *DiagConsumer) override {
// Make a deep copy of the original Clang invocation.
CompilerInvocation OriginalInvocation(*Invocation);
+ // Restore the value of DisableFree, which may be modified by Tooling.
+ OriginalInvocation.getFrontendOpts().DisableFree = DisableFree;
// Create a compiler instance to handle the actual work.
CompilerInstance ScanInstance(std::move(PCHContainerOps));
@@ -177,6 +165,9 @@ public:
ScanInstance.getPreprocessorOpts().AllowPCHWithDifferentModulesCachePath =
true;
+ ScanInstance.getFrontendOpts().GenerateGlobalModuleIndex = false;
+ ScanInstance.getFrontendOpts().UseGlobalModuleIndex = false;
+
FileMgr->getFileSystemOpts().WorkingDir = std::string(WorkingDirectory);
ScanInstance.setFileManager(FileMgr);
ScanInstance.createSourceManager(*FileMgr);
@@ -193,30 +184,21 @@ public:
// Use the dependency scanning optimized file system if requested to do so.
if (DepFS) {
- DepFS->enableMinimizationOfAllFiles();
- // Don't minimize any files that contributed to prebuilt modules. The
- // implicit build validates the modules by comparing the reported sizes of
- // their inputs to the current state of the filesystem. Minimization would
- // throw this mechanism off.
- for (const auto &File : PrebuiltModulesInputFiles)
- DepFS->disableMinimization(File.getKey());
- // Don't minimize any files that were explicitly passed in the build
- // settings and that might be opened.
- for (const auto &E : ScanInstance.getHeaderSearchOpts().UserEntries)
- DepFS->disableMinimization(E.Path);
- for (const auto &F : ScanInstance.getHeaderSearchOpts().VFSOverlayFiles)
- DepFS->disableMinimization(F);
-
// Support for virtual file system overlays on top of the caching
// filesystem.
FileMgr->setVirtualFileSystem(createVFSFromCompilerInvocation(
ScanInstance.getInvocation(), ScanInstance.getDiagnostics(), DepFS));
- // Pass the skip mappings which should speed up excluded conditional block
- // skipping in the preprocessor.
- if (PPSkipMappings)
- ScanInstance.getPreprocessorOpts()
- .ExcludedConditionalDirectiveSkipMappings = PPSkipMappings;
+ llvm::IntrusiveRefCntPtr<DependencyScanningWorkerFilesystem> LocalDepFS =
+ DepFS;
+ ScanInstance.getPreprocessorOpts().DependencyDirectivesForFile =
+ [LocalDepFS = std::move(LocalDepFS)](FileEntryRef File)
+ -> Optional<ArrayRef<dependency_directives_scan::Directive>> {
+ if (llvm::ErrorOr<EntryRef> Entry =
+ LocalDepFS->getOrCreateFileSystemEntry(File.getName()))
+ return Entry->getDirectiveTokens();
+ return None;
+ };
}
// Create the dependency collector that will collect the produced
@@ -258,7 +240,7 @@ public:
std::unique_ptr<FrontendAction> Action;
- if (ModuleName.hasValue())
+ if (ModuleName)
Action = std::make_unique<GetDependenciesByModuleNameAction>(*ModuleName);
else
Action = std::make_unique<ReadPCHAndPreprocessAction>();
@@ -273,9 +255,9 @@ private:
StringRef WorkingDirectory;
DependencyConsumer &Consumer;
llvm::IntrusiveRefCntPtr<DependencyScanningWorkerFilesystem> DepFS;
- ExcludedPreprocessorDirectiveSkipMapping *PPSkipMappings;
ScanningOutputFormat Format;
bool OptimizeArgs;
+ bool DisableFree;
llvm::Optional<StringRef> ModuleName;
};
@@ -298,12 +280,9 @@ DependencyScanningWorker::DependencyScanningWorker(
OverlayFS->pushOverlay(InMemoryFS);
RealFS = OverlayFS;
- if (Service.canSkipExcludedPPRanges())
- PPSkipMappings =
- std::make_unique<ExcludedPreprocessorDirectiveSkipMapping>();
- if (Service.getMode() == ScanningMode::MinimizedSourcePreprocessing)
- DepFS = new DependencyScanningWorkerFilesystem(
- Service.getSharedCache(), RealFS, PPSkipMappings.get());
+ if (Service.getMode() == ScanningMode::DependencyDirectivesScan)
+ DepFS = new DependencyScanningWorkerFilesystem(Service.getSharedCache(),
+ RealFS);
if (Service.canReuseFileManager())
Files = new FileManager(FileSystemOptions(), RealFS);
}
@@ -338,7 +317,7 @@ llvm::Error DependencyScanningWorker::computeDependencies(
Files ? Files : new FileManager(FileSystemOptions(), RealFS);
Optional<std::vector<std::string>> ModifiedCommandLine;
- if (ModuleName.hasValue()) {
+ if (ModuleName) {
ModifiedCommandLine = CommandLine;
InMemoryFS->addFile(*ModuleName, 0, llvm::MemoryBuffer::getMemBuffer(""));
ModifiedCommandLine->emplace_back(*ModuleName);
@@ -353,10 +332,13 @@ llvm::Error DependencyScanningWorker::computeDependencies(
return runWithDiags(CreateAndPopulateDiagOpts(FinalCCommandLine).release(),
[&](DiagnosticConsumer &DC, DiagnosticOptions &DiagOpts) {
+ // DisableFree is modified by Tooling for running
+ // in-process; preserve the original value, which is
+ // always true for a driver invocation.
+ bool DisableFree = true;
DependencyScanningAction Action(
- WorkingDirectory, Consumer, DepFS,
- PPSkipMappings.get(), Format, OptimizeArgs,
- ModuleName);
+ WorkingDirectory, Consumer, DepFS, Format,
+ OptimizeArgs, DisableFree, ModuleName);
// Create an invocation that uses the underlying file
// system to ensure that any file system requests that
// are made by the driver do not go through the
diff --git a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
index 086215e7a573..f7d96130b971 100644
--- a/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/DependencyScanning/ModuleDepCollector.cpp
@@ -23,9 +23,21 @@ static void optimizeHeaderSearchOpts(HeaderSearchOptions &Opts,
// Only preserve search paths that were used during the dependency scan.
std::vector<HeaderSearchOptions::Entry> Entries = Opts.UserEntries;
Opts.UserEntries.clear();
- for (unsigned I = 0; I < Entries.size(); ++I)
- if (MF.SearchPathUsage[I])
- Opts.UserEntries.push_back(Entries[I]);
+
+ llvm::BitVector SearchPathUsage(Entries.size());
+ llvm::DenseSet<const serialization::ModuleFile *> Visited;
+ std::function<void(const serialization::ModuleFile *)> VisitMF =
+ [&](const serialization::ModuleFile *MF) {
+ SearchPathUsage |= MF->SearchPathUsage;
+ Visited.insert(MF);
+ for (const serialization::ModuleFile *Import : MF->Imports)
+ if (!Visited.contains(Import))
+ VisitMF(Import);
+ };
+ VisitMF(&MF);
+
+ for (auto Idx : SearchPathUsage.set_bits())
+ Opts.UserEntries.push_back(Entries[Idx]);
}
CompilerInvocation ModuleDepCollector::makeInvocationForModuleBuildWithoutPaths(
@@ -49,12 +61,24 @@ CompilerInvocation ModuleDepCollector::makeInvocationForModuleBuildWithoutPaths(
CI.getLangOpts()->ModuleName = Deps.ID.ModuleName;
CI.getFrontendOpts().IsSystemModule = Deps.IsSystem;
+ // Disable implicit modules and canonicalize options that are only used by
+ // implicit modules.
CI.getLangOpts()->ImplicitModules = false;
+ CI.getHeaderSearchOpts().ImplicitModuleMaps = false;
+ CI.getHeaderSearchOpts().ModuleCachePath.clear();
+ CI.getHeaderSearchOpts().ModulesValidateOncePerBuildSession = false;
+ CI.getHeaderSearchOpts().BuildSessionTimestamp = 0;
+ // The specific values we canonicalize to for pruning don't affect behaviour,
+ // so use the default values so they will be dropped from the command line.
+ CI.getHeaderSearchOpts().ModuleCachePruneInterval = 7 * 24 * 60 * 60;
+ CI.getHeaderSearchOpts().ModuleCachePruneAfter = 31 * 24 * 60 * 60;
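+ // (7 * 24 * 60 * 60 == 604800 seconds, i.e. prune at most once a week;
+ // 31 * 24 * 60 * 60 == 2678400 seconds, i.e. prune modules unused for a
+ // month.)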
// Report the prebuilt modules this module uses.
for (const auto &PrebuiltModule : Deps.PrebuiltModuleDeps)
CI.getFrontendOpts().ModuleFiles.push_back(PrebuiltModule.PCMFile);
+ CI.getFrontendOpts().ModuleMapFiles = Deps.ModuleMapFileDeps;
+
Optimize(CI);
// The original invocation probably didn't have strict context hash enabled.
@@ -84,8 +108,7 @@ serializeCompilerInvocation(const CompilerInvocation &CI) {
}
std::vector<std::string> ModuleDeps::getCanonicalCommandLine(
- std::function<StringRef(ModuleID)> LookupPCMPath,
- std::function<const ModuleDeps &(ModuleID)> LookupModuleDeps) const {
+ std::function<StringRef(ModuleID)> LookupPCMPath) const {
CompilerInvocation CI(BuildInvocation);
FrontendOptions &FrontendOpts = CI.getFrontendOpts();
@@ -94,9 +117,8 @@ std::vector<std::string> ModuleDeps::getCanonicalCommandLine(
FrontendOpts.Inputs.emplace_back(ClangModuleMapFile, ModuleMapInputKind);
FrontendOpts.OutputFile = std::string(LookupPCMPath(ID));
- dependencies::detail::collectPCMAndModuleMapPaths(
- ClangModuleDeps, LookupPCMPath, LookupModuleDeps,
- FrontendOpts.ModuleFiles, FrontendOpts.ModuleMapFiles);
+ for (ModuleID MID : ClangModuleDeps)
+ FrontendOpts.ModuleFiles.emplace_back(LookupPCMPath(MID));
return serializeCompilerInvocation(CI);
}
@@ -106,30 +128,6 @@ ModuleDeps::getCanonicalCommandLineWithoutModulePaths() const {
return serializeCompilerInvocation(BuildInvocation);
}
-void dependencies::detail::collectPCMAndModuleMapPaths(
- llvm::ArrayRef<ModuleID> Modules,
- std::function<StringRef(ModuleID)> LookupPCMPath,
- std::function<const ModuleDeps &(ModuleID)> LookupModuleDeps,
- std::vector<std::string> &PCMPaths, std::vector<std::string> &ModMapPaths) {
- llvm::StringSet<> AlreadyAdded;
-
- std::function<void(llvm::ArrayRef<ModuleID>)> AddArgs =
- [&](llvm::ArrayRef<ModuleID> Modules) {
- for (const ModuleID &MID : Modules) {
- if (!AlreadyAdded.insert(MID.ModuleName + MID.ContextHash).second)
- continue;
- const ModuleDeps &M = LookupModuleDeps(MID);
- // Depth first traversal.
- AddArgs(M.ClangModuleDeps);
- PCMPaths.push_back(LookupPCMPath(MID).str());
- if (!M.ClangModuleMapFile.empty())
- ModMapPaths.push_back(M.ClangModuleMapFile);
- }
- };
-
- AddArgs(Modules);
-}
-
void ModuleDepCollectorPP::FileChanged(SourceLocation Loc,
FileChangeReason Reason,
SrcMgr::CharacteristicKind FileType,
@@ -157,7 +155,7 @@ void ModuleDepCollectorPP::FileChanged(SourceLocation Loc,
void ModuleDepCollectorPP::InclusionDirective(
SourceLocation HashLoc, const Token &IncludeTok, StringRef FileName,
- bool IsAngled, CharSourceRange FilenameRange, const FileEntry *File,
+ bool IsAngled, CharSourceRange FilenameRange, Optional<FileEntryRef> File,
StringRef SearchPath, StringRef RelativePath, const Module *Imported,
SrcMgr::CharacteristicKind FileType) {
if (!File && !Imported) {
@@ -209,7 +207,7 @@ void ModuleDepCollectorPP::EndOfMainFile() {
MDC.Consumer.handleDependencyOutputOpts(*MDC.Opts);
for (auto &&I : MDC.ModularDeps)
- MDC.Consumer.handleModuleDependency(I.second);
+ MDC.Consumer.handleModuleDependency(*I.second);
for (auto &&I : MDC.FileDeps)
MDC.Consumer.handleFileDependency(I);
@@ -222,11 +220,12 @@ ModuleID ModuleDepCollectorPP::handleTopLevelModule(const Module *M) {
assert(M == M->getTopLevelModule() && "Expected top level module!");
// If this module has been handled already, just return its ID.
- auto ModI = MDC.ModularDeps.insert({M, ModuleDeps{}});
+ auto ModI = MDC.ModularDeps.insert({M, nullptr});
if (!ModI.second)
- return ModI.first->second.ID;
+ return ModI.first->second->ID;
- ModuleDeps &MD = ModI.first->second;
+ ModI.first->second = std::make_unique<ModuleDeps>();
+ ModuleDeps &MD = *ModI.first->second;
MD.ID.ModuleName = M->getFullModuleName();
MD.ImportedByMainFile = DirectModularDeps.contains(M);
@@ -262,6 +261,42 @@ ModuleID ModuleDepCollectorPP::handleTopLevelModule(const Module *M) {
MD.FileDeps.insert(IF.getFile()->getName());
});
+ // We usually don't need to list the module map files of our dependencies when
+ // building a module explicitly: their semantics will be deserialized from PCM
+ // files.
+ //
+ // However, some module maps loaded implicitly during the dependency scan can
+ // describe anti-dependencies. That happens when this module, let's call it
+ // M1, is marked as '[no_undeclared_includes]' and tries to access a header
+ // "M2/M2.h" from another module, M2, but doesn't have a 'use M2;'
+ // declaration. The explicit build needs the module map for M2 so that it
+ // knows that textually including "M2/M2.h" is not allowed.
+ // E.g., '__has_include("M2/M2.h")' should return false, but without M2's
+ // module map the explicit build would return true.
+ //
+ // An alternative approach would be to tell the explicit build what its
+ // textual dependencies are, instead of having it re-discover its
+ // anti-dependencies. For example, we could create and use an `-ivfs-overlay`
+ // with `fall-through: false` that explicitly listed the dependencies.
+ // However, that's more complicated to implement and harder to reason about.
+ if (M->NoUndeclaredIncludes) {
+ // We don't have a good way to determine which module map described the
+ // anti-dependency (let alone what the corresponding top-level module map
+ // is). We simply specify all the module maps in the order they were loaded
+ // during the implicit build performed by the scan.
+ // TODO: Resolve this by serializing and only using Module::UndeclaredUses.
+ MDC.ScanInstance.getASTReader()->visitTopLevelModuleMaps(
+ *MF, [&](const FileEntry *FE) {
+ if (FE->getName().endswith("__inferred_module.map"))
+ return;
+ // The top-level module map of this module will be the input file. We
+ // don't need to specify it as a module map.
+ if (FE == ModuleMap)
+ return;
+ MD.ModuleMapFileDeps.push_back(FE->getName().str());
+ });
+ }
+
// Add direct prebuilt module dependencies now, so that we can use them when
// creating a CompilerInvocation and computing context hash for this
// ModuleDeps instance.
@@ -282,13 +317,28 @@ ModuleID ModuleDepCollectorPP::handleTopLevelModule(const Module *M) {
return MD.ID;
}
+static void forEachSubmoduleSorted(const Module *M,
+ llvm::function_ref<void(const Module *)> F) {
+ // Submodule order depends on the order of header includes for inferred
+ // submodules. We don't care about the exact order, so sort to make it
+ // consistent across TUs to improve sharing.
+ SmallVector<const Module *> Submodules(M->submodule_begin(),
+ M->submodule_end());
+ llvm::stable_sort(Submodules, [](const Module *A, const Module *B) {
+ return A->Name < B->Name;
+ });
+ for (const Module *SubM : Submodules)
+ F(SubM);
+}
+
void ModuleDepCollectorPP::addAllSubmodulePrebuiltDeps(
const Module *M, ModuleDeps &MD,
llvm::DenseSet<const Module *> &SeenSubmodules) {
addModulePrebuiltDeps(M, MD, SeenSubmodules);
- for (const Module *SubM : M->submodules())
+ forEachSubmoduleSorted(M, [&](const Module *SubM) {
addAllSubmodulePrebuiltDeps(SubM, MD, SeenSubmodules);
+ });
}
void ModuleDepCollectorPP::addModulePrebuiltDeps(
@@ -306,8 +356,9 @@ void ModuleDepCollectorPP::addAllSubmoduleDeps(
llvm::DenseSet<const Module *> &AddedModules) {
addModuleDep(M, MD, AddedModules);
- for (const Module *SubM : M->submodules())
+ forEachSubmoduleSorted(M, [&](const Module *SubM) {
addAllSubmoduleDeps(SubM, MD, AddedModules);
+ });
}
void ModuleDepCollectorPP::addModuleDep(
diff --git a/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp b/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
index fbceb26c39c7..fc8773e60c58 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Inclusions/HeaderIncludes.cpp
@@ -43,8 +43,9 @@ unsigned getOffsetAfterTokenSequence(
GetOffsetAfterSequence) {
SourceManagerForFile VirtualSM(FileName, Code);
SourceManager &SM = VirtualSM.get();
+ LangOptions LangOpts = createLangOpts();
Lexer Lex(SM.getMainFileID(), SM.getBufferOrFake(SM.getMainFileID()), SM,
- createLangOpts());
+ LangOpts);
Token Tok;
// Get the first token.
Lex.LexFromRawLexer(Tok);
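
The hunk above names the LangOptions because Lexer stores a reference to the options it is given; passing the temporary left that reference dangling for the rest of the function. A self-contained illustration of the hazard, with hypothetical types:

struct Options { bool Cpp = true; };
struct RawLexer {
  const Options &Opts;                         // stored reference, not a copy
  explicit RawLexer(const Options &O) : Opts(O) {}
};
Options makeOptions() { return {}; }

void demo() {
  // RawLexer Bad(makeOptions());              // temporary dies at the ';'
  Options O = makeOptions();                   // hoisted local, as in the patch
  RawLexer Good(O);                            // reference valid while O lives
}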
diff --git a/contrib/llvm-project/clang/lib/Tooling/Inclusions/StandardLibrary.cpp b/contrib/llvm-project/clang/lib/Tooling/Inclusions/StandardLibrary.cpp
new file mode 100644
index 000000000000..8fb0c8474e64
--- /dev/null
+++ b/contrib/llvm-project/clang/lib/Tooling/Inclusions/StandardLibrary.cpp
@@ -0,0 +1,165 @@
+//===--- StandardLibrary.cpp ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Tooling/Inclusions/StandardLibrary.h"
+#include "llvm/ADT/Optional.h"
+#include "llvm/ADT/StringRef.h"
+#include "llvm/Support/Casting.h"
+
+namespace clang {
+namespace tooling {
+namespace stdlib {
+
+static llvm::StringRef *HeaderNames;
+static std::pair<llvm::StringRef, llvm::StringRef> *SymbolNames;
+static unsigned *SymbolHeaderIDs;
+static llvm::DenseMap<llvm::StringRef, unsigned> *HeaderIDs;
+// Maps symbol name -> Symbol::ID, within a namespace.
+using NSSymbolMap = llvm::DenseMap<llvm::StringRef, unsigned>;
+static llvm::DenseMap<llvm::StringRef, NSSymbolMap *> *NamespaceSymbols;
+
+static int initialize() {
+ unsigned SymCount = 0;
+#define SYMBOL(Name, NS, Header) ++SymCount;
+#include "clang/Tooling/Inclusions/CSymbolMap.inc"
+#include "clang/Tooling/Inclusions/StdSymbolMap.inc"
+#undef SYMBOL
+ SymbolNames = new std::remove_reference_t<decltype(*SymbolNames)>[SymCount];
+ SymbolHeaderIDs =
+ new std::remove_reference_t<decltype(*SymbolHeaderIDs)>[SymCount];
+ NamespaceSymbols = new std::remove_reference_t<decltype(*NamespaceSymbols)>;
+ HeaderIDs = new std::remove_reference_t<decltype(*HeaderIDs)>;
+
+ auto AddNS = [&](llvm::StringRef NS) -> NSSymbolMap & {
+ auto R = NamespaceSymbols->try_emplace(NS, nullptr);
+ if (R.second)
+ R.first->second = new NSSymbolMap();
+ return *R.first->second;
+ };
+
+ auto AddHeader = [&](llvm::StringRef Header) -> unsigned {
+ return HeaderIDs->try_emplace(Header, HeaderIDs->size()).first->second;
+ };
+
+ auto Add = [&, SymIndex(0)](llvm::StringRef Name, llvm::StringRef NS,
+ llvm::StringRef HeaderName) mutable {
+ if (NS == "None")
+ NS = "";
+
+ SymbolNames[SymIndex] = {NS, Name};
+ SymbolHeaderIDs[SymIndex] = AddHeader(HeaderName);
+
+ NSSymbolMap &NSSymbols = AddNS(NS);
+ NSSymbols.try_emplace(Name, SymIndex);
+
+ ++SymIndex;
+ };
+#define SYMBOL(Name, NS, Header) Add(#Name, #NS, #Header);
+#include "clang/Tooling/Inclusions/CSymbolMap.inc"
+#include "clang/Tooling/Inclusions/StdSymbolMap.inc"
+#undef SYMBOL
+
+ HeaderNames = new llvm::StringRef[HeaderIDs->size()];
+ for (const auto &E : *HeaderIDs)
+ HeaderNames[E.second] = E.first;
+
+ return 0;
+}
+
+static void ensureInitialized() {
+ static int Dummy = initialize();
+ (void)Dummy;
+}
+
+llvm::Optional<Header> Header::named(llvm::StringRef Name) {
+ ensureInitialized();
+ auto It = HeaderIDs->find(Name);
+ if (It == HeaderIDs->end())
+ return llvm::None;
+ return Header(It->second);
+}
+llvm::StringRef Header::name() const { return HeaderNames[ID]; }
+llvm::StringRef Symbol::scope() const { return SymbolNames[ID].first; }
+llvm::StringRef Symbol::name() const { return SymbolNames[ID].second; }
+llvm::Optional<Symbol> Symbol::named(llvm::StringRef Scope,
+ llvm::StringRef Name) {
+ ensureInitialized();
+ if (NSSymbolMap *NSSymbols = NamespaceSymbols->lookup(Scope)) {
+ auto It = NSSymbols->find(Name);
+ if (It != NSSymbols->end())
+ return Symbol(It->second);
+ }
+ return llvm::None;
+}
+Header Symbol::header() const { return Header(SymbolHeaderIDs[ID]); }
+llvm::SmallVector<Header> Symbol::headers() const {
+ return {header()}; // FIXME: multiple in case of ambiguity
+}
+
+Recognizer::Recognizer() { ensureInitialized(); }
+
+NSSymbolMap *Recognizer::namespaceSymbols(const NamespaceDecl *D) {
+ auto It = NamespaceCache.find(D);
+ if (It != NamespaceCache.end())
+ return It->second;
+
+ NSSymbolMap *Result = [&]() -> NSSymbolMap * {
+ if (D && D->isAnonymousNamespace())
+ return nullptr;
+ // Print the namespace and its parents, omitting inline scopes.
+ std::string Scope;
+ for (const auto *ND = D; ND;
+ ND = llvm::dyn_cast_or_null<NamespaceDecl>(ND->getParent()))
+ if (!ND->isInlineNamespace() && !ND->isAnonymousNamespace())
+ Scope = ND->getName().str() + "::" + Scope;
+ return NamespaceSymbols->lookup(Scope);
+ }();
+ NamespaceCache.try_emplace(D, Result);
+ return Result;
+}
+
+llvm::Optional<Symbol> Recognizer::operator()(const Decl *D) {
+ // If D is std::vector::iterator, `vector` is the outer symbol to look up.
+ // We keep all the candidate DCs as some may turn out to be anon enums.
+ // Do this resolution lazily as we may turn out not to have a std namespace.
+ llvm::SmallVector<const DeclContext *> IntermediateDecl;
+ const DeclContext *DC = D->getDeclContext();
+ while (DC && !DC->isNamespace()) {
+ if (NamedDecl::classofKind(DC->getDeclKind()))
+ IntermediateDecl.push_back(DC);
+ DC = DC->getParent();
+ }
+ NSSymbolMap *Symbols = namespaceSymbols(cast_or_null<NamespaceDecl>(DC));
+ if (!Symbols)
+ return llvm::None;
+
+ llvm::StringRef Name = [&]() -> llvm::StringRef {
+ for (const auto *SymDC : llvm::reverse(IntermediateDecl)) {
+ DeclarationName N = cast<NamedDecl>(SymDC)->getDeclName();
+ if (const auto *II = N.getAsIdentifierInfo())
+ return II->getName();
+ if (!N.isEmpty())
+ return ""; // e.g. operator<: give up
+ }
+ if (const auto *ND = llvm::dyn_cast<NamedDecl>(D))
+ if (const auto *II = ND->getIdentifier())
+ return II->getName();
+ return "";
+ }();
+ if (Name.empty())
+ return llvm::None;
+
+ auto It = Symbols->find(Name);
+ if (It == Symbols->end())
+ return llvm::None;
+ return Symbol(It->second);
+}
+
+} // namespace stdlib
+} // namespace tooling
+} // namespace clang
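
A short usage sketch of the API this file implements; the scope string carries a trailing "::", matching how the Recognizer prints enclosing namespaces, and the header spellings come from the generated .inc tables:

#include "clang/Tooling/Inclusions/StandardLibrary.h"
#include "llvm/Support/raw_ostream.h"

void demo() {
  namespace stdlib = clang::tooling::stdlib;
  if (auto Sym = stdlib::Symbol::named("std::", "vector"))
    llvm::errs() << Sym->header().name() << "\n"; // expected: "<vector>"
  if (auto H = stdlib::Header::named("<cstdio>"))
    llvm::errs() << H->name() << "\n";
}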
diff --git a/contrib/llvm-project/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp b/contrib/llvm-project/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp
index 51e8439b6b79..0143b5f8df6b 100644
--- a/contrib/llvm-project/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/InterpolatingCompilationDatabase.cpp
@@ -328,7 +328,7 @@ public:
StringRef Path = Strings.save(StringRef(OriginalPaths[I]).lower());
Paths.emplace_back(Path, I);
- Types.push_back(foldType(guessType(Path)));
+ Types.push_back(foldType(guessType(OriginalPaths[I])));
Stems.emplace_back(sys::path::stem(Path), I);
auto Dir = ++sys::path::rbegin(Path), DirEnd = sys::path::rend(Path);
for (int J = 0; J < DirectorySegmentsIndexed && Dir != DirEnd; ++J, ++Dir)
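
The one-line fix above matters because type guessing is case-sensitive while the interpolation index lowercases paths for matching: a '.C' file (conventionally C++) would be guessed as C from the lowered copy. A hypothetical reduction of the bug, not the actual guessType logic:

#include "llvm/ADT/StringRef.h"

llvm::StringRef guessFromExtension(llvm::StringRef Path) { // hypothetical
  if (Path.endswith(".C") || Path.endswith(".cc") || Path.endswith(".cpp"))
    return "c++";
  if (Path.endswith(".c"))
    return "c";
  return "unknown";
}
// guessFromExtension("Foo.C") -> "c++"
// guessFromExtension("foo.c") -> "c"  (what the lowered path would yield)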
diff --git a/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp b/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp
index a69b76a3c971..de11263efd5a 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Refactoring/Rename/USRFindingAction.cpp
@@ -145,14 +145,12 @@ private:
void handleVarTemplateDecl(const VarTemplateDecl *VTD) {
USRSet.insert(getUSRForDecl(VTD));
USRSet.insert(getUSRForDecl(VTD->getTemplatedDecl()));
- llvm::for_each(VTD->specializations(), [&](const auto *Spec) {
+ for (const auto *Spec : VTD->specializations())
USRSet.insert(getUSRForDecl(Spec));
- });
SmallVector<VarTemplatePartialSpecializationDecl *, 4> PartialSpecs;
VTD->getPartialSpecializations(PartialSpecs);
- llvm::for_each(PartialSpecs, [&](const auto *Spec) {
+ for (const auto *Spec : PartialSpecs)
USRSet.insert(getUSRForDecl(Spec));
- });
}
void addUSRsOfCtorDtors(const CXXRecordDecl *RD) {
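
Replacing llvm::for_each with a plain range-for is behavior-preserving; the loop form simply avoids a single-use lambda. A trivial equivalence sketch:

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

void demo() {
  llvm::SmallVector<int, 4> Xs = {1, 2, 3};
  int Sum = 0;
  llvm::for_each(Xs, [&](int X) { Sum += X; }); // lambda-wrapped form
  for (int X : Xs)                              // equivalent, reads directly
    Sum += X;
}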
diff --git a/contrib/llvm-project/clang/lib/Tooling/Syntax/BuildTree.cpp b/contrib/llvm-project/clang/lib/Tooling/Syntax/BuildTree.cpp
index fcac2250dd96..484cf61664fe 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Syntax/BuildTree.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Syntax/BuildTree.cpp
@@ -572,7 +572,7 @@ private:
for (const auto &T : A.getTokenBuffer().expandedTokens().drop_back()) {
auto *L = new (A.getAllocator()) syntax::Leaf(&T);
L->Original = true;
- L->CanModify = A.getTokenBuffer().spelledForExpanded(T).hasValue();
+ L->CanModify = A.getTokenBuffer().spelledForExpanded(T).has_value();
Trees.insert(Trees.end(), {&T, L});
}
}
@@ -646,7 +646,7 @@ private:
// Mark that this node came from the AST and is backed by the source code.
Node->Original = true;
Node->CanModify =
- A.getTokenBuffer().spelledForExpanded(Tokens).hasValue();
+ A.getTokenBuffer().spelledForExpanded(Tokens).has_value();
Trees.erase(BeginChildren, EndChildren);
Trees.insert({FirstToken, Node});
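
Both BuildTree hunks (and the Stencil.cpp hunk below) are part of migrating llvm::Optional toward std::optional spellings; the semantics are unchanged:

#include "llvm/ADT/Optional.h"

void demo() {
  llvm::Optional<int> O = 42;
  bool A = O.has_value();        // new std::optional-compatible spelling
  bool B = static_cast<bool>(O); // equivalent contextual check, as in Stencil.cpp
  (void)A;
  (void)B;
}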
diff --git a/contrib/llvm-project/clang/lib/Tooling/Transformer/RewriteRule.cpp b/contrib/llvm-project/clang/lib/Tooling/Transformer/RewriteRule.cpp
index 93bd7e91dba7..3e76489782f3 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Transformer/RewriteRule.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Transformer/RewriteRule.cpp
@@ -166,10 +166,26 @@ ASTEdit transformer::addInclude(RangeSelector Target, StringRef Header,
return E;
}
-RewriteRule transformer::makeRule(DynTypedMatcher M, EditGenerator Edits,
- TextGenerator Explanation) {
- return RewriteRule{{RewriteRule::Case{std::move(M), std::move(Edits),
- std::move(Explanation)}}};
+EditGenerator
+transformer::detail::makeEditGenerator(llvm::SmallVector<ASTEdit, 1> Edits) {
+ return editList(std::move(Edits));
+}
+
+EditGenerator transformer::detail::makeEditGenerator(ASTEdit Edit) {
+ return edit(std::move(Edit));
+}
+
+RewriteRule transformer::detail::makeRule(DynTypedMatcher M,
+ EditGenerator Edits) {
+ RewriteRule R;
+ R.Cases = {{std::move(M), std::move(Edits)}};
+ return R;
+}
+
+RewriteRule transformer::makeRule(ast_matchers::internal::DynTypedMatcher M,
+ std::initializer_list<ASTEdit> Edits) {
+ return detail::makeRule(std::move(M),
+ detail::makeEditGenerator(std::move(Edits)));
}
namespace {
@@ -247,9 +263,8 @@ public:
void run(const MatchFinder::MatchResult &Result) override {
if (!Edits)
return;
- transformer::RewriteRule::Case Case =
- transformer::detail::findSelectedCase(Result, Rule);
- auto Transformations = Case.Edits(Result);
+ size_t I = transformer::detail::findSelectedCase(Result, Rule);
+ auto Transformations = Rule.Cases[I].Edits(Result);
if (!Transformations) {
Edits = Transformations.takeError();
return;
@@ -325,7 +340,7 @@ EditGenerator transformer::rewriteDescendants(std::string NodeId,
};
}
-void transformer::addInclude(RewriteRule &Rule, StringRef Header,
+void transformer::addInclude(RewriteRuleBase &Rule, StringRef Header,
IncludeFormat Format) {
for (auto &Case : Rule.Cases)
Case.Edits = flatten(std::move(Case.Edits), addInclude(Header, Format));
@@ -366,7 +381,9 @@ static std::vector<DynTypedMatcher> taggedMatchers(
// Simply gathers the contents of the various rules into a single rule. The
// actual work to combine these into an ordered choice is deferred to matcher
// registration.
-RewriteRule transformer::applyFirst(ArrayRef<RewriteRule> Rules) {
+template <>
+RewriteRuleWith<void>
+transformer::applyFirst(ArrayRef<RewriteRuleWith<void>> Rules) {
RewriteRule R;
for (auto &Rule : Rules)
R.Cases.append(Rule.Cases.begin(), Rule.Cases.end());
@@ -374,12 +391,13 @@ RewriteRule transformer::applyFirst(ArrayRef<RewriteRule> Rules) {
}
std::vector<DynTypedMatcher>
-transformer::detail::buildMatchers(const RewriteRule &Rule) {
+transformer::detail::buildMatchers(const RewriteRuleBase &Rule) {
// Map the cases into buckets of matchers -- one for each "root" AST kind,
// which guarantees that they can be combined in a single anyOf matcher. Each
// case is paired with an identifying number that is converted to a string id
// in `taggedMatchers`.
- std::map<ASTNodeKind, SmallVector<std::pair<size_t, RewriteRule::Case>, 1>>
+ std::map<ASTNodeKind,
+ SmallVector<std::pair<size_t, RewriteRuleBase::Case>, 1>>
Buckets;
const SmallVectorImpl<RewriteRule::Case> &Cases = Rule.Cases;
for (int I = 0, N = Cases.size(); I < N; ++I) {
@@ -405,7 +423,7 @@ transformer::detail::buildMatchers(const RewriteRule &Rule) {
return Matchers;
}
-DynTypedMatcher transformer::detail::buildMatcher(const RewriteRule &Rule) {
+DynTypedMatcher transformer::detail::buildMatcher(const RewriteRuleBase &Rule) {
std::vector<DynTypedMatcher> Ms = buildMatchers(Rule);
assert(Ms.size() == 1 && "Cases must have compatible matchers.");
return Ms[0];
@@ -428,19 +446,16 @@ SourceLocation transformer::detail::getRuleMatchLoc(const MatchResult &Result) {
// Finds the case that was "selected" -- that is, whose matcher triggered the
// `MatchResult`.
-const RewriteRule::Case &
-transformer::detail::findSelectedCase(const MatchResult &Result,
- const RewriteRule &Rule) {
+size_t transformer::detail::findSelectedCase(const MatchResult &Result,
+ const RewriteRuleBase &Rule) {
if (Rule.Cases.size() == 1)
- return Rule.Cases[0];
+ return 0;
auto &NodesMap = Result.Nodes.getMap();
for (size_t i = 0, N = Rule.Cases.size(); i < N; ++i) {
std::string Tag = ("Tag" + Twine(i)).str();
if (NodesMap.find(Tag) != NodesMap.end())
- return Rule.Cases[i];
+ return i;
}
llvm_unreachable("No tag found for this rule.");
}
-
-const llvm::StringRef RewriteRule::RootID = ::clang::transformer::RootID;
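
With the new overloads above, a single-case rule can be written against an initializer list of edits directly. A hedged sketch using standard transformer combinators; the matcher and replacement text are illustrative only:

#include "clang/ASTMatchers/ASTMatchers.h"
#include "clang/Tooling/Transformer/RangeSelector.h"
#include "clang/Tooling/Transformer/RewriteRule.h"
#include "clang/Tooling/Transformer/Stencil.h"

clang::transformer::RewriteRule demoRule() {
  using namespace clang::transformer;
  // The edit list goes through detail::makeEditGenerator, i.e. editList().
  return makeRule(clang::ast_matchers::callExpr().bind("c"),
                  {changeTo(node("c"), cat("replacement()"))});
}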
diff --git a/contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp b/contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp
index 348d04dbaf4a..82dd4a2587b5 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Transformer/Stencil.cpp
@@ -278,7 +278,7 @@ public:
return llvm::make_error<StringError>(errc::invalid_argument,
"Id not bound: " + BaseId);
llvm::Optional<std::string> S = tooling::buildAccess(*E, *Match.Context);
- if (!S.hasValue())
+ if (!S)
return llvm::make_error<StringError>(
errc::invalid_argument,
"Could not construct object text from ID: " + BaseId);
diff --git a/contrib/llvm-project/clang/lib/Tooling/Transformer/Transformer.cpp b/contrib/llvm-project/clang/lib/Tooling/Transformer/Transformer.cpp
index 7a4d8b45f189..f95f2ab7d954 100644
--- a/contrib/llvm-project/clang/lib/Tooling/Transformer/Transformer.cpp
+++ b/contrib/llvm-project/clang/lib/Tooling/Transformer/Transformer.cpp
@@ -16,35 +16,29 @@
#include <utility>
#include <vector>
-using namespace clang;
-using namespace tooling;
+namespace clang {
+namespace tooling {
-using ast_matchers::MatchFinder;
+using ::clang::ast_matchers::MatchFinder;
-void Transformer::registerMatchers(MatchFinder *MatchFinder) {
- for (auto &Matcher : transformer::detail::buildMatchers(Rule))
- MatchFinder->addDynamicMatcher(Matcher, this);
-}
+namespace detail {
-void Transformer::run(const MatchFinder::MatchResult &Result) {
+void TransformerImpl::onMatch(
+ const ast_matchers::MatchFinder::MatchResult &Result) {
if (Result.Context->getDiagnostics().hasErrorOccurred())
return;
- transformer::RewriteRule::Case Case =
- transformer::detail::findSelectedCase(Result, Rule);
- auto Transformations = Case.Edits(Result);
- if (!Transformations) {
- Consumer(Transformations.takeError());
- return;
- }
-
- if (Transformations->empty())
- return;
+ onMatchImpl(Result);
+}
+llvm::Expected<llvm::SmallVector<AtomicChange, 1>>
+TransformerImpl::convertToAtomicChanges(
+ const llvm::SmallVectorImpl<transformer::Edit> &Edits,
+ const MatchFinder::MatchResult &Result) {
// Group the transformations, by file, into AtomicChanges, each anchored by
// the location of the first change in that file.
std::map<FileID, AtomicChange> ChangesByFileID;
- for (const auto &T : *Transformations) {
+ for (const auto &T : Edits) {
auto ID = Result.SourceManager->getFileID(T.Range.getBegin());
auto Iter = ChangesByFileID
.emplace(ID, AtomicChange(*Result.SourceManager,
@@ -55,8 +49,7 @@ void Transformer::run(const MatchFinder::MatchResult &Result) {
case transformer::EditKind::Range:
if (auto Err =
AC.replace(*Result.SourceManager, T.Range, T.Replacement)) {
- Consumer(std::move(Err));
- return;
+ return std::move(Err);
}
break;
case transformer::EditKind::AddInclude:
@@ -65,6 +58,27 @@ void Transformer::run(const MatchFinder::MatchResult &Result) {
}
}
+ llvm::SmallVector<AtomicChange, 1> Changes;
+ Changes.reserve(ChangesByFileID.size());
for (auto &IDChangePair : ChangesByFileID)
- Consumer(std::move(IDChangePair.second));
+ Changes.push_back(std::move(IDChangePair.second));
+
+ return Changes;
+}
+
+} // namespace detail
+
+void Transformer::registerMatchers(MatchFinder *MatchFinder) {
+ for (auto &Matcher : Impl->buildMatchers())
+ MatchFinder->addDynamicMatcher(Matcher, this);
}
+
+void Transformer::run(const MatchFinder::MatchResult &Result) {
+ if (Result.Context->getDiagnostics().hasErrorOccurred())
+ return;
+
+ Impl->onMatch(Result);
+}
+
+} // namespace tooling
+} // namespace clang
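
End to end, the refactored Transformer is still driven from a MatchFinder the same way. A minimal wiring sketch; the single-AtomicChange consumer signature is assumed from the pre-existing public interface, and demoRule is the sketch above:

#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/Tooling/Transformer/Transformer.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/raw_ostream.h"

void demoRun(clang::transformer::RewriteRule R) {
  clang::ast_matchers::MatchFinder Finder;
  clang::tooling::Transformer T(
      std::move(R), [](llvm::Expected<clang::tooling::AtomicChange> C) {
        if (!C) {
          llvm::errs() << llvm::toString(C.takeError()) << "\n";
          return;
        }
        // Apply or record *C here.
      });
  T.registerMatchers(&Finder);
  // Run Finder over an AST, e.g. via newFrontendActionFactory(&Finder).
}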